@@ -124,6 +124,23 @@ static void ffa_mem_share(struct arm_smccc_res *res, u32 len, u32 fraglen)
124124 res );
125125}
126126
127+ static void ffa_mem_reclaim (struct arm_smccc_res * res , u32 handle_lo ,
128+ u32 handle_hi , u32 flags )
129+ {
130+ arm_smccc_1_1_smc (FFA_MEM_RECLAIM ,
131+ handle_lo , handle_hi , flags ,
132+ 0 , 0 , 0 , 0 ,
133+ res );
134+ }
135+
136+ static void ffa_retrieve_req (struct arm_smccc_res * res , u32 len )
137+ {
138+ arm_smccc_1_1_smc (FFA_FN64_MEM_RETRIEVE_REQ ,
139+ len , len ,
140+ 0 , 0 , 0 , 0 , 0 ,
141+ res );
142+ }
143+
127144static void do_ffa_rxtx_map (struct arm_smccc_res * res ,
128145 struct kvm_cpu_context * ctxt )
129146{
@@ -379,6 +396,65 @@ static void do_ffa_mem_share(struct arm_smccc_res *res,
379396 return ;
380397}
381398
/*
 * Handle an FFA_MEM_RECLAIM call from the host. Before forwarding the
 * reclaim to the SPMD, retrieve the region description for the handle so
 * that the constituent address ranges can be passed to
 * ffa_host_unshare_ranges() once the reclaim has succeeded.
 */
static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
			       struct kvm_cpu_context *ctxt)
{
	/* The host passes the 64-bit handle split across two 32-bit regs. */
	DECLARE_REG(u32, handle_lo, ctxt, 1);
	DECLARE_REG(u32, handle_hi, ctxt, 2);
	DECLARE_REG(u32, flags, ctxt, 3);
	struct ffa_composite_mem_region *reg;
	struct ffa_mem_region *buf;
	int ret = 0;
	u32 offset;
	u64 handle;

	handle = PACK_HANDLE(handle_lo, handle_hi);

	/* Serialise use of the mailbox buffers for the whole transaction. */
	hyp_spin_lock(&host_buffers.lock);

	/* Build a minimal retrieve request in the hyp TX buffer. */
	buf = hyp_buffers.tx;
	*buf = (struct ffa_mem_region) {
		.sender_id = HOST_FFA_ID,
		.handle = handle,
	};

	ffa_retrieve_req(res, sizeof(*buf));
	/* The response descriptor is delivered in the hyp RX buffer. */
	buf = hyp_buffers.rx;
	/*
	 * On failure ret stays 0, so res is forwarded to the host carrying
	 * the SPMD's own error code unmodified.
	 */
	if (res->a0 != FFA_MEM_RETRIEVE_RESP)
		goto out_unlock;

	/*
	 * Check for fragmentation: a1/a2 are the total and fragment lengths
	 * of the response; if they differ, the descriptor did not arrive in
	 * one piece and we bail out rather than reassemble it.
	 */
	if (res->a1 != res->a2) {
		ret = FFA_RET_ABORTED;
		goto out_unlock;
	}

	offset = buf->ep_mem_access[0].composite_off;
	/*
	 * We can trust the SPMD to get this right, but let's at least
	 * check that we end up with something that doesn't look _completely_
	 * bogus: the composite region must lie within the mailbox.
	 */
	if (WARN_ON(offset > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
		ret = FFA_RET_ABORTED;
		goto out_unlock;
	}

	reg = (void *)buf + offset;
	/* Forward the actual reclaim to the SPMD. */
	ffa_mem_reclaim(res, handle_lo, handle_hi, flags);
	if (res->a0 != FFA_SUCCESS)
		goto out_unlock;

	/* If the SPMD was happy, then we should be too. */
	WARN_ON(ffa_host_unshare_ranges(reg->constituents,
					reg->addr_range_cnt));
out_unlock:
	hyp_spin_unlock(&host_buffers.lock);

	/* Only overwrite res when we aborted locally with our own error. */
	if (ret)
		ffa_to_smccc_res(res, ret);
}
457+
382458/*
383459 * Is a given FFA function supported, either by forwarding on directly
384460 * or by handling at EL2?
@@ -445,6 +521,9 @@ bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt)
445521 case FFA_FN64_MEM_SHARE :
446522 do_ffa_mem_share (& res , host_ctxt );
447523 goto out_handled ;
524+ case FFA_MEM_RECLAIM :
525+ do_ffa_mem_reclaim (& res , host_ctxt );
526+ goto out_handled ;
448527 }
449528
450529 if (ffa_call_supported (func_id ))
0 commit comments