@@ -116,6 +116,14 @@ static int ffa_unmap_hyp_buffers(void)
116116 return res .a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res .a2 ;
117117}
118118
/*
 * Forward an FFA_FN64_MEM_SHARE call to the secure world.
 *
 * @res:     filled in with the SMC return values (res->a0 holds the
 *           FF-A status checked by the caller).
 * @len:     total length of the memory-transaction descriptor.
 * @fraglen: length of the fragment passed in this call.
 *
 * The five trailing parameter registers are passed as zero —
 * NOTE(review): presumably MBZ per the FF-A MEM_SHARE ABI; confirm
 * against the FF-A specification.
 */
static void ffa_mem_share(struct arm_smccc_res *res, u32 len, u32 fraglen)
{
	arm_smccc_1_1_smc(FFA_FN64_MEM_SHARE,
			  len, fraglen,
			  0, 0, 0, 0, 0,
			  res);
}
126+
119127static void do_ffa_rxtx_map (struct arm_smccc_res * res ,
120128 struct kvm_cpu_context * ctxt )
121129{
@@ -228,6 +236,149 @@ static void do_ffa_rxtx_unmap(struct arm_smccc_res *res,
228236 ffa_to_smccc_res (res , ret );
229237}
230238
239+ static u32 __ffa_host_share_ranges (struct ffa_mem_region_addr_range * ranges ,
240+ u32 nranges )
241+ {
242+ u32 i ;
243+
244+ for (i = 0 ; i < nranges ; ++ i ) {
245+ struct ffa_mem_region_addr_range * range = & ranges [i ];
246+ u64 sz = (u64 )range -> pg_cnt * FFA_PAGE_SIZE ;
247+ u64 pfn = hyp_phys_to_pfn (range -> address );
248+
249+ if (!PAGE_ALIGNED (sz ))
250+ break ;
251+
252+ if (__pkvm_host_share_ffa (pfn , sz / PAGE_SIZE ))
253+ break ;
254+ }
255+
256+ return i ;
257+ }
258+
259+ static u32 __ffa_host_unshare_ranges (struct ffa_mem_region_addr_range * ranges ,
260+ u32 nranges )
261+ {
262+ u32 i ;
263+
264+ for (i = 0 ; i < nranges ; ++ i ) {
265+ struct ffa_mem_region_addr_range * range = & ranges [i ];
266+ u64 sz = (u64 )range -> pg_cnt * FFA_PAGE_SIZE ;
267+ u64 pfn = hyp_phys_to_pfn (range -> address );
268+
269+ if (!PAGE_ALIGNED (sz ))
270+ break ;
271+
272+ if (__pkvm_host_unshare_ffa (pfn , sz / PAGE_SIZE ))
273+ break ;
274+ }
275+
276+ return i ;
277+ }
278+
279+ static int ffa_host_share_ranges (struct ffa_mem_region_addr_range * ranges ,
280+ u32 nranges )
281+ {
282+ u32 nshared = __ffa_host_share_ranges (ranges , nranges );
283+ int ret = 0 ;
284+
285+ if (nshared != nranges ) {
286+ WARN_ON (__ffa_host_unshare_ranges (ranges , nshared ) != nshared );
287+ ret = FFA_RET_DENIED ;
288+ }
289+
290+ return ret ;
291+ }
292+
293+ static int ffa_host_unshare_ranges (struct ffa_mem_region_addr_range * ranges ,
294+ u32 nranges )
295+ {
296+ u32 nunshared = __ffa_host_unshare_ranges (ranges , nranges );
297+ int ret = 0 ;
298+
299+ if (nunshared != nranges ) {
300+ WARN_ON (__ffa_host_share_ranges (ranges , nunshared ) != nunshared );
301+ ret = FFA_RET_DENIED ;
302+ }
303+
304+ return ret ;
305+ }
306+
/*
 * Handle an FFA_MEM_SHARE/FFA_FN64_MEM_SHARE call from the host.
 *
 * Validates the transaction descriptor in the host's TX buffer, shares
 * the constituent ranges with the FF-A layer, and only then forwards
 * the call to the secure world. If the forwarded SMC fails, the share
 * is rolled back so host ownership is restored.
 *
 * @res:  SMC result returned to the host; on a local validation error
 *        it is overwritten with the FF-A error code in @ret.
 * @ctxt: host CPU context the call arguments are read from.
 */
static void do_ffa_mem_share(struct arm_smccc_res *res,
			     struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, len, ctxt, 1);
	DECLARE_REG(u32, fraglen, ctxt, 2);
	DECLARE_REG(u64, addr_mbz, ctxt, 3);	/* MBZ: buffers are pre-registered */
	DECLARE_REG(u32, npages_mbz, ctxt, 4);	/* MBZ: buffers are pre-registered */
	struct ffa_composite_mem_region *reg;
	struct ffa_mem_region *buf;
	int ret = 0;
	u32 offset;

	/*
	 * Reject callers passing their own buffer address/size, and
	 * fragments larger than the transaction or than the mailbox.
	 */
	if (addr_mbz || npages_mbz || fraglen > len ||
	    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	/* Fragmented (multi-call) transactions are not supported here. */
	if (fraglen < len) {
		ret = FFA_RET_ABORTED;
		goto out;
	}

	/* The fragment must at least hold the fixed header + one endpoint. */
	if (fraglen < sizeof(struct ffa_mem_region) +
		      sizeof(struct ffa_mem_region_attributes)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	/*
	 * Snapshot the host's TX buffer into the hypervisor's own copy
	 * before validating anything, so the host cannot change the
	 * descriptor under us after the checks (TOCTOU).
	 */
	buf = hyp_buffers.tx;
	memcpy(buf, host_buffers.tx, fraglen);

	/* Exactly one endpoint, and the sender must be the host itself. */
	offset = buf->ep_mem_access[0].composite_off;
	if (!offset || buf->ep_count != 1 || buf->sender_id != HOST_FFA_ID) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	/* The composite region header must lie within the fragment. */
	if (fraglen < offset + sizeof(struct ffa_composite_mem_region)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	/* ...and so must the whole constituent array it describes. */
	reg = (void *)buf + offset;
	if (fraglen < offset + sizeof(struct ffa_composite_mem_region) +
		      reg->addr_range_cnt *
		      sizeof(struct ffa_mem_region_addr_range)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	/* Transfer ownership tracking before telling the secure world. */
	ret = ffa_host_share_ranges(reg->constituents, reg->addr_range_cnt);
	if (ret)
		goto out_unlock;

	ffa_mem_share(res, len, fraglen);
	if (res->a0 != FFA_SUCCESS) {
		/* Secure world refused: give the pages back to the host. */
		WARN_ON(ffa_host_unshare_ranges(reg->constituents,
						reg->addr_range_cnt));
	}

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	if (ret)
		ffa_to_smccc_res(res, ret);
	return;
}
381+
231382/*
232383 * Is a given FFA function supported, either by forwarding on directly
233384 * or by handling at EL2?
@@ -290,6 +441,10 @@ bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt)
290441 case FFA_RXTX_UNMAP :
291442 do_ffa_rxtx_unmap (& res , host_ctxt );
292443 goto out_handled ;
444+ case FFA_MEM_SHARE :
445+ case FFA_FN64_MEM_SHARE :
446+ do_ffa_mem_share (& res , host_ctxt );
447+ goto out_handled ;
293448 }
294449
295450 if (ffa_call_supported (func_id ))
0 commit comments