Skip to content

Commit 0a9f15f

Browse files
Quentin Perret authored and Oliver Upton committed
KVM: arm64: pkvm: Add support for fragmented FF-A descriptors
FF-A memory descriptors may need to be sent in fragments when they don't fit in the mailboxes. Doing so involves using the FRAG_TX and FRAG_RX primitives defined in the FF-A protocol.

Add support in the pKVM FF-A relayer for fragmented descriptors by monitoring outgoing FRAG_TX transactions and by buffering large descriptors on the reclaim path.

Co-developed-by: Andrew Walbran <qwandor@google.com>
Signed-off-by: Andrew Walbran <qwandor@google.com>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20230523101828.7328-11-will@kernel.org
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
1 parent 20936cd commit 0a9f15f

2 files changed

Lines changed: 162 additions & 25 deletions

File tree

arch/arm64/include/asm/kvm_pkvm.h

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,9 @@
66
#ifndef __ARM64_KVM_PKVM_H__
77
#define __ARM64_KVM_PKVM_H__
88

9+
#include <linux/arm_ffa.h>
910
#include <linux/memblock.h>
11+
#include <linux/scatterlist.h>
1012
#include <asm/kvm_pgtable.h>
1113

1214
/* Maximum number of VMs that can co-exist under pKVM. */
@@ -110,8 +112,19 @@ static inline unsigned long host_s2_pgtable_pages(void)
110112

111113
static inline unsigned long hyp_ffa_proxy_pages(void)
112114
{
113-
/* A page each for the hypervisor's RX and TX mailboxes. */
114-
return 2 * KVM_FFA_MBOX_NR_PAGES;
115+
size_t desc_max;
116+
117+
/*
118+
* The hypervisor FFA proxy needs enough memory to buffer a fragmented
119+
* descriptor returned from EL3 in response to a RETRIEVE_REQ call.
120+
*/
121+
desc_max = sizeof(struct ffa_mem_region) +
122+
sizeof(struct ffa_mem_region_attributes) +
123+
sizeof(struct ffa_composite_mem_region) +
124+
SG_MAX_SEGMENTS * sizeof(struct ffa_mem_region_addr_range);
125+
126+
/* Plus a page each for the hypervisor's RX and TX mailboxes. */
127+
return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
115128
}
116129

117130
#endif /* __ARM64_KVM_PKVM_H__ */

arch/arm64/kvm/hyp/nvhe/ffa.c

Lines changed: 147 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,18 @@
4242
*/
4343
#define HOST_FFA_ID 0
4444

45+
/*
 * A buffer to hold the maximum descriptor size we can see from the host,
 * which is required when the SPMD returns a fragmented FFA_MEM_RETRIEVE_RESP
 * when resolving the handle on the reclaim path.
 */
struct kvm_ffa_descriptor_buffer {
	void *buf;	/* Base address of the buffer. */
	size_t len;	/* Capacity of @buf in bytes. */
};

/* Single, statically-allocated reassembly buffer for the reclaim path. */
static struct kvm_ffa_descriptor_buffer ffa_desc_buf;
56+
4557
struct kvm_ffa_buffers {
4658
hyp_spinlock_t lock;
4759
void *tx;
@@ -122,6 +134,24 @@ static int ffa_unmap_hyp_buffers(void)
122134
return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
123135
}
124136

137+
/*
 * Issue FFA_MEM_FRAG_TX to EL3: transmit the next fragment ('fraglen'
 * bytes, staged in the hypervisor TX mailbox by the caller) of the
 * transaction identified by {handle_hi, handle_lo}, on behalf of
 * 'endpoint_id'. The SMC result is returned through 'res'.
 */
static void ffa_mem_frag_tx(struct arm_smccc_res *res, u32 handle_lo,
			    u32 handle_hi, u32 fraglen, u32 endpoint_id)
{
	arm_smccc_1_1_smc(FFA_MEM_FRAG_TX,
			  handle_lo, handle_hi, fraglen, endpoint_id,
			  0, 0, 0,
			  res);
}
145+
146+
/*
 * Issue FFA_MEM_FRAG_RX to EL3: request the fragment starting at byte
 * offset 'fragoff' of the descriptor identified by {handle_hi, handle_lo},
 * naming the host's FF-A ID as the receiver endpoint. The fragment is
 * delivered through the hypervisor RX mailbox; the SMC result is returned
 * through 'res'.
 */
static void ffa_mem_frag_rx(struct arm_smccc_res *res, u32 handle_lo,
			    u32 handle_hi, u32 fragoff)
{
	arm_smccc_1_1_smc(FFA_MEM_FRAG_RX,
			  handle_lo, handle_hi, fragoff, HOST_FFA_ID,
			  0, 0, 0,
			  res);
}
154+
125155
static void ffa_mem_xfer(struct arm_smccc_res *res, u64 func_id, u32 len,
126156
u32 fraglen)
127157
{
@@ -327,6 +357,64 @@ static int ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
327357
return ret;
328358
}
329359

360+
/*
 * Handle an FFA_MEM_FRAG_TX call from the host: relay the next fragment
 * of an in-flight memory transaction to EL3. A non-initial fragment is a
 * plain array of ffa_mem_region_addr_range entries, so the pages it names
 * are shared in the host stage-2 before the fragment is forwarded.
 */
static void do_ffa_mem_frag_tx(struct arm_smccc_res *res,
			       struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, handle_lo, ctxt, 1);
	DECLARE_REG(u32, handle_hi, ctxt, 2);
	DECLARE_REG(u32, fraglen, ctxt, 3);
	DECLARE_REG(u32, endpoint_id, ctxt, 4);
	struct ffa_mem_region_addr_range *buf;
	int ret = FFA_RET_INVALID_PARAMETERS;
	u32 nr_ranges;

	/* The fragment must fit in our TX mailbox... */
	if (fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)
		goto out;

	/* ...and must be a whole number of address-range entries. */
	if (fraglen % sizeof(*buf))
		goto out;

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx)
		goto out_unlock;

	/* Snapshot the fragment so the host can't modify it under us. */
	buf = hyp_buffers.tx;
	memcpy(buf, host_buffers.tx, fraglen);
	nr_ranges = fraglen / sizeof(*buf);

	ret = ffa_host_share_ranges(buf, nr_ranges);
	if (ret) {
		/*
		 * We're effectively aborting the transaction, so we need
		 * to restore the global state back to what it was prior to
		 * transmission of the first fragment.
		 */
		ffa_mem_reclaim(res, handle_lo, handle_hi, 0);
		WARN_ON(res->a0 != FFA_SUCCESS);
		goto out_unlock;
	}

	ffa_mem_frag_tx(res, handle_lo, handle_hi, fraglen, endpoint_id);
	if (res->a0 != FFA_SUCCESS && res->a0 != FFA_MEM_FRAG_RX)
		WARN_ON(ffa_host_unshare_ranges(buf, nr_ranges));

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	if (ret)
		ffa_to_smccc_res(res, ret);

	/*
	 * If for any reason this did not succeed, we're in trouble as we have
	 * now lost the content of the previous fragments and we can't rollback
	 * the host stage-2 changes. The pages previously marked as shared will
	 * remain stuck in that state forever, hence preventing the host from
	 * sharing/donating them again and may possibly lead to subsequent
	 * failures, but this will not compromise confidentiality.
	 */
	return;
}
417+
330418
static __always_inline void do_ffa_mem_xfer(const u64 func_id,
331419
struct arm_smccc_res *res,
332420
struct kvm_cpu_context *ctxt)
@@ -337,8 +425,8 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id,
337425
DECLARE_REG(u32, npages_mbz, ctxt, 4);
338426
struct ffa_composite_mem_region *reg;
339427
struct ffa_mem_region *buf;
428+
u32 offset, nr_ranges;
340429
int ret = 0;
341-
u32 offset;
342430

343431
BUILD_BUG_ON(func_id != FFA_FN64_MEM_SHARE &&
344432
func_id != FFA_FN64_MEM_LEND);
@@ -349,11 +437,6 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id,
349437
goto out;
350438
}
351439

352-
if (fraglen < len) {
353-
ret = FFA_RET_ABORTED;
354-
goto out;
355-
}
356-
357440
if (fraglen < sizeof(struct ffa_mem_region) +
358441
sizeof(struct ffa_mem_region_attributes)) {
359442
ret = FFA_RET_INVALID_PARAMETERS;
@@ -381,21 +464,26 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id,
381464
}
382465

383466
reg = (void *)buf + offset;
384-
if (fraglen < offset + sizeof(struct ffa_composite_mem_region) +
385-
reg->addr_range_cnt *
386-
sizeof(struct ffa_mem_region_addr_range)) {
467+
nr_ranges = ((void *)buf + fraglen) - (void *)reg->constituents;
468+
if (nr_ranges % sizeof(reg->constituents[0])) {
387469
ret = FFA_RET_INVALID_PARAMETERS;
388470
goto out_unlock;
389471
}
390472

391-
ret = ffa_host_share_ranges(reg->constituents, reg->addr_range_cnt);
473+
nr_ranges /= sizeof(reg->constituents[0]);
474+
ret = ffa_host_share_ranges(reg->constituents, nr_ranges);
392475
if (ret)
393476
goto out_unlock;
394477

395478
ffa_mem_xfer(res, func_id, len, fraglen);
396-
if (res->a0 != FFA_SUCCESS) {
397-
WARN_ON(ffa_host_unshare_ranges(reg->constituents,
398-
reg->addr_range_cnt));
479+
if (fraglen != len) {
480+
if (res->a0 != FFA_MEM_FRAG_RX)
481+
goto err_unshare;
482+
483+
if (res->a3 != fraglen)
484+
goto err_unshare;
485+
} else if (res->a0 != FFA_SUCCESS) {
486+
goto err_unshare;
399487
}
400488

401489
out_unlock:
@@ -404,6 +492,10 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id,
404492
if (ret)
405493
ffa_to_smccc_res(res, ret);
406494
return;
495+
496+
err_unshare:
497+
WARN_ON(ffa_host_unshare_ranges(reg->constituents, nr_ranges));
498+
goto out_unlock;
407499
}
408500

409501
static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
@@ -413,9 +505,9 @@ static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
413505
DECLARE_REG(u32, handle_hi, ctxt, 2);
414506
DECLARE_REG(u32, flags, ctxt, 3);
415507
struct ffa_composite_mem_region *reg;
508+
u32 offset, len, fraglen, fragoff;
416509
struct ffa_mem_region *buf;
417510
int ret = 0;
418-
u32 offset;
419511
u64 handle;
420512

421513
handle = PACK_HANDLE(handle_lo, handle_hi);
@@ -433,28 +525,45 @@ static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
433525
if (res->a0 != FFA_MEM_RETRIEVE_RESP)
434526
goto out_unlock;
435527

436-
/* Check for fragmentation */
437-
if (res->a1 != res->a2) {
438-
ret = FFA_RET_ABORTED;
439-
goto out_unlock;
440-
}
528+
len = res->a1;
529+
fraglen = res->a2;
441530

442531
offset = buf->ep_mem_access[0].composite_off;
443532
/*
444533
* We can trust the SPMD to get this right, but let's at least
445534
* check that we end up with something that doesn't look _completely_
446535
* bogus.
447536
*/
448-
if (WARN_ON(offset > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
537+
if (WARN_ON(offset > len ||
538+
fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
449539
ret = FFA_RET_ABORTED;
450540
goto out_unlock;
451541
}
452542

453-
reg = (void *)buf + offset;
543+
if (len > ffa_desc_buf.len) {
544+
ret = FFA_RET_NO_MEMORY;
545+
goto out_unlock;
546+
}
547+
548+
buf = ffa_desc_buf.buf;
549+
memcpy(buf, hyp_buffers.rx, fraglen);
550+
551+
for (fragoff = fraglen; fragoff < len; fragoff += fraglen) {
552+
ffa_mem_frag_rx(res, handle_lo, handle_hi, fragoff);
553+
if (res->a0 != FFA_MEM_FRAG_TX) {
554+
ret = FFA_RET_INVALID_PARAMETERS;
555+
goto out_unlock;
556+
}
557+
558+
fraglen = res->a3;
559+
memcpy((void *)buf + fragoff, hyp_buffers.rx, fraglen);
560+
}
561+
454562
ffa_mem_reclaim(res, handle_lo, handle_hi, flags);
455563
if (res->a0 != FFA_SUCCESS)
456564
goto out_unlock;
457565

566+
reg = (void *)buf + offset;
458567
/* If the SPMD was happy, then we should be too. */
459568
WARN_ON(ffa_host_unshare_ranges(reg->constituents,
460569
reg->addr_range_cnt));
@@ -569,6 +678,9 @@ bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt)
569678
case FFA_FN64_MEM_LEND:
570679
do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, host_ctxt);
571680
goto out_handled;
681+
case FFA_MEM_FRAG_TX:
682+
do_ffa_mem_frag_tx(&res, host_ctxt);
683+
goto out_handled;
572684
}
573685

574686
if (ffa_call_supported(func_id))
@@ -584,6 +696,7 @@ int hyp_ffa_init(void *pages)
584696
{
585697
struct arm_smccc_res res;
586698
size_t min_rxtx_sz;
699+
void *tx, *rx;
587700

588701
if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_2)
589702
return 0;
@@ -624,10 +737,21 @@ int hyp_ffa_init(void *pages)
624737
if (min_rxtx_sz > PAGE_SIZE)
625738
return -EOPNOTSUPP;
626739

740+
tx = pages;
741+
pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
742+
rx = pages;
743+
pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
744+
745+
ffa_desc_buf = (struct kvm_ffa_descriptor_buffer) {
746+
.buf = pages,
747+
.len = PAGE_SIZE *
748+
(hyp_ffa_proxy_pages() - (2 * KVM_FFA_MBOX_NR_PAGES)),
749+
};
750+
627751
hyp_buffers = (struct kvm_ffa_buffers) {
628752
.lock = __HYP_SPIN_LOCK_UNLOCKED,
629-
.tx = pages,
630-
.rx = pages + (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE),
753+
.tx = tx,
754+
.rx = rx,
631755
};
632756

633757
host_buffers = (struct kvm_ffa_buffers) {

0 commit comments

Comments
 (0)