Skip to content

Commit 6f4c348

Browse files
Per Larsen authored and Marc Zyngier committed
KVM: arm64: Use SMCCC 1.2 for FF-A initialization and in host handler
SMCCC 1.1 and prior allows four registers to be sent back as a result of an FF-A interface. SMCCC 1.2 increases the number of results that can be sent back to 8 and 16 for 32-bit and 64-bit SMC/HVCs respectively. FF-A 1.0 references SMCCC 1.2 (reference [4] on page xi) and FF-A 1.2 explicitly requires SMCCC 1.2 so it should be safe to use this version unconditionally. Moreover, it is simpler to implement FF-A features without having to worry about compatibility with SMCCC 1.1 and older. SMCCC 1.2 requires that SMC32/HVC32 from aarch64 mode preserves x8-x30 but given that there is no reliable way to distinguish 32-bit/64-bit calls, we assume SMC64 unconditionally. This has the benefit of being consistent with the handling of calls that are passed through, i.e., not proxied. (A cleaner solution will become available in FF-A 1.3.) Update the FF-A initialization and host handler code to use SMCCC 1.2. Signed-off-by: Per Larsen <perlarsen@google.com> Acked-by: Will Deacon <will@kernel.org> Signed-off-by: Marc Zyngier <maz@kernel.org>
1 parent f414269 commit 6f4c348

2 files changed

Lines changed: 125 additions & 69 deletions

File tree

arch/arm64/kvm/hyp/nvhe/Makefile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@ hyp-obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o
2727
cache.o setup.o mm.o mem_protect.o sys_regs.o pkvm.o stacktrace.o ffa.o
2828
hyp-obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
2929
../fpsimd.o ../hyp-entry.o ../exception.o ../pgtable.o
30+
hyp-obj-y += ../../../kernel/smccc-call.o
3031
hyp-obj-$(CONFIG_LIST_HARDENED) += list_debug.o
3132
hyp-obj-y += $(lib-objs)
3233

arch/arm64/kvm/hyp/nvhe/ffa.c

Lines changed: 124 additions & 69 deletions
Original file line numberDiff line numberDiff line change
@@ -71,36 +71,68 @@ static u32 hyp_ffa_version;
7171
static bool has_version_negotiated;
7272
static hyp_spinlock_t version_lock;
7373

74-
static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno)
74+
static void ffa_to_smccc_error(struct arm_smccc_1_2_regs *res, u64 ffa_errno)
7575
{
76-
*res = (struct arm_smccc_res) {
76+
*res = (struct arm_smccc_1_2_regs) {
7777
.a0 = FFA_ERROR,
7878
.a2 = ffa_errno,
7979
};
8080
}
8181

82-
static void ffa_to_smccc_res_prop(struct arm_smccc_res *res, int ret, u64 prop)
82+
static void ffa_to_smccc_res_prop(struct arm_smccc_1_2_regs *res, int ret, u64 prop)
8383
{
8484
if (ret == FFA_RET_SUCCESS) {
85-
*res = (struct arm_smccc_res) { .a0 = FFA_SUCCESS,
86-
.a2 = prop };
85+
*res = (struct arm_smccc_1_2_regs) { .a0 = FFA_SUCCESS,
86+
.a2 = prop };
8787
} else {
8888
ffa_to_smccc_error(res, ret);
8989
}
9090
}
9191

92-
static void ffa_to_smccc_res(struct arm_smccc_res *res, int ret)
92+
static void ffa_to_smccc_res(struct arm_smccc_1_2_regs *res, int ret)
9393
{
9494
ffa_to_smccc_res_prop(res, ret, 0);
9595
}
9696

9797
static void ffa_set_retval(struct kvm_cpu_context *ctxt,
98-
struct arm_smccc_res *res)
98+
struct arm_smccc_1_2_regs *res)
9999
{
100100
cpu_reg(ctxt, 0) = res->a0;
101101
cpu_reg(ctxt, 1) = res->a1;
102102
cpu_reg(ctxt, 2) = res->a2;
103103
cpu_reg(ctxt, 3) = res->a3;
104+
cpu_reg(ctxt, 4) = res->a4;
105+
cpu_reg(ctxt, 5) = res->a5;
106+
cpu_reg(ctxt, 6) = res->a6;
107+
cpu_reg(ctxt, 7) = res->a7;
108+
109+
/*
110+
* DEN0028C 2.6: SMC32/HVC32 call from aarch64 must preserve x8-x30.
111+
*
112+
* In FF-A 1.2, we cannot rely on the function ID sent by the caller to
113+
* detect 32-bit calls because the CPU cycle management interfaces (e.g.
114+
* FFA_MSG_WAIT, FFA_RUN) are 32-bit only but can have 64-bit responses.
115+
*
116+
* FFA-1.3 introduces 64-bit variants of the CPU cycle management
117+
* interfaces. Moreover, FF-A 1.3 clarifies that SMC32 direct requests
118+
* complete with SMC32 direct reponses which *should* allow us use the
119+
* function ID sent by the caller to determine whether to return x8-x17.
120+
*
121+
* Note that we also cannot rely on function IDs in the response.
122+
*
123+
* Given the above, assume SMC64 and send back x0-x17 unconditionally
124+
* as the passthrough code (__kvm_hyp_host_forward_smc) does the same.
125+
*/
126+
cpu_reg(ctxt, 8) = res->a8;
127+
cpu_reg(ctxt, 9) = res->a9;
128+
cpu_reg(ctxt, 10) = res->a10;
129+
cpu_reg(ctxt, 11) = res->a11;
130+
cpu_reg(ctxt, 12) = res->a12;
131+
cpu_reg(ctxt, 13) = res->a13;
132+
cpu_reg(ctxt, 14) = res->a14;
133+
cpu_reg(ctxt, 15) = res->a15;
134+
cpu_reg(ctxt, 16) = res->a16;
135+
cpu_reg(ctxt, 17) = res->a17;
104136
}
105137

106138
static bool is_ffa_call(u64 func_id)
@@ -113,82 +145,92 @@ static bool is_ffa_call(u64 func_id)
113145

114146
static int ffa_map_hyp_buffers(u64 ffa_page_count)
115147
{
116-
struct arm_smccc_res res;
148+
struct arm_smccc_1_2_regs res;
117149

118-
arm_smccc_1_1_smc(FFA_FN64_RXTX_MAP,
119-
hyp_virt_to_phys(hyp_buffers.tx),
120-
hyp_virt_to_phys(hyp_buffers.rx),
121-
ffa_page_count,
122-
0, 0, 0, 0,
123-
&res);
150+
arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
151+
.a0 = FFA_FN64_RXTX_MAP,
152+
.a1 = hyp_virt_to_phys(hyp_buffers.tx),
153+
.a2 = hyp_virt_to_phys(hyp_buffers.rx),
154+
.a3 = ffa_page_count,
155+
}, &res);
124156

125157
return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
126158
}
127159

128160
static int ffa_unmap_hyp_buffers(void)
129161
{
130-
struct arm_smccc_res res;
162+
struct arm_smccc_1_2_regs res;
131163

132-
arm_smccc_1_1_smc(FFA_RXTX_UNMAP,
133-
HOST_FFA_ID,
134-
0, 0, 0, 0, 0, 0,
135-
&res);
164+
arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
165+
.a0 = FFA_RXTX_UNMAP,
166+
.a1 = HOST_FFA_ID,
167+
}, &res);
136168

137169
return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
138170
}
139171

140-
static void ffa_mem_frag_tx(struct arm_smccc_res *res, u32 handle_lo,
172+
static void ffa_mem_frag_tx(struct arm_smccc_1_2_regs *res, u32 handle_lo,
141173
u32 handle_hi, u32 fraglen, u32 endpoint_id)
142174
{
143-
arm_smccc_1_1_smc(FFA_MEM_FRAG_TX,
144-
handle_lo, handle_hi, fraglen, endpoint_id,
145-
0, 0, 0,
146-
res);
175+
arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
176+
.a0 = FFA_MEM_FRAG_TX,
177+
.a1 = handle_lo,
178+
.a2 = handle_hi,
179+
.a3 = fraglen,
180+
.a4 = endpoint_id,
181+
}, res);
147182
}
148183

149-
static void ffa_mem_frag_rx(struct arm_smccc_res *res, u32 handle_lo,
184+
static void ffa_mem_frag_rx(struct arm_smccc_1_2_regs *res, u32 handle_lo,
150185
u32 handle_hi, u32 fragoff)
151186
{
152-
arm_smccc_1_1_smc(FFA_MEM_FRAG_RX,
153-
handle_lo, handle_hi, fragoff, HOST_FFA_ID,
154-
0, 0, 0,
155-
res);
187+
arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
188+
.a0 = FFA_MEM_FRAG_RX,
189+
.a1 = handle_lo,
190+
.a2 = handle_hi,
191+
.a3 = fragoff,
192+
.a4 = HOST_FFA_ID,
193+
}, res);
156194
}
157195

158-
static void ffa_mem_xfer(struct arm_smccc_res *res, u64 func_id, u32 len,
196+
static void ffa_mem_xfer(struct arm_smccc_1_2_regs *res, u64 func_id, u32 len,
159197
u32 fraglen)
160198
{
161-
arm_smccc_1_1_smc(func_id, len, fraglen,
162-
0, 0, 0, 0, 0,
163-
res);
199+
arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
200+
.a0 = func_id,
201+
.a1 = len,
202+
.a2 = fraglen,
203+
}, res);
164204
}
165205

166-
static void ffa_mem_reclaim(struct arm_smccc_res *res, u32 handle_lo,
206+
static void ffa_mem_reclaim(struct arm_smccc_1_2_regs *res, u32 handle_lo,
167207
u32 handle_hi, u32 flags)
168208
{
169-
arm_smccc_1_1_smc(FFA_MEM_RECLAIM,
170-
handle_lo, handle_hi, flags,
171-
0, 0, 0, 0,
172-
res);
209+
arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
210+
.a0 = FFA_MEM_RECLAIM,
211+
.a1 = handle_lo,
212+
.a2 = handle_hi,
213+
.a3 = flags,
214+
}, res);
173215
}
174216

175-
static void ffa_retrieve_req(struct arm_smccc_res *res, u32 len)
217+
static void ffa_retrieve_req(struct arm_smccc_1_2_regs *res, u32 len)
176218
{
177-
arm_smccc_1_1_smc(FFA_FN64_MEM_RETRIEVE_REQ,
178-
len, len,
179-
0, 0, 0, 0, 0,
180-
res);
219+
arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
220+
.a0 = FFA_FN64_MEM_RETRIEVE_REQ,
221+
.a1 = len,
222+
.a2 = len,
223+
}, res);
181224
}
182225

183-
static void ffa_rx_release(struct arm_smccc_res *res)
226+
static void ffa_rx_release(struct arm_smccc_1_2_regs *res)
184227
{
185-
arm_smccc_1_1_smc(FFA_RX_RELEASE,
186-
0, 0,
187-
0, 0, 0, 0, 0,
188-
res);
228+
arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
229+
.a0 = FFA_RX_RELEASE,
230+
}, res);
189231
}
190232

191-
static void do_ffa_rxtx_map(struct arm_smccc_res *res,
233+
static void do_ffa_rxtx_map(struct arm_smccc_1_2_regs *res,
192234
struct kvm_cpu_context *ctxt)
193235
{
194236
DECLARE_REG(phys_addr_t, tx, ctxt, 1);
@@ -267,7 +309,7 @@ static void do_ffa_rxtx_map(struct arm_smccc_res *res,
267309
goto out_unlock;
268310
}
269311

270-
static void do_ffa_rxtx_unmap(struct arm_smccc_res *res,
312+
static void do_ffa_rxtx_unmap(struct arm_smccc_1_2_regs *res,
271313
struct kvm_cpu_context *ctxt)
272314
{
273315
DECLARE_REG(u32, id, ctxt, 1);
@@ -368,7 +410,7 @@ static int ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
368410
return ret;
369411
}
370412

371-
static void do_ffa_mem_frag_tx(struct arm_smccc_res *res,
413+
static void do_ffa_mem_frag_tx(struct arm_smccc_1_2_regs *res,
372414
struct kvm_cpu_context *ctxt)
373415
{
374416
DECLARE_REG(u32, handle_lo, ctxt, 1);
@@ -427,7 +469,7 @@ static void do_ffa_mem_frag_tx(struct arm_smccc_res *res,
427469
}
428470

429471
static void __do_ffa_mem_xfer(const u64 func_id,
430-
struct arm_smccc_res *res,
472+
struct arm_smccc_1_2_regs *res,
431473
struct kvm_cpu_context *ctxt)
432474
{
433475
DECLARE_REG(u32, len, ctxt, 1);
@@ -521,7 +563,7 @@ static void __do_ffa_mem_xfer(const u64 func_id,
521563
__do_ffa_mem_xfer((fid), (res), (ctxt)); \
522564
} while (0);
523565

524-
static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
566+
static void do_ffa_mem_reclaim(struct arm_smccc_1_2_regs *res,
525567
struct kvm_cpu_context *ctxt)
526568
{
527569
DECLARE_REG(u32, handle_lo, ctxt, 1);
@@ -634,7 +676,7 @@ static bool ffa_call_supported(u64 func_id)
634676
return true;
635677
}
636678

637-
static bool do_ffa_features(struct arm_smccc_res *res,
679+
static bool do_ffa_features(struct arm_smccc_1_2_regs *res,
638680
struct kvm_cpu_context *ctxt)
639681
{
640682
DECLARE_REG(u32, id, ctxt, 1);
@@ -666,17 +708,21 @@ static bool do_ffa_features(struct arm_smccc_res *res,
666708
static int hyp_ffa_post_init(void)
667709
{
668710
size_t min_rxtx_sz;
669-
struct arm_smccc_res res;
711+
struct arm_smccc_1_2_regs res;
670712

671-
arm_smccc_1_1_smc(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0, &res);
713+
arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs){
714+
.a0 = FFA_ID_GET,
715+
}, &res);
672716
if (res.a0 != FFA_SUCCESS)
673717
return -EOPNOTSUPP;
674718

675719
if (res.a2 != HOST_FFA_ID)
676720
return -EINVAL;
677721

678-
arm_smccc_1_1_smc(FFA_FEATURES, FFA_FN64_RXTX_MAP,
679-
0, 0, 0, 0, 0, 0, &res);
722+
arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs){
723+
.a0 = FFA_FEATURES,
724+
.a1 = FFA_FN64_RXTX_MAP,
725+
}, &res);
680726
if (res.a0 != FFA_SUCCESS)
681727
return -EOPNOTSUPP;
682728

@@ -700,7 +746,7 @@ static int hyp_ffa_post_init(void)
700746
return 0;
701747
}
702748

703-
static void do_ffa_version(struct arm_smccc_res *res,
749+
static void do_ffa_version(struct arm_smccc_1_2_regs *res,
704750
struct kvm_cpu_context *ctxt)
705751
{
706752
DECLARE_REG(u32, ffa_req_version, ctxt, 1);
@@ -724,9 +770,10 @@ static void do_ffa_version(struct arm_smccc_res *res,
724770
* first if TEE supports it.
725771
*/
726772
if (FFA_MINOR_VERSION(ffa_req_version) < FFA_MINOR_VERSION(hyp_ffa_version)) {
727-
arm_smccc_1_1_smc(FFA_VERSION, ffa_req_version, 0,
728-
0, 0, 0, 0, 0,
729-
res);
773+
arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
774+
.a0 = FFA_VERSION,
775+
.a1 = ffa_req_version,
776+
}, res);
730777
if (res->a0 == FFA_RET_NOT_SUPPORTED)
731778
goto unlock;
732779

@@ -743,7 +790,7 @@ static void do_ffa_version(struct arm_smccc_res *res,
743790
hyp_spin_unlock(&version_lock);
744791
}
745792

746-
static void do_ffa_part_get(struct arm_smccc_res *res,
793+
static void do_ffa_part_get(struct arm_smccc_1_2_regs *res,
747794
struct kvm_cpu_context *ctxt)
748795
{
749796
DECLARE_REG(u32, uuid0, ctxt, 1);
@@ -759,9 +806,14 @@ static void do_ffa_part_get(struct arm_smccc_res *res,
759806
goto out_unlock;
760807
}
761808

762-
arm_smccc_1_1_smc(FFA_PARTITION_INFO_GET, uuid0, uuid1,
763-
uuid2, uuid3, flags, 0, 0,
764-
res);
809+
arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
810+
.a0 = FFA_PARTITION_INFO_GET,
811+
.a1 = uuid0,
812+
.a2 = uuid1,
813+
.a3 = uuid2,
814+
.a4 = uuid3,
815+
.a5 = flags,
816+
}, res);
765817

766818
if (res->a0 != FFA_SUCCESS)
767819
goto out_unlock;
@@ -794,7 +846,7 @@ static void do_ffa_part_get(struct arm_smccc_res *res,
794846

795847
bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
796848
{
797-
struct arm_smccc_res res;
849+
struct arm_smccc_1_2_regs res;
798850

799851
/*
800852
* There's no way we can tell what a non-standard SMC call might
@@ -863,13 +915,16 @@ bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
863915

864916
int hyp_ffa_init(void *pages)
865917
{
866-
struct arm_smccc_res res;
918+
struct arm_smccc_1_2_regs res;
867919
void *tx, *rx;
868920

869921
if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_2)
870922
return 0;
871923

872-
arm_smccc_1_1_smc(FFA_VERSION, FFA_VERSION_1_1, 0, 0, 0, 0, 0, 0, &res);
924+
arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
925+
.a0 = FFA_VERSION,
926+
.a1 = FFA_VERSION_1_1,
927+
}, &res);
873928
if (res.a0 == FFA_RET_NOT_SUPPORTED)
874929
return 0;
875930

0 commit comments

Comments
 (0)