Skip to content

Commit d016264

Browse files
author
Marc Zyngier
committed
Merge branch kvm-arm64/nv-6.8-prefix into kvmarm-master/next
* kvm-arm64/nv-6.8-prefix:
  : .
  : Nested Virtualization support update, focussing on the
  : NV2 support (VNCR mapping and such).
  : .
  KVM: arm64: nv: Handle virtual EL2 registers in vcpu_read/write_sys_reg()
  KVM: arm64: nv: Map VNCR-capable registers to a separate page
  KVM: arm64: nv: Add EL2_REG_VNCR()/EL2_REG_REDIR() sysreg helpers
  KVM: arm64: Introduce a bad_trap() primitive for unexpected trap handling
  KVM: arm64: nv: Add include containing the VNCR_EL2 offsets
  KVM: arm64: nv: Add non-VHE-EL2->EL1 translation helpers
  KVM: arm64: nv: Drop EL12 register traps that are redirected to VNCR
  KVM: arm64: nv: Compute NV view of idregs as a one-off
  KVM: arm64: nv: Hoist vcpu_has_nv() into is_hyp_ctxt()
  arm64: cpufeatures: Restrict NV support to FEAT_NV2

Signed-off-by: Marc Zyngier <maz@kernel.org>
2 parents 53d5486 + fedc612 commit d016264

9 files changed

Lines changed: 457 additions & 118 deletions

File tree

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
#include <asm/esr.h>
1818
#include <asm/kvm_arm.h>
1919
#include <asm/kvm_hyp.h>
20+
#include <asm/kvm_nested.h>
2021
#include <asm/ptrace.h>
2122
#include <asm/cputype.h>
2223
#include <asm/virt.h>
@@ -54,11 +55,6 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
5455
int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
5556
int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);
5657

57-
static inline bool vcpu_has_feature(const struct kvm_vcpu *vcpu, int feature)
58-
{
59-
return test_bit(feature, vcpu->kvm->arch.vcpu_features);
60-
}
61-
6258
#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
6359
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
6460
{
@@ -248,7 +244,7 @@ static inline bool __is_hyp_ctxt(const struct kvm_cpu_context *ctxt)
248244

249245
static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
250246
{
251-
return __is_hyp_ctxt(&vcpu->arch.ctxt);
247+
return vcpu_has_nv(vcpu) && __is_hyp_ctxt(&vcpu->arch.ctxt);
252248
}
253249

254250
/*

arch/arm64/include/asm/kvm_host.h

Lines changed: 92 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@
2727
#include <asm/fpsimd.h>
2828
#include <asm/kvm.h>
2929
#include <asm/kvm_asm.h>
30+
#include <asm/vncr_mapping.h>
3031

3132
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
3233

@@ -306,6 +307,7 @@ struct kvm_arch {
306307
* Atomic access to multiple idregs are guarded by kvm_arch.config_lock.
307308
*/
308309
#define IDREG_IDX(id) (((sys_reg_CRm(id) - 1) << 3) | sys_reg_Op2(id))
310+
#define IDX_IDREG(idx) sys_reg(3, 0, 0, ((idx) >> 3) + 1, (idx) & Op2_mask)
309311
#define IDREG(kvm, id) ((kvm)->arch.id_regs[IDREG_IDX(id)])
310312
#define KVM_ARM_ID_REG_NUM (IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
311313
u64 id_regs[KVM_ARM_ID_REG_NUM];
@@ -324,33 +326,33 @@ struct kvm_vcpu_fault_info {
324326
u64 disr_el1; /* Deferred [SError] Status Register */
325327
};
326328

329+
/*
330+
* VNCR() just places the VNCR_capable registers in the enum after
331+
* __VNCR_START__, and the value (after correction) to be an 8-byte offset
332+
* from the VNCR base. As we don't require the enum to be otherwise ordered,
333+
* we need the terrible hack below to ensure that we correctly size the
334+
* sys_regs array, no matter what.
335+
*
336+
* The __MAX__ macro has been lifted from Sean Eron Anderson's wonderful
337+
* treasure trove of bit hacks:
338+
* https://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
339+
*/
340+
#define __MAX__(x,y) ((x) ^ (((x) ^ (y)) & -((x) < (y))))
341+
#define VNCR(r) \
342+
__before_##r, \
343+
r = __VNCR_START__ + ((VNCR_ ## r) / 8), \
344+
__after_##r = __MAX__(__before_##r - 1, r)
345+
327346
enum vcpu_sysreg {
328347
__INVALID_SYSREG__, /* 0 is reserved as an invalid value */
329348
MPIDR_EL1, /* MultiProcessor Affinity Register */
330349
CLIDR_EL1, /* Cache Level ID Register */
331350
CSSELR_EL1, /* Cache Size Selection Register */
332-
SCTLR_EL1, /* System Control Register */
333-
ACTLR_EL1, /* Auxiliary Control Register */
334-
CPACR_EL1, /* Coprocessor Access Control */
335-
ZCR_EL1, /* SVE Control */
336-
TTBR0_EL1, /* Translation Table Base Register 0 */
337-
TTBR1_EL1, /* Translation Table Base Register 1 */
338-
TCR_EL1, /* Translation Control Register */
339-
TCR2_EL1, /* Extended Translation Control Register */
340-
ESR_EL1, /* Exception Syndrome Register */
341-
AFSR0_EL1, /* Auxiliary Fault Status Register 0 */
342-
AFSR1_EL1, /* Auxiliary Fault Status Register 1 */
343-
FAR_EL1, /* Fault Address Register */
344-
MAIR_EL1, /* Memory Attribute Indirection Register */
345-
VBAR_EL1, /* Vector Base Address Register */
346-
CONTEXTIDR_EL1, /* Context ID Register */
347351
TPIDR_EL0, /* Thread ID, User R/W */
348352
TPIDRRO_EL0, /* Thread ID, User R/O */
349353
TPIDR_EL1, /* Thread ID, Privileged */
350-
AMAIR_EL1, /* Aux Memory Attribute Indirection Register */
351354
CNTKCTL_EL1, /* Timer Control Register (EL1) */
352355
PAR_EL1, /* Physical Address Register */
353-
MDSCR_EL1, /* Monitor Debug System Control Register */
354356
MDCCINT_EL1, /* Monitor Debug Comms Channel Interrupt Enable Reg */
355357
OSLSR_EL1, /* OS Lock Status Register */
356358
DISR_EL1, /* Deferred Interrupt Status Register */
@@ -381,48 +383,26 @@ enum vcpu_sysreg {
381383
APGAKEYLO_EL1,
382384
APGAKEYHI_EL1,
383385

384-
ELR_EL1,
385-
SP_EL1,
386-
SPSR_EL1,
387-
388-
CNTVOFF_EL2,
389-
CNTV_CVAL_EL0,
390-
CNTV_CTL_EL0,
391-
CNTP_CVAL_EL0,
392-
CNTP_CTL_EL0,
393-
394386
/* Memory Tagging Extension registers */
395387
RGSR_EL1, /* Random Allocation Tag Seed Register */
396388
GCR_EL1, /* Tag Control Register */
397-
TFSR_EL1, /* Tag Fault Status Register (EL1) */
398389
TFSRE0_EL1, /* Tag Fault Status Register (EL0) */
399390

400-
/* Permission Indirection Extension registers */
401-
PIR_EL1, /* Permission Indirection Register 1 (EL1) */
402-
PIRE0_EL1, /* Permission Indirection Register 0 (EL1) */
403-
404391
/* 32bit specific registers. */
405392
DACR32_EL2, /* Domain Access Control Register */
406393
IFSR32_EL2, /* Instruction Fault Status Register */
407394
FPEXC32_EL2, /* Floating-Point Exception Control Register */
408395
DBGVCR32_EL2, /* Debug Vector Catch Register */
409396

410397
/* EL2 registers */
411-
VPIDR_EL2, /* Virtualization Processor ID Register */
412-
VMPIDR_EL2, /* Virtualization Multiprocessor ID Register */
413398
SCTLR_EL2, /* System Control Register (EL2) */
414399
ACTLR_EL2, /* Auxiliary Control Register (EL2) */
415-
HCR_EL2, /* Hypervisor Configuration Register */
416400
MDCR_EL2, /* Monitor Debug Configuration Register (EL2) */
417401
CPTR_EL2, /* Architectural Feature Trap Register (EL2) */
418-
HSTR_EL2, /* Hypervisor System Trap Register */
419402
HACR_EL2, /* Hypervisor Auxiliary Control Register */
420-
HCRX_EL2, /* Extended Hypervisor Configuration Register */
421403
TTBR0_EL2, /* Translation Table Base Register 0 (EL2) */
422404
TTBR1_EL2, /* Translation Table Base Register 1 (EL2) */
423405
TCR_EL2, /* Translation Control Register (EL2) */
424-
VTTBR_EL2, /* Virtualization Translation Table Base Register */
425-
VTCR_EL2, /* Virtualization Translation Control Register */
426406
SPSR_EL2, /* EL2 saved program status register */
427407
ELR_EL2, /* EL2 exception link register */
428408
AFSR0_EL2, /* Auxiliary Fault Status Register 0 (EL2) */
@@ -435,20 +415,62 @@ enum vcpu_sysreg {
435415
VBAR_EL2, /* Vector Base Address Register (EL2) */
436416
RVBAR_EL2, /* Reset Vector Base Address Register */
437417
CONTEXTIDR_EL2, /* Context ID Register (EL2) */
438-
TPIDR_EL2, /* EL2 Software Thread ID Register */
439418
CNTHCTL_EL2, /* Counter-timer Hypervisor Control register */
440419
SP_EL2, /* EL2 Stack Pointer */
441-
HFGRTR_EL2,
442-
HFGWTR_EL2,
443-
HFGITR_EL2,
444-
HDFGRTR_EL2,
445-
HDFGWTR_EL2,
446-
HAFGRTR_EL2,
447420
CNTHP_CTL_EL2,
448421
CNTHP_CVAL_EL2,
449422
CNTHV_CTL_EL2,
450423
CNTHV_CVAL_EL2,
451424

425+
__VNCR_START__, /* Any VNCR-capable reg goes after this point */
426+
427+
VNCR(SCTLR_EL1),/* System Control Register */
428+
VNCR(ACTLR_EL1),/* Auxiliary Control Register */
429+
VNCR(CPACR_EL1),/* Coprocessor Access Control */
430+
VNCR(ZCR_EL1), /* SVE Control */
431+
VNCR(TTBR0_EL1),/* Translation Table Base Register 0 */
432+
VNCR(TTBR1_EL1),/* Translation Table Base Register 1 */
433+
VNCR(TCR_EL1), /* Translation Control Register */
434+
VNCR(TCR2_EL1), /* Extended Translation Control Register */
435+
VNCR(ESR_EL1), /* Exception Syndrome Register */
436+
VNCR(AFSR0_EL1),/* Auxiliary Fault Status Register 0 */
437+
VNCR(AFSR1_EL1),/* Auxiliary Fault Status Register 1 */
438+
VNCR(FAR_EL1), /* Fault Address Register */
439+
VNCR(MAIR_EL1), /* Memory Attribute Indirection Register */
440+
VNCR(VBAR_EL1), /* Vector Base Address Register */
441+
VNCR(CONTEXTIDR_EL1), /* Context ID Register */
442+
VNCR(AMAIR_EL1),/* Aux Memory Attribute Indirection Register */
443+
VNCR(MDSCR_EL1),/* Monitor Debug System Control Register */
444+
VNCR(ELR_EL1),
445+
VNCR(SP_EL1),
446+
VNCR(SPSR_EL1),
447+
VNCR(TFSR_EL1), /* Tag Fault Status Register (EL1) */
448+
VNCR(VPIDR_EL2),/* Virtualization Processor ID Register */
449+
VNCR(VMPIDR_EL2),/* Virtualization Multiprocessor ID Register */
450+
VNCR(HCR_EL2), /* Hypervisor Configuration Register */
451+
VNCR(HSTR_EL2), /* Hypervisor System Trap Register */
452+
VNCR(VTTBR_EL2),/* Virtualization Translation Table Base Register */
453+
VNCR(VTCR_EL2), /* Virtualization Translation Control Register */
454+
VNCR(TPIDR_EL2),/* EL2 Software Thread ID Register */
455+
VNCR(HCRX_EL2), /* Extended Hypervisor Configuration Register */
456+
457+
/* Permission Indirection Extension registers */
458+
VNCR(PIR_EL1), /* Permission Indirection Register 1 (EL1) */
459+
VNCR(PIRE0_EL1), /* Permission Indirection Register 0 (EL1) */
460+
461+
VNCR(HFGRTR_EL2),
462+
VNCR(HFGWTR_EL2),
463+
VNCR(HFGITR_EL2),
464+
VNCR(HDFGRTR_EL2),
465+
VNCR(HDFGWTR_EL2),
466+
VNCR(HAFGRTR_EL2),
467+
468+
VNCR(CNTVOFF_EL2),
469+
VNCR(CNTV_CVAL_EL0),
470+
VNCR(CNTV_CTL_EL0),
471+
VNCR(CNTP_CVAL_EL0),
472+
VNCR(CNTP_CTL_EL0),
473+
452474
NR_SYS_REGS /* Nothing after this line! */
453475
};
454476

@@ -465,6 +487,9 @@ struct kvm_cpu_context {
465487
u64 sys_regs[NR_SYS_REGS];
466488

467489
struct kvm_vcpu *__hyp_running_vcpu;
490+
491+
/* This pointer has to be 4kB aligned. */
492+
u64 *vncr_array;
468493
};
469494

470495
struct kvm_host_data {
@@ -827,8 +852,19 @@ struct kvm_vcpu_arch {
827852
* accessed by a running VCPU. For example, for userspace access or
828853
* for system registers that are never context switched, but only
829854
* emulated.
855+
*
856+
* Don't bother with VNCR-based accesses in the nVHE code, it has no
857+
* business dealing with NV.
830858
*/
831-
#define __ctxt_sys_reg(c,r) (&(c)->sys_regs[(r)])
859+
static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
860+
{
861+
#if !defined (__KVM_NVHE_HYPERVISOR__)
862+
if (unlikely(cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
863+
r >= __VNCR_START__ && ctxt->vncr_array))
864+
return &ctxt->vncr_array[r - __VNCR_START__];
865+
#endif
866+
return (u64 *)&ctxt->sys_regs[r];
867+
}
832868

833869
#define ctxt_sys_reg(c,r) (*__ctxt_sys_reg(c,r))
834870

@@ -872,6 +908,7 @@ static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
872908
case AMAIR_EL1: *val = read_sysreg_s(SYS_AMAIR_EL12); break;
873909
case CNTKCTL_EL1: *val = read_sysreg_s(SYS_CNTKCTL_EL12); break;
874910
case ELR_EL1: *val = read_sysreg_s(SYS_ELR_EL12); break;
911+
case SPSR_EL1: *val = read_sysreg_s(SYS_SPSR_EL12); break;
875912
case PAR_EL1: *val = read_sysreg_par(); break;
876913
case DACR32_EL2: *val = read_sysreg_s(SYS_DACR32_EL2); break;
877914
case IFSR32_EL2: *val = read_sysreg_s(SYS_IFSR32_EL2); break;
@@ -916,6 +953,7 @@ static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
916953
case AMAIR_EL1: write_sysreg_s(val, SYS_AMAIR_EL12); break;
917954
case CNTKCTL_EL1: write_sysreg_s(val, SYS_CNTKCTL_EL12); break;
918955
case ELR_EL1: write_sysreg_s(val, SYS_ELR_EL12); break;
956+
case SPSR_EL1: write_sysreg_s(val, SYS_SPSR_EL12); break;
919957
case PAR_EL1: write_sysreg_s(val, SYS_PAR_EL1); break;
920958
case DACR32_EL2: write_sysreg_s(val, SYS_DACR32_EL2); break;
921959
case IFSR32_EL2: write_sysreg_s(val, SYS_IFSR32_EL2); break;
@@ -1178,6 +1216,13 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
11781216
#define kvm_vm_has_ran_once(kvm) \
11791217
(test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &(kvm)->arch.flags))
11801218

1219+
static inline bool __vcpu_has_feature(const struct kvm_arch *ka, int feature)
1220+
{
1221+
return test_bit(feature, ka->vcpu_features);
1222+
}
1223+
1224+
#define vcpu_has_feature(v, f) __vcpu_has_feature(&(v)->kvm->arch, (f))
1225+
11811226
int kvm_trng_call(struct kvm_vcpu *vcpu);
11821227
#ifdef CONFIG_KVM
11831228
extern phys_addr_t hyp_mem_base;

arch/arm64/include/asm/kvm_nested.h

Lines changed: 50 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,9 @@
22
#ifndef __ARM64_KVM_NESTED_H
33
#define __ARM64_KVM_NESTED_H
44

5-
#include <asm/kvm_emulate.h>
5+
#include <linux/bitfield.h>
66
#include <linux/kvm_host.h>
7+
#include <asm/kvm_emulate.h>
78

89
static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
910
{
@@ -12,12 +13,55 @@ static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
1213
vcpu_has_feature(vcpu, KVM_ARM_VCPU_HAS_EL2));
1314
}
1415

15-
extern bool __check_nv_sr_forward(struct kvm_vcpu *vcpu);
16+
/* Translation helpers from non-VHE EL2 to EL1 */
17+
static inline u64 tcr_el2_ps_to_tcr_el1_ips(u64 tcr_el2)
18+
{
19+
return (u64)FIELD_GET(TCR_EL2_PS_MASK, tcr_el2) << TCR_IPS_SHIFT;
20+
}
21+
22+
static inline u64 translate_tcr_el2_to_tcr_el1(u64 tcr)
23+
{
24+
return TCR_EPD1_MASK | /* disable TTBR1_EL1 */
25+
((tcr & TCR_EL2_TBI) ? TCR_TBI0 : 0) |
26+
tcr_el2_ps_to_tcr_el1_ips(tcr) |
27+
(tcr & TCR_EL2_TG0_MASK) |
28+
(tcr & TCR_EL2_ORGN0_MASK) |
29+
(tcr & TCR_EL2_IRGN0_MASK) |
30+
(tcr & TCR_EL2_T0SZ_MASK);
31+
}
32+
33+
static inline u64 translate_cptr_el2_to_cpacr_el1(u64 cptr_el2)
34+
{
35+
u64 cpacr_el1 = 0;
36+
37+
if (cptr_el2 & CPTR_EL2_TTA)
38+
cpacr_el1 |= CPACR_ELx_TTA;
39+
if (!(cptr_el2 & CPTR_EL2_TFP))
40+
cpacr_el1 |= CPACR_ELx_FPEN;
41+
if (!(cptr_el2 & CPTR_EL2_TZ))
42+
cpacr_el1 |= CPACR_ELx_ZEN;
1643

17-
struct sys_reg_params;
18-
struct sys_reg_desc;
44+
return cpacr_el1;
45+
}
46+
47+
static inline u64 translate_sctlr_el2_to_sctlr_el1(u64 val)
48+
{
49+
/* Only preserve the minimal set of bits we support */
50+
val &= (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | SCTLR_ELx_SA |
51+
SCTLR_ELx_I | SCTLR_ELx_IESB | SCTLR_ELx_WXN | SCTLR_ELx_EE);
52+
val |= SCTLR_EL1_RES1;
53+
54+
return val;
55+
}
56+
57+
static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
58+
{
59+
/* Clear the ASID field */
60+
return ttbr0 & ~GENMASK_ULL(63, 48);
61+
}
62+
63+
extern bool __check_nv_sr_forward(struct kvm_vcpu *vcpu);
1964

20-
void access_nested_id_reg(struct kvm_vcpu *v, struct sys_reg_params *p,
21-
const struct sys_reg_desc *r);
65+
int kvm_init_nv_sysregs(struct kvm *kvm);
2266

2367
#endif /* __ARM64_KVM_NESTED_H */

0 commit comments

Comments (0)