Skip to content

Commit 75c76ab

Browse files
Marc Zyngier authored and oupton committed
KVM: arm64: Rework CPTR_EL2 programming for HVHE configuration
Just like we repainted the early arm64 code, we need to update the CPTR_EL2 accesses that are taking place in the nVHE code when hVHE is used, making them look as if they were CPACR_EL1 accesses. Just like the VHE code. Signed-off-by: Marc Zyngier <maz@kernel.org> Link: https://lore.kernel.org/r/20230609162200.2024064-14-maz@kernel.org Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
1 parent 6537565 commit 75c76ab

10 files changed

Lines changed: 77 additions & 28 deletions

File tree

arch/arm64/include/asm/kvm_arm.h

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -285,7 +285,6 @@
285285
#define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT)
286286
#define CPTR_EL2_TZ (1 << 8)
287287
#define CPTR_NVHE_EL2_RES1 0x000032ff /* known RES1 bits in CPTR_EL2 (nVHE) */
288-
#define CPTR_EL2_DEFAULT CPTR_NVHE_EL2_RES1
289288
#define CPTR_NVHE_EL2_RES0 (GENMASK(63, 32) | \
290289
GENMASK(29, 21) | \
291290
GENMASK(19, 14) | \
@@ -347,8 +346,7 @@
347346
ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \
348347
ECN(BKPT32), ECN(VECTOR32), ECN(BRK64), ECN(ERET)
349348

350-
#define CPACR_EL1_DEFAULT (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |\
351-
CPACR_EL1_ZEN_EL1EN)
349+
#define CPACR_EL1_TTA (1 << 28)
352350

353351
#define kvm_mode_names \
354352
{ PSR_MODE_EL0t, "EL0t" }, \

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -570,4 +570,35 @@ static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
570570
return test_bit(feature, vcpu->arch.features);
571571
}
572572

573+
static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
574+
{
575+
u64 val;
576+
577+
if (has_vhe()) {
578+
val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
579+
CPACR_EL1_ZEN_EL1EN);
580+
} else if (has_hvhe()) {
581+
val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
582+
} else {
583+
val = CPTR_NVHE_EL2_RES1;
584+
585+
if (vcpu_has_sve(vcpu) &&
586+
(vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
587+
val |= CPTR_EL2_TZ;
588+
if (cpus_have_final_cap(ARM64_SME))
589+
val &= ~CPTR_EL2_TSM;
590+
}
591+
592+
return val;
593+
}
594+
595+
static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
596+
{
597+
u64 val = kvm_get_reset_cptr_el2(vcpu);
598+
599+
if (has_vhe() || has_hvhe())
600+
write_sysreg(val, cpacr_el1);
601+
else
602+
write_sysreg(val, cptr_el2);
603+
}
573604
#endif /* __ARM64_KVM_EMULATE_H__ */

arch/arm64/kvm/arm.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1240,7 +1240,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
12401240
}
12411241

12421242
vcpu_reset_hcr(vcpu);
1243-
vcpu->arch.cptr_el2 = CPTR_EL2_DEFAULT;
1243+
vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
12441244

12451245
/*
12461246
* Handle the "start in power-off" case.

arch/arm64/kvm/fpsimd.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -180,7 +180,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
180180

181181
/*
182182
* If we have VHE then the Hyp code will reset CPACR_EL1 to
183-
* CPACR_EL1_DEFAULT and we need to reenable SME.
183+
* the default value and we need to reenable SME.
184184
*/
185185
if (has_vhe() && system_supports_sme()) {
186186
/* Also restore EL0 state seen on entry */
@@ -210,7 +210,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
210210
/*
211211
* The FPSIMD/SVE state in the CPU has not been touched, and we
212212
* have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
213-
* reset to CPACR_EL1_DEFAULT by the Hyp code, disabling SVE
213+
* reset by kvm_reset_cptr_el2() in the Hyp code, disabling SVE
214214
* for EL0. To avoid spurious traps, restore the trap state
215215
* seen by kvm_arch_vcpu_load_fp():
216216
*/

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -192,7 +192,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
192192
/* Valid trap. Switch the context: */
193193

194194
/* First disable enough traps to allow us to update the registers */
195-
if (has_vhe()) {
195+
if (has_vhe() || has_hvhe()) {
196196
reg = CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN;
197197
if (sve_guest)
198198
reg |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;

arch/arm64/kvm/hyp/nvhe/hyp-main.c

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -392,7 +392,11 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
392392
handle_host_smc(host_ctxt);
393393
break;
394394
case ESR_ELx_EC_SVE:
395-
sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0);
395+
if (has_hvhe())
396+
sysreg_clear_set(cpacr_el1, 0, (CPACR_EL1_ZEN_EL1EN |
397+
CPACR_EL1_ZEN_EL0EN));
398+
else
399+
sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0);
396400
isb();
397401
sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
398402
break;

arch/arm64/kvm/hyp/nvhe/pkvm.c

Lines changed: 18 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
2727
u64 hcr_set = HCR_RW;
2828
u64 hcr_clear = 0;
2929
u64 cptr_set = 0;
30+
u64 cptr_clear = 0;
3031

3132
/* Protected KVM does not support AArch32 guests. */
3233
BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
@@ -57,12 +58,17 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
5758
}
5859

5960
/* Trap SVE */
60-
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids))
61-
cptr_set |= CPTR_EL2_TZ;
61+
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
62+
if (has_hvhe())
63+
cptr_clear |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
64+
else
65+
cptr_set |= CPTR_EL2_TZ;
66+
}
6267

6368
vcpu->arch.hcr_el2 |= hcr_set;
6469
vcpu->arch.hcr_el2 &= ~hcr_clear;
6570
vcpu->arch.cptr_el2 |= cptr_set;
71+
vcpu->arch.cptr_el2 &= ~cptr_clear;
6672
}
6773

6874
/*
@@ -120,8 +126,12 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
120126
mdcr_set |= MDCR_EL2_TTRF;
121127

122128
/* Trap Trace */
123-
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids))
124-
cptr_set |= CPTR_EL2_TTA;
129+
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) {
130+
if (has_hvhe())
131+
cptr_set |= CPACR_EL1_TTA;
132+
else
133+
cptr_set |= CPTR_EL2_TTA;
134+
}
125135

126136
vcpu->arch.mdcr_el2 |= mdcr_set;
127137
vcpu->arch.mdcr_el2 &= ~mdcr_clear;
@@ -176,8 +186,10 @@ static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
176186
/* Clear res0 and set res1 bits to trap potential new features. */
177187
vcpu->arch.hcr_el2 &= ~(HCR_RES0);
178188
vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
179-
vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
180-
vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
189+
if (!has_hvhe()) {
190+
vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
191+
vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
192+
}
181193
}
182194

183195
/*

arch/arm64/kvm/hyp/nvhe/switch.c

Lines changed: 16 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -44,13 +44,24 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
4444
__activate_traps_common(vcpu);
4545

4646
val = vcpu->arch.cptr_el2;
47-
val |= CPTR_EL2_TTA | CPTR_EL2_TAM;
47+
val |= CPTR_EL2_TAM; /* Same bit irrespective of E2H */
48+
val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
49+
if (cpus_have_final_cap(ARM64_SME)) {
50+
if (has_hvhe())
51+
val &= ~(CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN);
52+
else
53+
val |= CPTR_EL2_TSM;
54+
}
55+
4856
if (!guest_owns_fp_regs(vcpu)) {
49-
val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
57+
if (has_hvhe())
58+
val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
59+
CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN);
60+
else
61+
val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
62+
5063
__activate_traps_fpsimd32(vcpu);
5164
}
52-
if (cpus_have_final_cap(ARM64_SME))
53-
val |= CPTR_EL2_TSM;
5465

5566
write_sysreg(val, cptr_el2);
5667
write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
@@ -73,7 +84,6 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
7384
static void __deactivate_traps(struct kvm_vcpu *vcpu)
7485
{
7586
extern char __kvm_hyp_host_vector[];
76-
u64 cptr;
7787

7888
___deactivate_traps(vcpu);
7989

@@ -98,13 +108,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
98108

99109
write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
100110

101-
cptr = CPTR_EL2_DEFAULT;
102-
if (vcpu_has_sve(vcpu) && (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
103-
cptr |= CPTR_EL2_TZ;
104-
if (cpus_have_final_cap(ARM64_SME))
105-
cptr &= ~CPTR_EL2_TSM;
106-
107-
write_sysreg(cptr, cptr_el2);
111+
kvm_reset_cptr_el2(vcpu);
108112
write_sysreg(__kvm_hyp_host_vector, vbar_el2);
109113
}
110114

arch/arm64/kvm/hyp/vhe/switch.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
8484
*/
8585
asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
8686

87-
write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
87+
kvm_reset_cptr_el2(vcpu);
8888

8989
if (!arm64_kernel_unmapped_at_el0())
9090
host_vectors = __this_cpu_read(this_cpu_vector);

arch/arm64/kvm/sys_regs.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2180,7 +2180,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
21802180
EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
21812181
EL2_REG(HCR_EL2, access_rw, reset_val, 0),
21822182
EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
2183-
EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_EL2_DEFAULT ),
2183+
EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
21842184
EL2_REG(HSTR_EL2, access_rw, reset_val, 0),
21852185
EL2_REG(HACR_EL2, access_rw, reset_val, 0),
21862186

0 commit comments

Comments (0)