Skip to content

Commit 1c07f35

Browse files
author
Marc Zyngier
committed
Merge branch kvm-arm64/vtcr into kvmarm-master/next
* kvm-arm64/vtcr: : . : VTCR_EL2 conversion to the configuration-driven RESx framework, : fixing a couple of UXN/PXN/XN bugs in the process. : . KVM: arm64: nv: Return correct RES0 bits for FGT registers KVM: arm64: Always populate FGT masks at boot time KVM: arm64: Honor UX/PX attributes for EL2 S1 mappings KVM: arm64: Convert VTCR_EL2 to config-driven sanitisation KVM: arm64: Account for RES1 bits in DECLARE_FEAT_MAP() and co arm64: Convert VTCR_EL2 to sysreg infrastructure arm64: Convert ID_AA64MMFR0_EL1.TGRAN{4,16,64}_2 to UnsignedEnum KVM: arm64: Invert KVM_PGTABLE_WALK_HANDLE_FAULT to fix pKVM walkers KVM: arm64: Don't blindly set PSTATE.PAN on guest exit KVM: arm64: nv: Respect stage-2 write permission when setting stage-1 AF KVM: arm64: Remove unused vcpu_{clear,set}_wfx_traps() KVM: arm64: Remove unused parameter in synchronize_vcpu_pstate() KVM: arm64: Remove extra argument for __pkvm_host_{share,unshare}_hyp() KVM: arm64: Inject UNDEF for a register trap without accessor KVM: arm64: Copy FGT traps to unprotected pKVM VCPU on VCPU load KVM: arm64: Fix EL2 S1 XN handling for hVHE setups KVM: arm64: gic: Check for vGICv3 when clearing TWI Signed-off-by: Marc Zyngier <maz@kernel.org>
2 parents 31c70b9 + 2eb80a2 commit 1c07f35

19 files changed

Lines changed: 257 additions & 140 deletions

File tree

arch/arm64/include/asm/kvm_arm.h

Lines changed: 12 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -124,47 +124,14 @@
124124
#define TCR_EL2_MASK (TCR_EL2_TG0_MASK | TCR_EL2_SH0_MASK | \
125125
TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK)
126126

127-
/* VTCR_EL2 Registers bits */
128-
#define VTCR_EL2_DS TCR_EL2_DS
129-
#define VTCR_EL2_RES1 (1U << 31)
130-
#define VTCR_EL2_HD (1 << 22)
131-
#define VTCR_EL2_HA (1 << 21)
132-
#define VTCR_EL2_PS_SHIFT TCR_EL2_PS_SHIFT
133-
#define VTCR_EL2_PS_MASK TCR_EL2_PS_MASK
134-
#define VTCR_EL2_TG0_MASK TCR_TG0_MASK
135-
#define VTCR_EL2_TG0_4K TCR_TG0_4K
136-
#define VTCR_EL2_TG0_16K TCR_TG0_16K
137-
#define VTCR_EL2_TG0_64K TCR_TG0_64K
138-
#define VTCR_EL2_SH0_MASK TCR_SH0_MASK
139-
#define VTCR_EL2_SH0_INNER TCR_SH0_INNER
140-
#define VTCR_EL2_ORGN0_MASK TCR_ORGN0_MASK
141-
#define VTCR_EL2_ORGN0_WBWA TCR_ORGN0_WBWA
142-
#define VTCR_EL2_IRGN0_MASK TCR_IRGN0_MASK
143-
#define VTCR_EL2_IRGN0_WBWA TCR_IRGN0_WBWA
144-
#define VTCR_EL2_SL0_SHIFT 6
145-
#define VTCR_EL2_SL0_MASK (3 << VTCR_EL2_SL0_SHIFT)
146-
#define VTCR_EL2_T0SZ_MASK 0x3f
147-
#define VTCR_EL2_VS_SHIFT 19
148-
#define VTCR_EL2_VS_8BIT (0 << VTCR_EL2_VS_SHIFT)
149-
#define VTCR_EL2_VS_16BIT (1 << VTCR_EL2_VS_SHIFT)
150-
151-
#define VTCR_EL2_T0SZ(x) TCR_T0SZ(x)
152-
153127
/*
154-
* We configure the Stage-2 page tables to always restrict the IPA space to be
155-
* 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are
156-
* not known to exist and will break with this configuration.
157-
*
158128
* The VTCR_EL2 is configured per VM and is initialised in kvm_init_stage2_mmu.
159129
*
160130
* Note that when using 4K pages, we concatenate two first level page tables
161131
* together. With 16K pages, we concatenate 16 first level page tables.
162132
*
163133
*/
164134

165-
#define VTCR_EL2_COMMON_BITS (VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \
166-
VTCR_EL2_IRGN0_WBWA | VTCR_EL2_RES1)
167-
168135
/*
169136
* VTCR_EL2:SL0 indicates the entry level for Stage2 translation.
170137
* Interestingly, it depends on the page size.
@@ -196,30 +163,35 @@
196163
*/
197164
#ifdef CONFIG_ARM64_64K_PAGES
198165

199-
#define VTCR_EL2_TGRAN VTCR_EL2_TG0_64K
166+
#define VTCR_EL2_TGRAN 64K
200167
#define VTCR_EL2_TGRAN_SL0_BASE 3UL
201168

202169
#elif defined(CONFIG_ARM64_16K_PAGES)
203170

204-
#define VTCR_EL2_TGRAN VTCR_EL2_TG0_16K
171+
#define VTCR_EL2_TGRAN 16K
205172
#define VTCR_EL2_TGRAN_SL0_BASE 3UL
206173

207174
#else /* 4K */
208175

209-
#define VTCR_EL2_TGRAN VTCR_EL2_TG0_4K
176+
#define VTCR_EL2_TGRAN 4K
210177
#define VTCR_EL2_TGRAN_SL0_BASE 2UL
211178

212179
#endif
213180

214181
#define VTCR_EL2_LVLS_TO_SL0(levels) \
215-
((VTCR_EL2_TGRAN_SL0_BASE - (4 - (levels))) << VTCR_EL2_SL0_SHIFT)
182+
FIELD_PREP(VTCR_EL2_SL0, (VTCR_EL2_TGRAN_SL0_BASE - (4 - (levels))))
216183
#define VTCR_EL2_SL0_TO_LVLS(sl0) \
217184
((sl0) + 4 - VTCR_EL2_TGRAN_SL0_BASE)
218185
#define VTCR_EL2_LVLS(vtcr) \
219-
VTCR_EL2_SL0_TO_LVLS(((vtcr) & VTCR_EL2_SL0_MASK) >> VTCR_EL2_SL0_SHIFT)
186+
VTCR_EL2_SL0_TO_LVLS(FIELD_GET(VTCR_EL2_SL0, (vtcr)))
187+
188+
#define VTCR_EL2_FLAGS (SYS_FIELD_PREP_ENUM(VTCR_EL2, SH0, INNER) | \
189+
SYS_FIELD_PREP_ENUM(VTCR_EL2, ORGN0, WBWA) | \
190+
SYS_FIELD_PREP_ENUM(VTCR_EL2, IRGN0, WBWA) | \
191+
SYS_FIELD_PREP_ENUM(VTCR_EL2, TG0, VTCR_EL2_TGRAN) | \
192+
VTCR_EL2_RES1)
220193

221-
#define VTCR_EL2_FLAGS (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN)
222-
#define VTCR_EL2_IPA(vtcr) (64 - ((vtcr) & VTCR_EL2_T0SZ_MASK))
194+
#define VTCR_EL2_IPA(vtcr) (64 - FIELD_GET(VTCR_EL2_T0SZ, (vtcr)))
223195

224196
/*
225197
* ARM VMSAv8-64 defines an algorithm for finding the translation table

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 0 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -120,22 +120,6 @@ static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
120120
return (unsigned long *)&vcpu->arch.hcr_el2;
121121
}
122122

123-
static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
124-
{
125-
vcpu->arch.hcr_el2 &= ~HCR_TWE;
126-
if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
127-
vcpu->kvm->arch.vgic.nassgireq)
128-
vcpu->arch.hcr_el2 &= ~HCR_TWI;
129-
else
130-
vcpu->arch.hcr_el2 |= HCR_TWI;
131-
}
132-
133-
static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
134-
{
135-
vcpu->arch.hcr_el2 |= HCR_TWE;
136-
vcpu->arch.hcr_el2 |= HCR_TWI;
137-
}
138-
139123
static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
140124
{
141125
return vcpu->arch.vsesr_el2;

arch/arm64/include/asm/kvm_host.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -638,6 +638,7 @@ struct fgt_masks {
638638
u64 mask;
639639
u64 nmask;
640640
u64 res0;
641+
u64 res1;
641642
};
642643

643644
extern struct fgt_masks hfgrtr_masks;

arch/arm64/include/asm/kvm_pgtable.h

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -88,6 +88,8 @@ typedef u64 kvm_pte_t;
8888
#define KVM_PTE_LEAF_ATTR_HI_SW GENMASK(58, 55)
8989

9090
#define KVM_PTE_LEAF_ATTR_HI_S1_XN BIT(54)
91+
#define KVM_PTE_LEAF_ATTR_HI_S1_UXN BIT(54)
92+
#define KVM_PTE_LEAF_ATTR_HI_S1_PXN BIT(53)
9193

9294
#define KVM_PTE_LEAF_ATTR_HI_S2_XN GENMASK(54, 53)
9395

@@ -293,8 +295,8 @@ typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
293295
* children.
294296
* @KVM_PGTABLE_WALK_SHARED: Indicates the page-tables may be shared
295297
* with other software walkers.
296-
* @KVM_PGTABLE_WALK_HANDLE_FAULT: Indicates the page-table walk was
297-
* invoked from a fault handler.
298+
* @KVM_PGTABLE_WALK_IGNORE_EAGAIN: Don't terminate the walk early if
299+
* the walker returns -EAGAIN.
298300
* @KVM_PGTABLE_WALK_SKIP_BBM_TLBI: Visit and update table entries
299301
* without Break-before-make's
300302
* TLB invalidation.
@@ -307,7 +309,7 @@ enum kvm_pgtable_walk_flags {
307309
KVM_PGTABLE_WALK_TABLE_PRE = BIT(1),
308310
KVM_PGTABLE_WALK_TABLE_POST = BIT(2),
309311
KVM_PGTABLE_WALK_SHARED = BIT(3),
310-
KVM_PGTABLE_WALK_HANDLE_FAULT = BIT(4),
312+
KVM_PGTABLE_WALK_IGNORE_EAGAIN = BIT(4),
311313
KVM_PGTABLE_WALK_SKIP_BBM_TLBI = BIT(5),
312314
KVM_PGTABLE_WALK_SKIP_CMO = BIT(6),
313315
};

arch/arm64/include/asm/sysreg.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -91,7 +91,8 @@
9191
*/
9292
#define pstate_field(op1, op2) ((op1) << Op1_shift | (op2) << Op2_shift)
9393
#define PSTATE_Imm_shift CRm_shift
94-
#define SET_PSTATE(x, r) __emit_inst(0xd500401f | PSTATE_ ## r | ((!!x) << PSTATE_Imm_shift))
94+
#define ENCODE_PSTATE(x, r) (0xd500401f | PSTATE_ ## r | ((!!x) << PSTATE_Imm_shift))
95+
#define SET_PSTATE(x, r) __emit_inst(ENCODE_PSTATE(x, r))
9596

9697
#define PSTATE_PAN pstate_field(0, 4)
9798
#define PSTATE_UAO pstate_field(0, 3)
@@ -516,7 +517,6 @@
516517
#define SYS_TTBR1_EL2 sys_reg(3, 4, 2, 0, 1)
517518
#define SYS_TCR_EL2 sys_reg(3, 4, 2, 0, 2)
518519
#define SYS_VTTBR_EL2 sys_reg(3, 4, 2, 1, 0)
519-
#define SYS_VTCR_EL2 sys_reg(3, 4, 2, 1, 2)
520520

521521
#define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6)
522522
#define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0)

arch/arm64/kvm/arm.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -569,6 +569,7 @@ static bool kvm_vcpu_should_clear_twi(struct kvm_vcpu *vcpu)
569569
return kvm_wfi_trap_policy == KVM_WFX_NOTRAP;
570570

571571
return single_task_running() &&
572+
vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 &&
572573
(atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
573574
vcpu->kvm->arch.vgic.nassgireq);
574575
}

arch/arm64/kvm/at.c

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -403,6 +403,7 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
403403
struct s1_walk_result *wr, u64 va)
404404
{
405405
u64 va_top, va_bottom, baddr, desc, new_desc, ipa;
406+
struct kvm_s2_trans s2_trans = {};
406407
int level, stride, ret;
407408

408409
level = wi->sl;
@@ -420,8 +421,6 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
420421
ipa = baddr | index;
421422

422423
if (wi->s2) {
423-
struct kvm_s2_trans s2_trans = {};
424-
425424
ret = kvm_walk_nested_s2(vcpu, ipa, &s2_trans);
426425
if (ret) {
427426
fail_s1_walk(wr,
@@ -515,6 +514,11 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
515514
new_desc |= PTE_AF;
516515

517516
if (new_desc != desc) {
517+
if (wi->s2 && !kvm_s2_trans_writable(&s2_trans)) {
518+
fail_s1_walk(wr, ESR_ELx_FSC_PERM_L(level), true);
519+
return -EPERM;
520+
}
521+
518522
ret = kvm_swap_s1_desc(vcpu, ipa, desc, new_desc, wi);
519523
if (ret)
520524
return ret;

arch/arm64/kvm/config.c

Lines changed: 82 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -16,14 +16,14 @@
1616
*/
1717
struct reg_bits_to_feat_map {
1818
union {
19-
u64 bits;
20-
u64 *res0p;
19+
u64 bits;
20+
struct fgt_masks *masks;
2121
};
2222

2323
#define NEVER_FGU BIT(0) /* Can trap, but never UNDEF */
2424
#define CALL_FUNC BIT(1) /* Needs to evaluate tons of crap */
2525
#define FIXED_VALUE BIT(2) /* RAZ/WI or RAO/WI in KVM */
26-
#define RES0_POINTER BIT(3) /* Pointer to RES0 value instead of bits */
26+
#define MASKS_POINTER BIT(3) /* Pointer to fgt_masks struct instead of bits */
2727

2828
unsigned long flags;
2929

@@ -92,8 +92,8 @@ struct reg_feat_map_desc {
9292
#define NEEDS_FEAT_FIXED(m, ...) \
9393
__NEEDS_FEAT_FLAG(m, FIXED_VALUE, bits, __VA_ARGS__, 0)
9494

95-
#define NEEDS_FEAT_RES0(p, ...) \
96-
__NEEDS_FEAT_FLAG(p, RES0_POINTER, res0p, __VA_ARGS__)
95+
#define NEEDS_FEAT_MASKS(p, ...) \
96+
__NEEDS_FEAT_FLAG(p, MASKS_POINTER, masks, __VA_ARGS__)
9797

9898
/*
9999
* Declare the dependency between a set of bits and a set of features,
@@ -109,19 +109,20 @@ struct reg_feat_map_desc {
109109
#define DECLARE_FEAT_MAP(n, r, m, f) \
110110
struct reg_feat_map_desc n = { \
111111
.name = #r, \
112-
.feat_map = NEEDS_FEAT(~r##_RES0, f), \
112+
.feat_map = NEEDS_FEAT(~(r##_RES0 | \
113+
r##_RES1), f), \
113114
.bit_feat_map = m, \
114115
.bit_feat_map_sz = ARRAY_SIZE(m), \
115116
}
116117

117118
/*
118119
* Specialised version of the above for FGT registers that have their
119-
* RES0 masks described as struct fgt_masks.
120+
* RESx masks described as struct fgt_masks.
120121
*/
121122
#define DECLARE_FEAT_MAP_FGT(n, msk, m, f) \
122123
struct reg_feat_map_desc n = { \
123124
.name = #msk, \
124-
.feat_map = NEEDS_FEAT_RES0(&msk.res0, f),\
125+
.feat_map = NEEDS_FEAT_MASKS(&msk, f), \
125126
.bit_feat_map = m, \
126127
.bit_feat_map_sz = ARRAY_SIZE(m), \
127128
}
@@ -140,6 +141,7 @@ struct reg_feat_map_desc {
140141
#define FEAT_AA64EL1 ID_AA64PFR0_EL1, EL1, IMP
141142
#define FEAT_AA64EL2 ID_AA64PFR0_EL1, EL2, IMP
142143
#define FEAT_AA64EL3 ID_AA64PFR0_EL1, EL3, IMP
144+
#define FEAT_SEL2 ID_AA64PFR0_EL1, SEL2, IMP
143145
#define FEAT_AIE ID_AA64MMFR3_EL1, AIE, IMP
144146
#define FEAT_S2POE ID_AA64MMFR3_EL1, S2POE, IMP
145147
#define FEAT_S1POE ID_AA64MMFR3_EL1, S1POE, IMP
@@ -201,6 +203,8 @@ struct reg_feat_map_desc {
201203
#define FEAT_ASID2 ID_AA64MMFR4_EL1, ASID2, IMP
202204
#define FEAT_MEC ID_AA64MMFR3_EL1, MEC, IMP
203205
#define FEAT_HAFT ID_AA64MMFR1_EL1, HAFDBS, HAFT
206+
#define FEAT_HDBSS ID_AA64MMFR1_EL1, HAFDBS, HDBSS
207+
#define FEAT_HPDS2 ID_AA64MMFR1_EL1, HPDS, HPDS2
204208
#define FEAT_BTI ID_AA64PFR1_EL1, BT, IMP
205209
#define FEAT_ExS ID_AA64MMFR0_EL1, EXS, IMP
206210
#define FEAT_IESB ID_AA64MMFR2_EL1, IESB, IMP
@@ -218,6 +222,7 @@ struct reg_feat_map_desc {
218222
#define FEAT_FGT2 ID_AA64MMFR0_EL1, FGT, FGT2
219223
#define FEAT_MTPMU ID_AA64DFR0_EL1, MTPMU, IMP
220224
#define FEAT_HCX ID_AA64MMFR1_EL1, HCX, IMP
225+
#define FEAT_S2PIE ID_AA64MMFR3_EL1, S2PIE, IMP
221226

222227
static bool not_feat_aa64el3(struct kvm *kvm)
223228
{
@@ -361,6 +366,28 @@ static bool feat_pmuv3p9(struct kvm *kvm)
361366
return check_pmu_revision(kvm, V3P9);
362367
}
363368

369+
#define has_feat_s2tgran(k, s) \
370+
((kvm_has_feat_enum(kvm, ID_AA64MMFR0_EL1, TGRAN##s##_2, TGRAN##s) && \
371+
kvm_has_feat(kvm, ID_AA64MMFR0_EL1, TGRAN##s, IMP)) || \
372+
kvm_has_feat(kvm, ID_AA64MMFR0_EL1, TGRAN##s##_2, IMP))
373+
374+
static bool feat_lpa2(struct kvm *kvm)
375+
{
376+
return ((kvm_has_feat(kvm, ID_AA64MMFR0_EL1, TGRAN4, 52_BIT) ||
377+
!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, TGRAN4, IMP)) &&
378+
(kvm_has_feat(kvm, ID_AA64MMFR0_EL1, TGRAN16, 52_BIT) ||
379+
!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, TGRAN16, IMP)) &&
380+
(kvm_has_feat(kvm, ID_AA64MMFR0_EL1, TGRAN4_2, 52_BIT) ||
381+
!has_feat_s2tgran(kvm, 4)) &&
382+
(kvm_has_feat(kvm, ID_AA64MMFR0_EL1, TGRAN16_2, 52_BIT) ||
383+
!has_feat_s2tgran(kvm, 16)));
384+
}
385+
386+
static bool feat_vmid16(struct kvm *kvm)
387+
{
388+
return kvm_has_feat_enum(kvm, ID_AA64MMFR1_EL1, VMIDBits, 16);
389+
}
390+
364391
static bool compute_hcr_rw(struct kvm *kvm, u64 *bits)
365392
{
366393
/* This is purely academic: AArch32 and NV are mutually exclusive */
@@ -1167,22 +1194,60 @@ static const struct reg_bits_to_feat_map mdcr_el2_feat_map[] = {
11671194
static const DECLARE_FEAT_MAP(mdcr_el2_desc, MDCR_EL2,
11681195
mdcr_el2_feat_map, FEAT_AA64EL2);
11691196

1197+
static const struct reg_bits_to_feat_map vtcr_el2_feat_map[] = {
1198+
NEEDS_FEAT(VTCR_EL2_HDBSS, FEAT_HDBSS),
1199+
NEEDS_FEAT(VTCR_EL2_HAFT, FEAT_HAFT),
1200+
NEEDS_FEAT(VTCR_EL2_TL0 |
1201+
VTCR_EL2_TL1 |
1202+
VTCR_EL2_AssuredOnly |
1203+
VTCR_EL2_GCSH,
1204+
FEAT_THE),
1205+
NEEDS_FEAT(VTCR_EL2_D128, FEAT_D128),
1206+
NEEDS_FEAT(VTCR_EL2_S2POE, FEAT_S2POE),
1207+
NEEDS_FEAT(VTCR_EL2_S2PIE, FEAT_S2PIE),
1208+
NEEDS_FEAT(VTCR_EL2_SL2 |
1209+
VTCR_EL2_DS,
1210+
feat_lpa2),
1211+
NEEDS_FEAT(VTCR_EL2_NSA |
1212+
VTCR_EL2_NSW,
1213+
FEAT_SEL2),
1214+
NEEDS_FEAT(VTCR_EL2_HWU62 |
1215+
VTCR_EL2_HWU61 |
1216+
VTCR_EL2_HWU60 |
1217+
VTCR_EL2_HWU59,
1218+
FEAT_HPDS2),
1219+
NEEDS_FEAT(VTCR_EL2_HD, ID_AA64MMFR1_EL1, HAFDBS, DBM),
1220+
NEEDS_FEAT(VTCR_EL2_HA, ID_AA64MMFR1_EL1, HAFDBS, AF),
1221+
NEEDS_FEAT(VTCR_EL2_VS, feat_vmid16),
1222+
NEEDS_FEAT(VTCR_EL2_PS |
1223+
VTCR_EL2_TG0 |
1224+
VTCR_EL2_SH0 |
1225+
VTCR_EL2_ORGN0 |
1226+
VTCR_EL2_IRGN0 |
1227+
VTCR_EL2_SL0 |
1228+
VTCR_EL2_T0SZ,
1229+
FEAT_AA64EL1),
1230+
};
1231+
1232+
static const DECLARE_FEAT_MAP(vtcr_el2_desc, VTCR_EL2,
1233+
vtcr_el2_feat_map, FEAT_AA64EL2);
1234+
11701235
static void __init check_feat_map(const struct reg_bits_to_feat_map *map,
1171-
int map_size, u64 res0, const char *str)
1236+
int map_size, u64 resx, const char *str)
11721237
{
11731238
u64 mask = 0;
11741239

11751240
for (int i = 0; i < map_size; i++)
11761241
mask |= map[i].bits;
11771242

1178-
if (mask != ~res0)
1243+
if (mask != ~resx)
11791244
kvm_err("Undefined %s behaviour, bits %016llx\n",
1180-
str, mask ^ ~res0);
1245+
str, mask ^ ~resx);
11811246
}
11821247

11831248
static u64 reg_feat_map_bits(const struct reg_bits_to_feat_map *map)
11841249
{
1185-
return map->flags & RES0_POINTER ? ~(*map->res0p) : map->bits;
1250+
return map->flags & MASKS_POINTER ? (map->masks->mask | map->masks->nmask) : map->bits;
11861251
}
11871252

11881253
static void __init check_reg_desc(const struct reg_feat_map_desc *r)
@@ -1210,6 +1275,7 @@ void __init check_feature_map(void)
12101275
check_reg_desc(&tcr2_el2_desc);
12111276
check_reg_desc(&sctlr_el1_desc);
12121277
check_reg_desc(&mdcr_el2_desc);
1278+
check_reg_desc(&vtcr_el2_desc);
12131279
}
12141280

12151281
static bool idreg_feat_match(struct kvm *kvm, const struct reg_bits_to_feat_map *map)
@@ -1424,6 +1490,10 @@ void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *r
14241490
*res0 = compute_reg_res0_bits(kvm, &mdcr_el2_desc, 0, 0);
14251491
*res1 = MDCR_EL2_RES1;
14261492
break;
1493+
case VTCR_EL2:
1494+
*res0 = compute_reg_res0_bits(kvm, &vtcr_el2_desc, 0, 0);
1495+
*res1 = VTCR_EL2_RES1;
1496+
break;
14271497
default:
14281498
WARN_ON_ONCE(1);
14291499
*res0 = *res1 = 0;

0 commit comments

Comments
 (0)