Skip to content

Commit 6866724

Browse files
committed
KVM: arm64: Rip out the vestiges of the 'old' ID register scheme
There's no longer a need for the baggage of the old scheme for handling configurable ID register fields. Rip it all out in favor of the generalized infrastructure. Link: https://lore.kernel.org/r/20230609190054.1542113-12-oliver.upton@linux.dev Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
1 parent 6db7af0 commit 6866724

4 files changed

Lines changed: 10 additions & 117 deletions

File tree

arch/arm64/include/asm/kvm_host.h

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -241,10 +241,6 @@ struct kvm_arch {
241241

242242
cpumask_var_t supported_cpus;
243243

244-
u8 pfr0_csv2;
245-
u8 pfr0_csv3;
246-
u8 dfr0_pmuver;
247-
248244
/* Hypercall features firmware registers' descriptor */
249245
struct kvm_smccc_features smccc_feat;
250246
struct maple_tree smccc_filter;

arch/arm64/kvm/arm.c

Lines changed: 0 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -102,22 +102,6 @@ static int kvm_arm_default_max_vcpus(void)
102102
return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
103103
}
104104

105-
static void set_default_spectre(struct kvm *kvm)
106-
{
107-
/*
108-
* The default is to expose CSV2 == 1 if the HW isn't affected.
109-
* Although this is a per-CPU feature, we make it global because
110-
* asymmetric systems are just a nuisance.
111-
*
112-
* Userspace can override this as long as it doesn't promise
113-
* the impossible.
114-
*/
115-
if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
116-
kvm->arch.pfr0_csv2 = 1;
117-
if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED)
118-
kvm->arch.pfr0_csv3 = 1;
119-
}
120-
121105
/**
122106
* kvm_arch_init_vm - initializes a VM data structure
123107
* @kvm: pointer to the KVM struct
@@ -161,15 +145,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
161145
/* The maximum number of VCPUs is limited by the host's GIC model */
162146
kvm->max_vcpus = kvm_arm_default_max_vcpus();
163147

164-
set_default_spectre(kvm);
165148
kvm_arm_init_hypercalls(kvm);
166149

167-
/*
168-
* Initialise the default PMUver before there is a chance to
169-
* create an actual PMU.
170-
*/
171-
kvm->arch.dfr0_pmuver = kvm_arm_pmu_get_pmuver_limit();
172-
173150
bitmap_zero(kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES);
174151

175152
return 0;

arch/arm64/kvm/sys_regs.c

Lines changed: 4 additions & 88 deletions
Original file line numberDiff line numberDiff line change
@@ -1195,14 +1195,6 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
11951195
return true;
11961196
}
11971197

1198-
static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
1199-
{
1200-
if (kvm_vcpu_has_pmu(vcpu))
1201-
return vcpu->kvm->arch.dfr0_pmuver;
1202-
1203-
return 0;
1204-
}
1205-
12061198
static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
12071199
s64 new, s64 cur)
12081200
{
@@ -1288,19 +1280,6 @@ static int arm64_check_features(struct kvm_vcpu *vcpu,
12881280
return 0;
12891281
}
12901282

1291-
static u8 perfmon_to_pmuver(u8 perfmon)
1292-
{
1293-
switch (perfmon) {
1294-
case ID_DFR0_EL1_PerfMon_PMUv3:
1295-
return ID_AA64DFR0_EL1_PMUVer_IMP;
1296-
case ID_DFR0_EL1_PerfMon_IMPDEF:
1297-
return ID_AA64DFR0_EL1_PMUVer_IMP_DEF;
1298-
default:
1299-
/* Anything ARMv8.1+ and NI have the same value. For now. */
1300-
return perfmon;
1301-
}
1302-
}
1303-
13041283
static u8 pmuver_to_perfmon(u8 pmuver)
13051284
{
13061285
switch (pmuver) {
@@ -1327,19 +1306,6 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
13271306
val = read_sanitised_ftr_reg(id);
13281307

13291308
switch (id) {
1330-
case SYS_ID_AA64PFR0_EL1:
1331-
if (!vcpu_has_sve(vcpu))
1332-
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE);
1333-
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU);
1334-
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2);
1335-
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
1336-
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3);
1337-
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
1338-
if (kvm_vgic_global_state.type == VGIC_V3) {
1339-
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC);
1340-
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1);
1341-
}
1342-
break;
13431309
case SYS_ID_AA64PFR1_EL1:
13441310
if (!kvm_has_mte(vcpu->kvm))
13451311
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
@@ -1360,22 +1326,6 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
13601326
if (!cpus_have_final_cap(ARM64_HAS_WFXT))
13611327
val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
13621328
break;
1363-
case SYS_ID_AA64DFR0_EL1:
1364-
/* Limit debug to ARMv8.0 */
1365-
val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer);
1366-
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), 6);
1367-
/* Set PMUver to the required version */
1368-
val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
1369-
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
1370-
vcpu_pmuver(vcpu));
1371-
/* Hide SPE from guests */
1372-
val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer);
1373-
break;
1374-
case SYS_ID_DFR0_EL1:
1375-
val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
1376-
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon),
1377-
pmuver_to_perfmon(vcpu_pmuver(vcpu)));
1378-
break;
13791329
case SYS_ID_AA64MMFR2_EL1:
13801330
val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
13811331
break;
@@ -1505,26 +1455,6 @@ static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
15051455
return val;
15061456
}
15071457

1508-
static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
1509-
const struct sys_reg_desc *rd,
1510-
u64 val)
1511-
{
1512-
u8 csv2, csv3;
1513-
int r;
1514-
1515-
csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV2_SHIFT);
1516-
csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV3_SHIFT);
1517-
1518-
r = set_id_reg(vcpu, rd, val);
1519-
if (r)
1520-
return r;
1521-
1522-
vcpu->kvm->arch.pfr0_csv2 = csv2;
1523-
vcpu->kvm->arch.pfr0_csv3 = csv3;
1524-
1525-
return 0;
1526-
}
1527-
15281458
static u64 read_sanitised_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
15291459
const struct sys_reg_desc *rd)
15301460
{
@@ -1553,7 +1483,6 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
15531483
u64 val)
15541484
{
15551485
u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);
1556-
int r;
15571486

15581487
/*
15591488
* Prior to commit 3d0dba5764b9 ("KVM: arm64: PMU: Move the
@@ -1569,17 +1498,10 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
15691498
* surprising than an ill-guided PMU driver poking at impdef system
15701499
* registers that end in an UNDEF...
15711500
*/
1572-
if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF) {
1501+
if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
15731502
val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
1574-
pmuver = 0;
1575-
}
15761503

1577-
r = set_id_reg(vcpu, rd, val);
1578-
if (r)
1579-
return r;
1580-
1581-
vcpu->kvm->arch.dfr0_pmuver = pmuver;
1582-
return 0;
1504+
return set_id_reg(vcpu, rd, val);
15831505
}
15841506

15851507
static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
@@ -1600,7 +1522,6 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
16001522
u64 val)
16011523
{
16021524
u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val);
1603-
int r;
16041525

16051526
if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) {
16061527
val &= ~ID_DFR0_EL1_PerfMon_MASK;
@@ -1616,12 +1537,7 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
16161537
if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3)
16171538
return -EINVAL;
16181539

1619-
r = set_id_reg(vcpu, rd, val);
1620-
if (r)
1621-
return r;
1622-
1623-
vcpu->kvm->arch.dfr0_pmuver = perfmon_to_pmuver(perfmon);
1624-
return 0;
1540+
return set_id_reg(vcpu, rd, val);
16251541
}
16261542

16271543
/*
@@ -2076,7 +1992,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
20761992
{ SYS_DESC(SYS_ID_AA64PFR0_EL1),
20771993
.access = access_id_reg,
20781994
.get_user = get_id_reg,
2079-
.set_user = set_id_aa64pfr0_el1,
1995+
.set_user = set_id_reg,
20801996
.reset = read_sanitised_id_aa64pfr0_el1,
20811997
.val = ID_AA64PFR0_EL1_CSV2_MASK | ID_AA64PFR0_EL1_CSV3_MASK, },
20821998
ID_SANITISED(ID_AA64PFR1_EL1),

include/kvm/arm_pmu.h

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -92,8 +92,12 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
9292
/*
9393
* Evaluates as true when emulating PMUv3p5, and false otherwise.
9494
*/
95-
#define kvm_pmu_is_3p5(vcpu) \
96-
(vcpu->kvm->arch.dfr0_pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5)
95+
#define kvm_pmu_is_3p5(vcpu) ({ \
96+
u64 val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1); \
97+
u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val); \
98+
\
99+
pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5; \
100+
})
97101

98102
u8 kvm_arm_pmu_get_pmuver_limit(void);
99103

0 commit comments

Comments (0)