
Commit d6e32fc

oupton authored and jannau committed
KVM: arm64: Use a cpucap to determine if system supports FEAT_PMUv3
KVM is about to learn some new tricks to virtualize PMUv3 on IMPDEF hardware.
As part of that, we now need to differentiate host support from guest support
for PMUv3.

Add a cpucap to determine if an architectural PMUv3 is present to guard host
usage of PMUv3 controls.

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
1 parent 4e88836 commit d6e32fc

6 files changed

Lines changed: 33 additions & 8 deletions
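
Before the per-file diffs, a quick aside on the mechanism the commit leans on: an arm64 cpucap is a system-wide capability detected from the sanitised ID registers during boot and then finalized, after which queries such as cpus_have_final_cap() are cheap. The snippet below is a loose, userspace-only model of that detect-once/query-often pattern; the names, the bitmap, and the finalize step are illustrative assumptions, not the kernel's actual implementation.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative capability indices; the kernel generates the real ones from
 * arch/arm64/tools/cpucaps (this commit adds HAS_PMUV3 to that list). */
enum cpucap { HAS_PAN, HAS_PMUV3, NR_CAPS };

static uint64_t system_caps;
static bool caps_finalized;

/* "Boot": decide each capability once from the ID registers, then freeze. */
static void finalize_caps(bool host_has_arch_pmuv3)
{
	if (host_has_arch_pmuv3)
		system_caps |= 1ULL << HAS_PMUV3;
	caps_finalized = true;
}

/* Stand-in for cpus_have_final_cap(ARM64_HAS_PMUV3), i.e. the shape of the
 * new system_supports_pmuv3() helper: a cheap query after finalization. */
static bool system_supports_pmuv3(void)
{
	return caps_finalized && (system_caps & (1ULL << HAS_PMUV3));
}

int main(void)
{
	finalize_caps(true);
	printf("host has architectural PMUv3: %d\n", system_supports_pmuv3());
	return 0;
}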


arch/arm64/include/asm/cpufeature.h

Lines changed: 5 additions & 0 deletions
@@ -838,6 +838,11 @@ static inline bool system_supports_poe(void)
 		alternative_has_cap_unlikely(ARM64_HAS_S1POE);
 }
 
+static inline bool system_supports_pmuv3(void)
+{
+	return cpus_have_final_cap(ARM64_HAS_PMUV3);
+}
+
 int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
 bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
 

arch/arm64/kernel/cpufeature.c

Lines changed: 19 additions & 0 deletions
@@ -1866,6 +1866,19 @@ static bool has_lpa2(const struct arm64_cpu_capabilities *entry, int scope)
 }
 #endif
 
+static bool has_pmuv3(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
+	unsigned int pmuver;
+
+	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
+						      ID_AA64DFR0_EL1_PMUVer_SHIFT);
+	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
+		return false;
+
+	return pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP;
+}
+
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 #define KPTI_NG_TEMP_VA		(-(1UL << PMD_SHIFT))
 
@@ -2890,6 +2903,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		ARM64_CPUID_FIELDS(ID_AA64MMFR3_EL1, S1POE, IMP)
 	},
 #endif
+	{
+		.desc = "PMUv3",
+		.capability = ARM64_HAS_PMUV3,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_pmuv3,
+	},
 	{},
 };
 
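
One detail worth calling out in has_pmuv3(): PMUVer is extracted as an unsigned field, and the IMPLEMENTATION DEFINED encoding (0xf) is numerically larger than every architectural version, so it has to be rejected before the ">= IMP" comparison or an IMPDEF PMU would wrongly match. A standalone sketch of that check follows; the field offset and encodings are written out here as assumptions for illustration rather than pulled from the kernel headers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed ID_AA64DFR0_EL1.PMUVer layout (bits [11:8]) and encodings; the
 * kernel spells these ID_AA64DFR0_EL1_PMUVer_SHIFT/_IMP/_IMP_DEF. */
#define PMUVER_SHIFT	8
#define PMUVER_MASK	0xfULL
#define PMUVER_NI	0x0	/* no PMU implemented */
#define PMUVER_IMP	0x1	/* architectural PMUv3 */
#define PMUVER_IMP_DEF	0xf	/* IMPLEMENTATION DEFINED PMU */

static bool has_arch_pmuv3(uint64_t dfr0)
{
	unsigned int pmuver = (dfr0 >> PMUVER_SHIFT) & PMUVER_MASK;

	/* 0xf would pass the unsigned ">= IMP" test, so filter it out first. */
	if (pmuver == PMUVER_IMP_DEF)
		return false;

	return pmuver >= PMUVER_IMP;
}

int main(void)
{
	printf("%d\n", has_arch_pmuv3((uint64_t)PMUVER_IMP << PMUVER_SHIFT));     /* 1 */
	printf("%d\n", has_arch_pmuv3((uint64_t)PMUVER_IMP_DEF << PMUVER_SHIFT)); /* 0 */
	printf("%d\n", has_arch_pmuv3((uint64_t)PMUVER_NI << PMUVER_SHIFT));      /* 0 */
	return 0;
}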

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 2 additions & 2 deletions
@@ -215,7 +215,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
 	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
 	 * EL1 instead of being trapped to EL2.
 	 */
-	if (kvm_arm_support_pmu_v3()) {
+	if (system_supports_pmuv3()) {
 		struct kvm_cpu_context *hctxt;
 
 		write_sysreg(0, pmselr_el0);
@@ -251,7 +251,7 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
 	write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);
 
 	write_sysreg(0, hstr_el2);
-	if (kvm_arm_support_pmu_v3()) {
+	if (system_supports_pmuv3()) {
 		struct kvm_cpu_context *hctxt;
 
 		hctxt = host_data_ptr(host_ctxt);

arch/arm64/kvm/pmu.c

Lines changed: 5 additions & 5 deletions
@@ -41,7 +41,7 @@ void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr)
 {
 	struct kvm_pmu_events *pmu = kvm_get_pmu_events();
 
-	if (!kvm_arm_support_pmu_v3() || !kvm_pmu_switch_needed(attr))
+	if (!system_supports_pmuv3() || !kvm_pmu_switch_needed(attr))
 		return;
 
 	if (!attr->exclude_host)
@@ -57,7 +57,7 @@ void kvm_clr_pmu_events(u64 clr)
 {
 	struct kvm_pmu_events *pmu = kvm_get_pmu_events();
 
-	if (!kvm_arm_support_pmu_v3())
+	if (!system_supports_pmuv3())
 		return;
 
 	pmu->events_host &= ~clr;
@@ -133,7 +133,7 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
 	struct kvm_pmu_events *pmu;
 	u64 events_guest, events_host;
 
-	if (!kvm_arm_support_pmu_v3() || !has_vhe())
+	if (!system_supports_pmuv3() || !has_vhe())
 		return;
 
 	preempt_disable();
@@ -154,7 +154,7 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
 	struct kvm_pmu_events *pmu;
 	u64 events_guest, events_host;
 
-	if (!kvm_arm_support_pmu_v3() || !has_vhe())
+	if (!system_supports_pmuv3() || !has_vhe())
 		return;
 
 	pmu = kvm_get_pmu_events();
@@ -180,7 +180,7 @@ bool kvm_set_pmuserenr(u64 val)
 	struct kvm_cpu_context *hctxt;
 	struct kvm_vcpu *vcpu;
 
-	if (!kvm_arm_support_pmu_v3() || !has_vhe())
+	if (!system_supports_pmuv3() || !has_vhe())
 		return false;
 
 	vcpu = kvm_get_running_vcpu();

arch/arm64/tools/cpucaps

Lines changed: 1 addition & 0 deletions
@@ -44,6 +44,7 @@ HAS_LSE_ATOMICS
 HAS_MOPS
 HAS_NESTED_VIRT
 HAS_PAN
+HAS_PMUV3
 HAS_S1PIE
 HAS_S1POE
 HAS_RAS_EXTN

include/kvm/arm_pmu.h

Lines changed: 1 addition & 1 deletion
@@ -86,7 +86,7 @@ void kvm_vcpu_pmu_resync_el0(void);
  */
 #define kvm_pmu_update_vcpu_events(vcpu)				\
 	do {								\
-		if (!has_vhe() && kvm_arm_support_pmu_v3())		\
+		if (!has_vhe() && system_supports_pmuv3())		\
 			vcpu->arch.pmu.events = *kvm_get_pmu_events();	\
 	} while (0)
 
