Skip to content

Commit 5eb8493

Browse files
khuey
authored and bonzini committed
KVM: x86/svm: Account for family 17h event renumberings in amd_pmc_perf_hw_id
Zen renumbered some of the performance counters that correspond to the well known events in perf_hw_id. This code in KVM was never updated for that, so guest that attempt to use counters on Zen that correspond to the pre-Zen perf_hw_id values will silently receive the wrong values. This has been observed in the wild with rr[0] when running in Zen 3 guests. rr uses the retired conditional branch counter 00d1 which is incorrectly recognized by KVM as PERF_COUNT_HW_STALLED_CYCLES_BACKEND. [0] https://rr-project.org/ Signed-off-by: Kyle Huey <me@kylehuey.com> Message-Id: <20220503050136.86298-1-khuey@kylehuey.com> Cc: stable@vger.kernel.org [Check guest family, not host. - Paolo] Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 4418723 commit 5eb8493

1 file changed

Lines changed: 25 additions & 3 deletions

File tree

arch/x86/kvm/svm/pmu.c

Lines changed: 25 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,22 @@ static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
4545
[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
4646
};
4747

48+
/* duplicated from amd_f17h_perfmon_event_map. */
49+
static struct kvm_event_hw_type_mapping amd_f17h_event_mapping[] = {
50+
[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
51+
[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
52+
[2] = { 0x60, 0xff, PERF_COUNT_HW_CACHE_REFERENCES },
53+
[3] = { 0x64, 0x09, PERF_COUNT_HW_CACHE_MISSES },
54+
[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
55+
[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
56+
[6] = { 0x87, 0x02, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
57+
[7] = { 0x87, 0x01, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
58+
};
59+
60+
/* amd_pmc_perf_hw_id depends on these being the same size */
61+
static_assert(ARRAY_SIZE(amd_event_mapping) ==
62+
ARRAY_SIZE(amd_f17h_event_mapping));
63+
4864
static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
4965
{
5066
struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
@@ -140,6 +156,7 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
140156

141157
static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
142158
{
159+
struct kvm_event_hw_type_mapping *event_mapping;
143160
u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
144161
u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
145162
int i;
@@ -148,15 +165,20 @@ static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
148165
if (WARN_ON(pmc_is_fixed(pmc)))
149166
return PERF_COUNT_HW_MAX;
150167

168+
if (guest_cpuid_family(pmc->vcpu) >= 0x17)
169+
event_mapping = amd_f17h_event_mapping;
170+
else
171+
event_mapping = amd_event_mapping;
172+
151173
for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
152-
if (amd_event_mapping[i].eventsel == event_select
153-
&& amd_event_mapping[i].unit_mask == unit_mask)
174+
if (event_mapping[i].eventsel == event_select
175+
&& event_mapping[i].unit_mask == unit_mask)
154176
break;
155177

156178
if (i == ARRAY_SIZE(amd_event_mapping))
157179
return PERF_COUNT_HW_MAX;
158180

159-
return amd_event_mapping[i].event_type;
181+
return event_mapping[i].event_type;
160182
}
161183

162184
/* check if a PMC is enabled by comparing it against global_ctrl bits. Because

0 commit comments

Comments
 (0)