Skip to content

Commit e644896

Browse files
Like Xu authored and pbonzini committed
KVM: x86/pmu: Fix and isolate TSX-specific performance event logic
HSW_IN_TX* bits are used in generic code which are not supported on AMD. Worse, these bits overlap with AMD EventSelect[11:8] and hence using HSW_IN_TX* bits unconditionally in generic code is resulting in unintentional pmu behavior on AMD. For example, if EventSelect[11:8] is 0x2, pmc_reprogram_counter() wrongly assumes that HSW_IN_TX_CHECKPOINTED is set and thus forces sampling period to be 0. Also per the SDM, both bits 32 and 33 "may only be set if the processor supports HLE or RTM" and for "IN_TXCP (bit 33): this bit may only be set for IA32_PERFEVTSEL2." Opportunistically eliminate code redundancy, because if the HSW_IN_TX* bit is set in pmc->eventsel, it is already set in attr.config. Reported-by: Ravi Bangoria <ravi.bangoria@amd.com> Reported-by: Jim Mattson <jmattson@google.com> Fixes: 103af0a ("perf, kvm: Support the in_tx/in_tx_cp modifiers in KVM arch perfmon emulation v5") Co-developed-by: Ravi Bangoria <ravi.bangoria@amd.com> Signed-off-by: Ravi Bangoria <ravi.bangoria@amd.com> Signed-off-by: Like Xu <likexu@tencent.com> Message-Id: <20220309084257.88931-1-likexu@tencent.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 5959ff4 commit e644896

2 files changed

Lines changed: 15 additions & 13 deletions

File tree

arch/x86/kvm/pmu.c

Lines changed: 5 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -96,8 +96,7 @@ static void kvm_perf_overflow(struct perf_event *perf_event,
9696

9797
static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
9898
u64 config, bool exclude_user,
99-
bool exclude_kernel, bool intr,
100-
bool in_tx, bool in_tx_cp)
99+
bool exclude_kernel, bool intr)
101100
{
102101
struct perf_event *event;
103102
struct perf_event_attr attr = {
@@ -116,16 +115,14 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
116115

117116
attr.sample_period = get_sample_period(pmc, pmc->counter);
118117

119-
if (in_tx)
120-
attr.config |= HSW_IN_TX;
121-
if (in_tx_cp) {
118+
if ((attr.config & HSW_IN_TX_CHECKPOINTED) &&
119+
guest_cpuid_is_intel(pmc->vcpu)) {
122120
/*
123121
* HSW_IN_TX_CHECKPOINTED is not supported with nonzero
124122
* period. Just clear the sample period so at least
125123
* allocating the counter doesn't fail.
126124
*/
127125
attr.sample_period = 0;
128-
attr.config |= HSW_IN_TX_CHECKPOINTED;
129126
}
130127

131128
event = perf_event_create_kernel_counter(&attr, -1, current,
@@ -233,9 +230,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
233230
pmc_reprogram_counter(pmc, type, config,
234231
!(eventsel & ARCH_PERFMON_EVENTSEL_USR),
235232
!(eventsel & ARCH_PERFMON_EVENTSEL_OS),
236-
eventsel & ARCH_PERFMON_EVENTSEL_INT,
237-
(eventsel & HSW_IN_TX),
238-
(eventsel & HSW_IN_TX_CHECKPOINTED));
233+
eventsel & ARCH_PERFMON_EVENTSEL_INT);
239234
}
240235
EXPORT_SYMBOL_GPL(reprogram_gp_counter);
241236

@@ -271,7 +266,7 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
271266
kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc),
272267
!(en_field & 0x2), /* exclude user */
273268
!(en_field & 0x1), /* exclude kernel */
274-
pmi, false, false);
269+
pmi);
275270
}
276271
EXPORT_SYMBOL_GPL(reprogram_fixed_counter);
277272

arch/x86/kvm/vmx/pmu_intel.c

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -389,6 +389,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
389389
struct kvm_pmc *pmc;
390390
u32 msr = msr_info->index;
391391
u64 data = msr_info->data;
392+
u64 reserved_bits;
392393

393394
switch (msr) {
394395
case MSR_CORE_PERF_FIXED_CTR_CTRL:
@@ -443,7 +444,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
443444
} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
444445
if (data == pmc->eventsel)
445446
return 0;
446-
if (!(data & pmu->reserved_bits)) {
447+
reserved_bits = pmu->reserved_bits;
448+
if ((pmc->idx == 2) &&
449+
(pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
450+
reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
451+
if (!(data & reserved_bits)) {
447452
reprogram_gp_counter(pmc, data);
448453
return 0;
449454
}
@@ -534,8 +539,10 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
534539
entry = kvm_find_cpuid_entry(vcpu, 7, 0);
535540
if (entry &&
536541
(boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
537-
(entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
538-
pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
542+
(entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) {
543+
pmu->reserved_bits ^= HSW_IN_TX;
544+
pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
545+
}
539546

540547
bitmap_set(pmu->all_valid_pmc_idx,
541548
0, pmu->nr_arch_gp_counters);

0 commit comments

Comments
 (0)