
Commit 48b1893

Merge tag 'kvm-x86-pmu-6.4' of https://github.com/kvm-x86/linux into HEAD
KVM x86 PMU changes for 6.4:

- Disallow virtualizing legacy LBRs if architectural LBRs are available;
  the two are mutually exclusive in hardware

- Disallow writes to immutable feature MSRs (notably PERF_CAPABILITIES)
  after KVM_RUN, and overhaul the vmx_pmu_caps selftest to better
  validate PERF_CAPABILITIES

- Apply PMU filters to emulated events and add test coverage to the
  pmu_event_filter selftest

- Misc cleanups and fixes
2 parents: 807b758 + 457bd7a

14 files changed: 558 additions & 268 deletions


arch/x86/include/asm/kvm_host.h

Lines changed: 1 addition & 1 deletion
@@ -513,6 +513,7 @@ struct kvm_pmc {
 #define MSR_ARCH_PERFMON_FIXED_CTR_MAX (MSR_ARCH_PERFMON_FIXED_CTR0 + KVM_PMC_MAX_FIXED - 1)
 #define KVM_AMD_PMC_MAX_GENERIC 6
 struct kvm_pmu {
+    u8 version;
     unsigned nr_arch_gp_counters;
     unsigned nr_arch_fixed_counters;
     unsigned available_event_types;
@@ -525,7 +526,6 @@ struct kvm_pmu {
     u64 global_ovf_ctrl_mask;
     u64 reserved_bits;
     u64 raw_event_mask;
-    u8 version;
     struct kvm_pmc gp_counters[KVM_INTEL_PMC_MAX_GENERIC];
     struct kvm_pmc fixed_counters[KVM_PMC_MAX_FIXED];
     struct irq_work irq_work;

arch/x86/kvm/cpuid.c

Lines changed: 1 addition & 1 deletion
@@ -414,7 +414,7 @@ static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
      * KVM_SET_CPUID{,2} again. To support this legacy behavior, check
      * whether the supplied CPUID data is equal to what's already set.
      */
-    if (vcpu->arch.last_vmentry_cpu != -1) {
+    if (kvm_vcpu_has_run(vcpu)) {
         r = kvm_cpuid_check_equal(vcpu, e2, nent);
         if (r)
             return r;
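
The helper that replaces the open-coded check is not defined anywhere in this commit; presumably it is a thin wrapper around the same last_vmentry_cpu test (its home, likely arch/x86/kvm/x86.h, is an assumption here):

/*
 * Sketch of the assumed helper: KVM records the physical CPU used for the
 * vCPU's last VM-Entry, and -1 means the vCPU has never entered the guest.
 */
static inline bool kvm_vcpu_has_run(struct kvm_vcpu *vcpu)
{
    return vcpu->arch.last_vmentry_cpu != -1;
}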

arch/x86/kvm/mmu/mmu.c

Lines changed: 1 addition & 1 deletion
@@ -5476,7 +5476,7 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
      * Changing guest CPUID after KVM_RUN is forbidden, see the comment in
      * kvm_arch_vcpu_ioctl().
      */
-    KVM_BUG_ON(vcpu->arch.last_vmentry_cpu != -1, vcpu->kvm);
+    KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm);
 }
 
 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)

arch/x86/kvm/pmu.c

Lines changed: 14 additions & 7 deletions
@@ -93,7 +93,7 @@ void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
 #undef __KVM_X86_PMU_OP
 }
 
-static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
+static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc)
 {
     return static_call(kvm_x86_pmu_pmc_is_enabled)(pmc);
 }
@@ -400,6 +400,12 @@ static bool check_pmu_event_filter(struct kvm_pmc *pmc)
     return is_fixed_event_allowed(filter, pmc->idx);
 }
 
+static bool pmc_event_is_allowed(struct kvm_pmc *pmc)
+{
+    return pmc_is_globally_enabled(pmc) && pmc_speculative_in_use(pmc) &&
+           check_pmu_event_filter(pmc);
+}
+
 static void reprogram_counter(struct kvm_pmc *pmc)
 {
     struct kvm_pmu *pmu = pmc_to_pmu(pmc);
@@ -409,10 +415,7 @@ static void reprogram_counter(struct kvm_pmc *pmc)
 
     pmc_pause_counter(pmc);
 
-    if (!pmc_speculative_in_use(pmc) || !pmc_is_enabled(pmc))
-        goto reprogram_complete;
-
-    if (!check_pmu_event_filter(pmc))
+    if (!pmc_event_is_allowed(pmc))
         goto reprogram_complete;
 
     if (pmc->counter < pmc->prev_counter)
@@ -589,6 +592,10 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
  */
 void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 {
+    if (KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm))
+        return;
+
+    bitmap_zero(vcpu_to_pmu(vcpu)->all_valid_pmc_idx, X86_PMC_IDX_MAX);
     static_call(kvm_x86_pmu_refresh)(vcpu);
 }
 
@@ -646,7 +653,7 @@ static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
 {
     pmc->prev_counter = pmc->counter;
     pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc);
-    kvm_pmu_request_counter_reprogam(pmc);
+    kvm_pmu_request_counter_reprogram(pmc);
 }
 
 static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc,
@@ -684,7 +691,7 @@ void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
     for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
         pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
 
-        if (!pmc || !pmc_is_enabled(pmc) || !pmc_speculative_in_use(pmc))
+        if (!pmc || !pmc_event_is_allowed(pmc))
             continue;
 
         /* Ignore checks for edge detect, pin control, invert and CMASK bits */

arch/x86/kvm/pmu.h

Lines changed: 1 addition & 1 deletion
@@ -195,7 +195,7 @@ static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
                              KVM_PMC_MAX_FIXED);
 }
 
-static inline void kvm_pmu_request_counter_reprogam(struct kvm_pmc *pmc)
+static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
 {
     set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
     kvm_make_request(KVM_REQ_PMU, pmc->vcpu);

arch/x86/kvm/svm/pmu.c

Lines changed: 1 addition & 1 deletion
@@ -161,7 +161,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
         data &= ~pmu->reserved_bits;
         if (data != pmc->eventsel) {
             pmc->eventsel = data;
-            kvm_pmu_request_counter_reprogam(pmc);
+            kvm_pmu_request_counter_reprogram(pmc);
         }
         return 0;
     }

arch/x86/kvm/svm/svm.c

Lines changed: 1 addition & 1 deletion
@@ -4093,7 +4093,7 @@ static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
 {
     switch (index) {
     case MSR_IA32_MCG_EXT_CTL:
-    case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
+    case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
         return false;
     case MSR_IA32_SMBASE:
         if (!IS_ENABLED(CONFIG_KVM_SMM))
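
The KVM_FIRST/LAST_EMULATED_VMX_MSR macros are not shown in this hunk; presumably they just name the endpoints of the emulated VMX feature-MSR range that the old case labels spelled out directly, roughly as below (the header they live in is an assumption):

/* Assumed definitions; the range mirrors the MSRs named by the old case labels. */
#define KVM_FIRST_EMULATED_VMX_MSR    MSR_IA32_VMX_BASIC
#define KVM_LAST_EMULATED_VMX_MSR     MSR_IA32_VMX_VMFUNC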

arch/x86/kvm/vmx/pmu_intel.c

Lines changed: 75 additions & 60 deletions
@@ -57,7 +57,7 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
         pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
 
         __set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
-        kvm_pmu_request_counter_reprogam(pmc);
+        kvm_pmu_request_counter_reprogram(pmc);
     }
 }
 
@@ -76,13 +76,13 @@ static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
 static void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
 {
     int bit;
-    struct kvm_pmc *pmc;
 
-    for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX) {
-        pmc = intel_pmc_idx_to_pmc(pmu, bit);
-        if (pmc)
-            kvm_pmu_request_counter_reprogam(pmc);
-    }
+    if (!diff)
+        return;
+
+    for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
+        set_bit(bit, pmu->reprogram_pmi);
+    kvm_make_request(KVM_REQ_PMU, pmu_to_vcpu(pmu));
 }
 
 static bool intel_hw_event_available(struct kvm_pmc *pmc)
@@ -351,45 +351,47 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
     switch (msr) {
     case MSR_CORE_PERF_FIXED_CTR_CTRL:
         msr_info->data = pmu->fixed_ctr_ctrl;
-        return 0;
+        break;
     case MSR_CORE_PERF_GLOBAL_STATUS:
         msr_info->data = pmu->global_status;
-        return 0;
+        break;
     case MSR_CORE_PERF_GLOBAL_CTRL:
         msr_info->data = pmu->global_ctrl;
-        return 0;
+        break;
     case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
         msr_info->data = 0;
-        return 0;
+        break;
     case MSR_IA32_PEBS_ENABLE:
         msr_info->data = pmu->pebs_enable;
-        return 0;
+        break;
     case MSR_IA32_DS_AREA:
         msr_info->data = pmu->ds_area;
-        return 0;
+        break;
     case MSR_PEBS_DATA_CFG:
         msr_info->data = pmu->pebs_data_cfg;
-        return 0;
+        break;
     default:
         if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
             (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
             u64 val = pmc_read_counter(pmc);
             msr_info->data =
                 val & pmu->counter_bitmask[KVM_PMC_GP];
-            return 0;
+            break;
         } else if ((pmc = get_fixed_pmc(pmu, msr))) {
             u64 val = pmc_read_counter(pmc);
             msr_info->data =
                 val & pmu->counter_bitmask[KVM_PMC_FIXED];
-            return 0;
+            break;
         } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
             msr_info->data = pmc->eventsel;
-            return 0;
-        } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true))
-            return 0;
+            break;
+        } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true)) {
+            break;
+        }
+        return 1;
     }
 
-    return 1;
+    return 0;
 }
 
 static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
@@ -402,94 +404,97 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
     switch (msr) {
     case MSR_CORE_PERF_FIXED_CTR_CTRL:
-        if (pmu->fixed_ctr_ctrl == data)
-            return 0;
-        if (!(data & pmu->fixed_ctr_ctrl_mask)) {
+        if (data & pmu->fixed_ctr_ctrl_mask)
+            return 1;
+
+        if (pmu->fixed_ctr_ctrl != data)
             reprogram_fixed_counters(pmu, data);
-            return 0;
-        }
         break;
     case MSR_CORE_PERF_GLOBAL_STATUS:
-        if (msr_info->host_initiated) {
-            pmu->global_status = data;
-            return 0;
-        }
-        break; /* RO MSR */
+        if (!msr_info->host_initiated)
+            return 1; /* RO MSR */
+
+        pmu->global_status = data;
+        break;
     case MSR_CORE_PERF_GLOBAL_CTRL:
-        if (pmu->global_ctrl == data)
-            return 0;
-        if (kvm_valid_perf_global_ctrl(pmu, data)) {
+        if (!kvm_valid_perf_global_ctrl(pmu, data))
+            return 1;
+
+        if (pmu->global_ctrl != data) {
             diff = pmu->global_ctrl ^ data;
             pmu->global_ctrl = data;
             reprogram_counters(pmu, diff);
-            return 0;
         }
         break;
     case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
-        if (!(data & pmu->global_ovf_ctrl_mask)) {
-            if (!msr_info->host_initiated)
-                pmu->global_status &= ~data;
-            return 0;
-        }
+        if (data & pmu->global_ovf_ctrl_mask)
+            return 1;
+
+        if (!msr_info->host_initiated)
+            pmu->global_status &= ~data;
         break;
     case MSR_IA32_PEBS_ENABLE:
-        if (pmu->pebs_enable == data)
-            return 0;
-        if (!(data & pmu->pebs_enable_mask)) {
+        if (data & pmu->pebs_enable_mask)
+            return 1;
+
+        if (pmu->pebs_enable != data) {
             diff = pmu->pebs_enable ^ data;
             pmu->pebs_enable = data;
             reprogram_counters(pmu, diff);
-            return 0;
         }
         break;
     case MSR_IA32_DS_AREA:
         if (msr_info->host_initiated && data && !guest_cpuid_has(vcpu, X86_FEATURE_DS))
             return 1;
         if (is_noncanonical_address(data, vcpu))
             return 1;
+
         pmu->ds_area = data;
-        return 0;
+        break;
     case MSR_PEBS_DATA_CFG:
-        if (pmu->pebs_data_cfg == data)
-            return 0;
-        if (!(data & pmu->pebs_data_cfg_mask)) {
-            pmu->pebs_data_cfg = data;
-            return 0;
-        }
+        if (data & pmu->pebs_data_cfg_mask)
+            return 1;
+
+        pmu->pebs_data_cfg = data;
         break;
     default:
         if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
             (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
             if ((msr & MSR_PMC_FULL_WIDTH_BIT) &&
                 (data & ~pmu->counter_bitmask[KVM_PMC_GP]))
                 return 1;
+
             if (!msr_info->host_initiated &&
                 !(msr & MSR_PMC_FULL_WIDTH_BIT))
                 data = (s64)(s32)data;
             pmc->counter += data - pmc_read_counter(pmc);
             pmc_update_sample_period(pmc);
-            return 0;
+            break;
         } else if ((pmc = get_fixed_pmc(pmu, msr))) {
             pmc->counter += data - pmc_read_counter(pmc);
             pmc_update_sample_period(pmc);
-            return 0;
+            break;
         } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
-            if (data == pmc->eventsel)
-                return 0;
             reserved_bits = pmu->reserved_bits;
             if ((pmc->idx == 2) &&
                 (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
                 reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
-            if (!(data & reserved_bits)) {
+            if (data & reserved_bits)
+                return 1;
+
+            if (data != pmc->eventsel) {
                 pmc->eventsel = data;
-                kvm_pmu_request_counter_reprogam(pmc);
-                return 0;
+                kvm_pmu_request_counter_reprogram(pmc);
             }
-        } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false))
-            return 0;
+            break;
+        } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false)) {
+            break;
+        }
+        /* Not a known PMU MSR. */
+        return 1;
     }
 
-    return 1;
+    return 0;
 }
 
 static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu)
@@ -531,6 +536,16 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
     pmu->pebs_enable_mask = ~0ull;
     pmu->pebs_data_cfg_mask = ~0ull;
 
+    memset(&lbr_desc->records, 0, sizeof(lbr_desc->records));
+
+    /*
+     * Setting passthrough of LBR MSRs is done only in the VM-Entry loop,
+     * and PMU refresh is disallowed after the vCPU has run, i.e. this code
+     * should never be reached while KVM is passing through MSRs.
+     */
+    if (KVM_BUG_ON(lbr_desc->msr_passthrough, vcpu->kvm))
+        return;
+
     entry = kvm_find_cpuid_entry(vcpu, 0xa);
     if (!entry || !vcpu->kvm->arch.enable_pmu)
         return;
