Skip to content

Commit 8bca8c5

Browse files
committed
KVM: VMX: Refactor intel_pmu_{g,s}et_msr() to align with other helpers
Invert the flows in intel_pmu_{g,s}et_msr()'s case statements so that they
follow the kernel's preferred style of:

	if (<not valid>)
		return <error>

	<commit change>

	return <success>

which is also the style used by every other {g,s}et_msr() helper (except
AMD's PMU variant, which doesn't use a switch statement).

Modify the "set" paths with costly side effects, i.e. that reprogram
counters, to skip only the side effects, i.e. to perform reserved bits
checks even if the value is unchanged.  None of the reserved bits checks
are expensive, so there's no strong justification for skipping them, and
guarding only the side effect makes it slightly more obvious what is being
skipped and why.

No functional change intended (assuming no reserved bit bugs).

Link: https://lkml.kernel.org/r/Y%2B6cfen%2FCpO3%2FdLO%40google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent cdd2fbf commit 8bca8c5

1 file changed

Lines changed: 57 additions & 52 deletions

File tree

arch/x86/kvm/vmx/pmu_intel.c

Lines changed: 57 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -351,45 +351,47 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
351351
switch (msr) {
352352
case MSR_CORE_PERF_FIXED_CTR_CTRL:
353353
msr_info->data = pmu->fixed_ctr_ctrl;
354-
return 0;
354+
break;
355355
case MSR_CORE_PERF_GLOBAL_STATUS:
356356
msr_info->data = pmu->global_status;
357-
return 0;
357+
break;
358358
case MSR_CORE_PERF_GLOBAL_CTRL:
359359
msr_info->data = pmu->global_ctrl;
360-
return 0;
360+
break;
361361
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
362362
msr_info->data = 0;
363-
return 0;
363+
break;
364364
case MSR_IA32_PEBS_ENABLE:
365365
msr_info->data = pmu->pebs_enable;
366-
return 0;
366+
break;
367367
case MSR_IA32_DS_AREA:
368368
msr_info->data = pmu->ds_area;
369-
return 0;
369+
break;
370370
case MSR_PEBS_DATA_CFG:
371371
msr_info->data = pmu->pebs_data_cfg;
372-
return 0;
372+
break;
373373
default:
374374
if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
375375
(pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
376376
u64 val = pmc_read_counter(pmc);
377377
msr_info->data =
378378
val & pmu->counter_bitmask[KVM_PMC_GP];
379-
return 0;
379+
break;
380380
} else if ((pmc = get_fixed_pmc(pmu, msr))) {
381381
u64 val = pmc_read_counter(pmc);
382382
msr_info->data =
383383
val & pmu->counter_bitmask[KVM_PMC_FIXED];
384-
return 0;
384+
break;
385385
} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
386386
msr_info->data = pmc->eventsel;
387-
return 0;
388-
} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true))
389-
return 0;
387+
break;
388+
} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true)) {
389+
break;
390+
}
391+
return 1;
390392
}
391393

392-
return 1;
394+
return 0;
393395
}
394396

395397
static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
@@ -402,94 +404,97 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
402404

403405
switch (msr) {
404406
case MSR_CORE_PERF_FIXED_CTR_CTRL:
405-
if (pmu->fixed_ctr_ctrl == data)
406-
return 0;
407-
if (!(data & pmu->fixed_ctr_ctrl_mask)) {
407+
if (data & pmu->fixed_ctr_ctrl_mask)
408+
return 1;
409+
410+
if (pmu->fixed_ctr_ctrl != data)
408411
reprogram_fixed_counters(pmu, data);
409-
return 0;
410-
}
411412
break;
412413
case MSR_CORE_PERF_GLOBAL_STATUS:
413-
if (msr_info->host_initiated) {
414-
pmu->global_status = data;
415-
return 0;
416-
}
417-
break; /* RO MSR */
414+
if (!msr_info->host_initiated)
415+
return 1; /* RO MSR */
416+
417+
pmu->global_status = data;
418+
break;
418419
case MSR_CORE_PERF_GLOBAL_CTRL:
419-
if (pmu->global_ctrl == data)
420-
return 0;
421-
if (kvm_valid_perf_global_ctrl(pmu, data)) {
420+
if (!kvm_valid_perf_global_ctrl(pmu, data))
421+
return 1;
422+
423+
if (pmu->global_ctrl != data) {
422424
diff = pmu->global_ctrl ^ data;
423425
pmu->global_ctrl = data;
424426
reprogram_counters(pmu, diff);
425-
return 0;
426427
}
427428
break;
428429
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
429-
if (!(data & pmu->global_ovf_ctrl_mask)) {
430-
if (!msr_info->host_initiated)
431-
pmu->global_status &= ~data;
432-
return 0;
433-
}
430+
if (data & pmu->global_ovf_ctrl_mask)
431+
return 1;
432+
433+
if (!msr_info->host_initiated)
434+
pmu->global_status &= ~data;
434435
break;
435436
case MSR_IA32_PEBS_ENABLE:
436-
if (pmu->pebs_enable == data)
437-
return 0;
438-
if (!(data & pmu->pebs_enable_mask)) {
437+
if (data & pmu->pebs_enable_mask)
438+
return 1;
439+
440+
if (pmu->pebs_enable != data) {
439441
diff = pmu->pebs_enable ^ data;
440442
pmu->pebs_enable = data;
441443
reprogram_counters(pmu, diff);
442-
return 0;
443444
}
444445
break;
445446
case MSR_IA32_DS_AREA:
446447
if (msr_info->host_initiated && data && !guest_cpuid_has(vcpu, X86_FEATURE_DS))
447448
return 1;
448449
if (is_noncanonical_address(data, vcpu))
449450
return 1;
451+
450452
pmu->ds_area = data;
451-
return 0;
453+
break;
452454
case MSR_PEBS_DATA_CFG:
453-
if (pmu->pebs_data_cfg == data)
454-
return 0;
455-
if (!(data & pmu->pebs_data_cfg_mask)) {
456-
pmu->pebs_data_cfg = data;
457-
return 0;
458-
}
455+
if (data & pmu->pebs_data_cfg_mask)
456+
return 1;
457+
458+
pmu->pebs_data_cfg = data;
459459
break;
460460
default:
461461
if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
462462
(pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
463463
if ((msr & MSR_PMC_FULL_WIDTH_BIT) &&
464464
(data & ~pmu->counter_bitmask[KVM_PMC_GP]))
465465
return 1;
466+
466467
if (!msr_info->host_initiated &&
467468
!(msr & MSR_PMC_FULL_WIDTH_BIT))
468469
data = (s64)(s32)data;
469470
pmc->counter += data - pmc_read_counter(pmc);
470471
pmc_update_sample_period(pmc);
471-
return 0;
472+
break;
472473
} else if ((pmc = get_fixed_pmc(pmu, msr))) {
473474
pmc->counter += data - pmc_read_counter(pmc);
474475
pmc_update_sample_period(pmc);
475-
return 0;
476+
break;
476477
} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
477-
if (data == pmc->eventsel)
478-
return 0;
479478
reserved_bits = pmu->reserved_bits;
480479
if ((pmc->idx == 2) &&
481480
(pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
482481
reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
483-
if (!(data & reserved_bits)) {
482+
if (data & reserved_bits)
483+
return 1;
484+
485+
if (data != pmc->eventsel) {
484486
pmc->eventsel = data;
485487
kvm_pmu_request_counter_reprogam(pmc);
486-
return 0;
487488
}
488-
} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false))
489-
return 0;
489+
break;
490+
} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false)) {
491+
break;
492+
}
493+
/* Not a known PMU MSR. */
494+
return 1;
490495
}
491496

492-
return 1;
497+
return 0;
493498
}
494499

495500
static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu)

0 commit comments

Comments (0)