@@ -57,7 +57,7 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
 		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
 
 		__set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
-		kvm_pmu_request_counter_reprogam(pmc);
+		kvm_pmu_request_counter_reprogram(pmc);
 	}
 }
 
@@ -76,13 +76,13 @@ static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
 static void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
 {
 	int bit;
-	struct kvm_pmc *pmc;
 
-	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX) {
-		pmc = intel_pmc_idx_to_pmc(pmu, bit);
-		if (pmc)
-			kvm_pmu_request_counter_reprogam(pmc);
-	}
+	if (!diff)
+		return;
+
+	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
+		set_bit(bit, pmu->reprogram_pmi);
+	kvm_make_request(KVM_REQ_PMU, pmu_to_vcpu(pmu));
 }
 
 static bool intel_hw_event_available(struct kvm_pmc *pmc)
@@ -351,45 +351,47 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	switch (msr) {
 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
 		msr_info->data = pmu->fixed_ctr_ctrl;
-		return 0;
+		break;
 	case MSR_CORE_PERF_GLOBAL_STATUS:
 		msr_info->data = pmu->global_status;
-		return 0;
+		break;
 	case MSR_CORE_PERF_GLOBAL_CTRL:
 		msr_info->data = pmu->global_ctrl;
-		return 0;
+		break;
 	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
 		msr_info->data = 0;
-		return 0;
+		break;
 	case MSR_IA32_PEBS_ENABLE:
 		msr_info->data = pmu->pebs_enable;
-		return 0;
+		break;
 	case MSR_IA32_DS_AREA:
 		msr_info->data = pmu->ds_area;
-		return 0;
+		break;
 	case MSR_PEBS_DATA_CFG:
 		msr_info->data = pmu->pebs_data_cfg;
-		return 0;
+		break;
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
 		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
 			u64 val = pmc_read_counter(pmc);
 			msr_info->data =
 				val & pmu->counter_bitmask[KVM_PMC_GP];
-			return 0;
+			break;
 		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
 			u64 val = pmc_read_counter(pmc);
 			msr_info->data =
 				val & pmu->counter_bitmask[KVM_PMC_FIXED];
-			return 0;
+			break;
 		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
 			msr_info->data = pmc->eventsel;
-			return 0;
-		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true))
-			return 0;
+			break;
+		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true)) {
+			break;
+		}
+		return 1;
 	}
 
-	return 1;
+	return 0;
 }
 
 static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
@@ -402,94 +404,97 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
 	switch (msr) {
 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
-		if (pmu->fixed_ctr_ctrl == data)
-			return 0;
-		if (!(data & pmu->fixed_ctr_ctrl_mask)) {
+		if (data & pmu->fixed_ctr_ctrl_mask)
+			return 1;
+
+		if (pmu->fixed_ctr_ctrl != data)
 			reprogram_fixed_counters(pmu, data);
-			return 0;
-		}
 		break;
 	case MSR_CORE_PERF_GLOBAL_STATUS:
-		if (msr_info->host_initiated) {
-			pmu->global_status = data;
-			return 0;
-		}
-		break; /* RO MSR */
+		if (!msr_info->host_initiated)
+			return 1; /* RO MSR */
+
+		pmu->global_status = data;
+		break;
 	case MSR_CORE_PERF_GLOBAL_CTRL:
-		if (pmu->global_ctrl == data)
-			return 0;
-		if (kvm_valid_perf_global_ctrl(pmu, data)) {
+		if (!kvm_valid_perf_global_ctrl(pmu, data))
+			return 1;
+
+		if (pmu->global_ctrl != data) {
 			diff = pmu->global_ctrl ^ data;
 			pmu->global_ctrl = data;
 			reprogram_counters(pmu, diff);
-			return 0;
 		}
 		break;
 	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
-		if (!(data & pmu->global_ovf_ctrl_mask)) {
-			if (!msr_info->host_initiated)
-				pmu->global_status &= ~data;
-			return 0;
-		}
+		if (data & pmu->global_ovf_ctrl_mask)
+			return 1;
+
+		if (!msr_info->host_initiated)
+			pmu->global_status &= ~data;
 		break;
 	case MSR_IA32_PEBS_ENABLE:
-		if (pmu->pebs_enable == data)
-			return 0;
-		if (!(data & pmu->pebs_enable_mask)) {
+		if (data & pmu->pebs_enable_mask)
+			return 1;
+
+		if (pmu->pebs_enable != data) {
 			diff = pmu->pebs_enable ^ data;
 			pmu->pebs_enable = data;
 			reprogram_counters(pmu, diff);
-			return 0;
 		}
 		break;
	case MSR_IA32_DS_AREA:
 		if (msr_info->host_initiated && data && !guest_cpuid_has(vcpu, X86_FEATURE_DS))
 			return 1;
 		if (is_noncanonical_address(data, vcpu))
 			return 1;
+
 		pmu->ds_area = data;
-		return 0;
+		break;
 	case MSR_PEBS_DATA_CFG:
-		if (pmu->pebs_data_cfg == data)
-			return 0;
-		if (!(data & pmu->pebs_data_cfg_mask)) {
-			pmu->pebs_data_cfg = data;
-			return 0;
-		}
+		if (data & pmu->pebs_data_cfg_mask)
+			return 1;
+
+		pmu->pebs_data_cfg = data;
 		break;
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
 		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
 			if ((msr & MSR_PMC_FULL_WIDTH_BIT) &&
 			    (data & ~pmu->counter_bitmask[KVM_PMC_GP]))
 				return 1;
+
 			if (!msr_info->host_initiated &&
 			    !(msr & MSR_PMC_FULL_WIDTH_BIT))
 				data = (s64)(s32)data;
 			pmc->counter += data - pmc_read_counter(pmc);
 			pmc_update_sample_period(pmc);
-			return 0;
+			break;
 		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
 			pmc->counter += data - pmc_read_counter(pmc);
 			pmc_update_sample_period(pmc);
-			return 0;
+			break;
 		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
-			if (data == pmc->eventsel)
-				return 0;
 			reserved_bits = pmu->reserved_bits;
 			if ((pmc->idx == 2) &&
 			    (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
 				reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
-			if (!(data & reserved_bits)) {
+			if (data & reserved_bits)
+				return 1;
+
+			if (data != pmc->eventsel) {
 				pmc->eventsel = data;
-				kvm_pmu_request_counter_reprogam(pmc);
-				return 0;
+				kvm_pmu_request_counter_reprogram(pmc);
 			}
-		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false))
-			return 0;
+			break;
+		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false)) {
+			break;
+		}
+		/* Not a known PMU MSR. */
+		return 1;
 	}
 
-	return 1;
+	return 0;
 }
 
 static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu)
@@ -531,6 +536,16 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	pmu->pebs_enable_mask = ~0ull;
 	pmu->pebs_data_cfg_mask = ~0ull;
 
+	memset(&lbr_desc->records, 0, sizeof(lbr_desc->records));
+
+	/*
+	 * Setting passthrough of LBR MSRs is done only in the VM-Entry loop,
+	 * and PMU refresh is disallowed after the vCPU has run, i.e. this code
+	 * should never be reached while KVM is passing through MSRs.
+	 */
+	if (KVM_BUG_ON(lbr_desc->msr_passthrough, vcpu->kvm))
+		return;
+
 	entry = kvm_find_cpuid_entry(vcpu, 0xa);
 	if (!entry || !vcpu->kvm->arch.enable_pmu)
 		return;
0 commit comments