@@ -348,14 +348,14 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
-               if (data & pmu->fixed_ctr_ctrl_mask)
+               if (data & pmu->fixed_ctr_ctrl_rsvd)
                        return 1;
 
                if (pmu->fixed_ctr_ctrl != data)
                        reprogram_fixed_counters(pmu, data);
                break;
        case MSR_IA32_PEBS_ENABLE:
-               if (data & pmu->pebs_enable_mask)
+               if (data & pmu->pebs_enable_rsvd)
                        return 1;
 
                if (pmu->pebs_enable != data) {
@@ -371,7 +371,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                pmu->ds_area = data;
                break;
        case MSR_PEBS_DATA_CFG:
-               if (data & pmu->pebs_data_cfg_mask)
+               if (data & pmu->pebs_data_cfg_rsvd)
                        return 1;
 
                pmu->pebs_data_cfg = data;
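All three cases above follow the convention the rename makes explicit: a _rsvd field carries a 1 in every bit position the guest must not set, so a single AND rejects an illegal write (returning 1 here typically surfaces as #GP in the guest). A minimal sketch of the check, using a hypothetical helper name that is not part of the patch:

        /* Illustration only: 'rsvd' has a 1 in every disallowed bit,
         * so any overlap with 'data' invalidates the write. */
        static inline bool pmu_msr_write_ok(u64 data, u64 rsvd)
        {
                return !(data & rsvd);
        }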
@@ -436,8 +436,8 @@ static __always_inline u64 intel_get_fixed_pmc_eventsel(unsigned int index)
        };
        u64 eventsel;
 
-       BUILD_BUG_ON(ARRAY_SIZE(fixed_pmc_perf_ids) != KVM_PMC_MAX_FIXED);
-       BUILD_BUG_ON(index >= KVM_PMC_MAX_FIXED);
+       BUILD_BUG_ON(ARRAY_SIZE(fixed_pmc_perf_ids) != KVM_MAX_NR_INTEL_FIXED_COUTNERS);
+       BUILD_BUG_ON(index >= KVM_MAX_NR_INTEL_FIXED_COUTNERS);
 
        /*
         * Yell if perf reports support for a fixed counter but perf doesn't
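For context, the table whose size is asserted here presumably maps a fixed counter index to a generic perf event id, along these lines (abridged; the full table closes at the "};" shown in this hunk). Note that the COUTNERS spelling matches the macro as actually defined in the headers:

        static const u64 fixed_pmc_perf_ids[] = {
                [0] = PERF_COUNT_HW_INSTRUCTIONS,       /* INST_RETIRED.ANY */
                [1] = PERF_COUNT_HW_CPU_CYCLES,         /* CPU_CLK_UNHALTED.CORE */
                [2] = PERF_COUNT_HW_REF_CPU_CYCLES,     /* CPU_CLK_UNHALTED.REF_TSC */
        };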
@@ -448,6 +448,14 @@ static __always_inline u64 intel_get_fixed_pmc_eventsel(unsigned int index)
        return eventsel;
 }
 
+static void intel_pmu_enable_fixed_counter_bits(struct kvm_pmu *pmu, u64 bits)
+{
+       int i;
+
+       for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
+               pmu->fixed_ctr_ctrl_rsvd &= ~intel_fixed_bits_by_idx(i, bits);
+}
+
 static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 {
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
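The new helper un-reserves the given control bits for each fixed counter the guest actually has. Judging from the open-coded masks it replaces in the hunks below (0xbull << (i * 4) and 1ULL << (KVM_FIXED_PMC_BASE_IDX + i * 4)), the perf_event.h macro family it leans on plausibly looks like the following; this is reproduced as an assumption for the reader, not as part of the patch:

        /* Each fixed counter owns a 4-bit field in MSR_CORE_PERF_FIXED_CTR_CTRL;
         * the adaptive-record bit sits in the high half of the MSR. */
        #define INTEL_FIXED_BITS_STRIDE         4
        #define INTEL_FIXED_0_KERNEL            (1ULL << 0)     /* count ring 0 */
        #define INTEL_FIXED_0_USER              (1ULL << 1)     /* count ring 3 */
        #define INTEL_FIXED_0_ENABLE_PMI        (1ULL << 3)     /* PMI on overflow */
        #define ICL_FIXED_0_ADAPTIVE            (1ULL << 32)    /* adaptive PEBS record */

        #define intel_fixed_bits_by_idx(_idx, _bits)    \
                ((_bits) << ((_idx) * INTEL_FIXED_BITS_STRIDE))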
@@ -456,8 +464,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
        union cpuid10_eax eax;
        union cpuid10_edx edx;
        u64 perf_capabilities;
-       u64 counter_mask;
-       int i;
+       u64 counter_rsvd;
 
        memset(&lbr_desc->records, 0, sizeof(lbr_desc->records));
 
@@ -501,22 +508,24 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
                        ((u64)1 << edx.split.bit_width_fixed) - 1;
        }
 
-       for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
-               pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4));
-       counter_mask = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
+       intel_pmu_enable_fixed_counter_bits(pmu, INTEL_FIXED_0_KERNEL |
+                                                INTEL_FIXED_0_USER |
+                                                INTEL_FIXED_0_ENABLE_PMI);
+
+       counter_rsvd = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
                 (((1ull << pmu->nr_arch_fixed_counters) - 1) << KVM_FIXED_PMC_BASE_IDX));
-       pmu->global_ctrl_mask = counter_mask;
+       pmu->global_ctrl_rsvd = counter_rsvd;
 
        /*
         * GLOBAL_STATUS and GLOBAL_OVF_CONTROL (a.k.a. GLOBAL_STATUS_RESET)
         * share reserved bit definitions.  The kernel just happens to use
         * OVF_CTRL for the names.
         */
-       pmu->global_status_mask = pmu->global_ctrl_mask
+       pmu->global_status_rsvd = pmu->global_ctrl_rsvd
                        & ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
                            MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
        if (vmx_pt_mode_is_host_guest())
-               pmu->global_status_mask &=
+               pmu->global_status_rsvd &=
                        ~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;
 
        entry = kvm_find_cpuid_entry_index(vcpu, 7, 0);
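To sanity-check the counter_rsvd arithmetic, here is a worked example with assumed counts: 8 GP counters, 3 fixed counters, and KVM_FIXED_PMC_BASE_IDX taken to be 32 (consistent with the 1ULL << (KVM_FIXED_PMC_BASE_IDX + i * 4) shift removed in the next hunk):

        u64 gp    = (1ull << 8) - 1;            /* bits 0-7:   0x00000000000000ff */
        u64 fixed = ((1ull << 3) - 1) << 32;    /* bits 32-34: 0x0000000700000000 */
        u64 counter_rsvd = ~(gp | fixed);       /* 0xfffffff8ffffff00 */

Every bit still set in counter_rsvd is reserved in GLOBAL_CTRL; GLOBAL_STATUS shares the mask minus the OVF_BUF and COND_CHGD carve-outs, plus TOPA_PMI when Processor Trace runs in host/guest mode.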
@@ -544,15 +553,12 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 
        if (perf_capabilities & PERF_CAP_PEBS_FORMAT) {
                if (perf_capabilities & PERF_CAP_PEBS_BASELINE) {
-                       pmu->pebs_enable_mask = counter_mask;
+                       pmu->pebs_enable_rsvd = counter_rsvd;
                        pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
-                       for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
-                               pmu->fixed_ctr_ctrl_mask &=
-                                       ~(1ULL << (KVM_FIXED_PMC_BASE_IDX + i * 4));
-                       }
-                       pmu->pebs_data_cfg_mask = ~0xff00000full;
+                       pmu->pebs_data_cfg_rsvd = ~0xff00000full;
+                       intel_pmu_enable_fixed_counter_bits(pmu, ICL_FIXED_0_ADAPTIVE);
                } else {
-                       pmu->pebs_enable_mask =
+                       pmu->pebs_enable_rsvd =
                                ~((1ull << pmu->nr_arch_gp_counters) - 1);
                }
        }
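The ~0xff00000full literal leaves bits 3:0 and 31:24 of MSR_PEBS_DATA_CFG writable; those fields select the adaptive PEBS record groups and the number of recorded LBR entries. The names below follow the SDM / perf_event.h and are given as background, not as part of this patch:

        #define PEBS_DATACFG_MEMINFO    BIT_ULL(0)      /* memory info group */
        #define PEBS_DATACFG_GP         BIT_ULL(1)      /* general-purpose regs */
        #define PEBS_DATACFG_XMMS       BIT_ULL(2)      /* XMM registers */
        #define PEBS_DATACFG_LBRS       BIT_ULL(3)      /* LBR entries */
        #define PEBS_DATACFG_LBR_SHIFT  24              /* LBR entry count */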
@@ -564,14 +570,14 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
 
-       for (i = 0; i < KVM_INTEL_PMC_MAX_GENERIC; i++) {
+       for (i = 0; i < KVM_MAX_NR_INTEL_GP_COUNTERS; i++) {
                pmu->gp_counters[i].type = KVM_PMC_GP;
                pmu->gp_counters[i].vcpu = vcpu;
                pmu->gp_counters[i].idx = i;
                pmu->gp_counters[i].current_config = 0;
        }
 
-       for (i = 0; i < KVM_PMC_MAX_FIXED; i++) {
+       for (i = 0; i < KVM_MAX_NR_INTEL_FIXED_COUTNERS; i++) {
                pmu->fixed_counters[i].type = KVM_PMC_FIXED;
                pmu->fixed_counters[i].vcpu = vcpu;
                pmu->fixed_counters[i].idx = i + KVM_FIXED_PMC_BASE_IDX;
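intel_pmu_init() fills every slot up to the compile-time maximum, while intel_pmu_refresh() above only un-reserves bits for the counters actually exposed via nr_arch_gp_counters / nr_arch_fixed_counters. A rough sketch of the kvm_pmu arrays these loops assume (field names inferred from context; treat the max-of-vendors wrapper macros as an assumption):

        struct kvm_pmu {
                /* ... */
                struct kvm_pmc gp_counters[KVM_MAX_NR_GP_COUNTERS];
                struct kvm_pmc fixed_counters[KVM_MAX_NR_FIXED_COUTNERS];
                /* ... */
        };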
@@ -731,6 +737,6 @@ struct kvm_pmu_ops intel_pmu_ops __initdata = {
        .deliver_pmi = intel_pmu_deliver_pmi,
        .cleanup = intel_pmu_cleanup,
        .EVENTSEL_EVENT = ARCH_PERFMON_EVENTSEL_EVENT,
-       .MAX_NR_GP_COUNTERS = KVM_INTEL_PMC_MAX_GENERIC,
+       .MAX_NR_GP_COUNTERS = KVM_MAX_NR_INTEL_GP_COUNTERS,
        .MIN_NR_GP_COUNTERS = 1,
 };
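The renamed bound feeds the vendor-agnostic setup path. A simplified sketch of how the common code might clamp the host-reported counter count against it (modeled loosely on kvm_init_pmu_capability(); details elided):

        /* Clamp what host perf reports to what the vendor module supports. */
        kvm_pmu_cap.num_counters_gp = min(kvm_pmu_cap.num_counters_gp,
                                          pmu_ops->MAX_NR_GP_COUNTERS);
        if (kvm_pmu_cap.num_counters_gp < pmu_ops->MIN_NR_GP_COUNTERS)
                enable_pmu = false;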