@@ -93,11 +93,6 @@ void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
 #undef __KVM_X86_PMU_OP
 }
 
-static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc)
-{
-	return static_call(kvm_x86_pmu_pmc_is_enabled)(pmc);
-}
-
 static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
 {
 	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
@@ -562,6 +557,14 @@ void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
 
 bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 {
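+	/*
+	 * The global {ctrl,status} MSRs exist if and only if the vCPU's PMU
+	 * supports PERF_GLOBAL_CTRL.
+	 */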
+	switch (msr) {
+	case MSR_CORE_PERF_GLOBAL_STATUS:
+	case MSR_CORE_PERF_GLOBAL_CTRL:
+	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+		return kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu));
+	default:
+		break;
+	}
 	return static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr) ||
 	       static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr);
 }
@@ -577,13 +580,86 @@ static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
 
 int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
-	return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	u32 msr = msr_info->index;
+
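+	/*
+	 * Handle reads of the common global MSRs here, and punt all other
+	 * MSRs to vendor-specific code.
+	 */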
+	switch (msr) {
+	case MSR_CORE_PERF_GLOBAL_STATUS:
+	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
+		msr_info->data = pmu->global_status;
+		break;
+	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
+	case MSR_CORE_PERF_GLOBAL_CTRL:
+		msr_info->data = pmu->global_ctrl;
+		break;
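+	/*
+	 * The OVF_CTRL/STATUS_CLR MSRs exist purely to clear bits in
+	 * GLOBAL_STATUS; reads always return '0'.
+	 */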
+	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
+	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+		msr_info->data = 0;
+		break;
+	default:
+		return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
+	}
+
+	return 0;
 }
 
 int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
-	kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
-	return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	u32 msr = msr_info->index;
+	u64 data = msr_info->data;
+	u64 diff;
+
+	/*
+	 * Note, AMD ignores writes to reserved bits and read-only PMU MSRs,
+	 * whereas Intel generates #GP on attempts to write reserved/RO MSRs.
+	 */
+	switch (msr) {
+	case MSR_CORE_PERF_GLOBAL_STATUS:
+		if (!msr_info->host_initiated)
+			return 1; /* RO MSR */
+		fallthrough;
+	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
+		/* Per PPR, Read-only MSR. Writes are ignored. */
+		if (!msr_info->host_initiated)
+			break;
+
+		if (data & pmu->global_status_mask)
+			return 1;
+
+		pmu->global_status = data;
+		break;
+	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
+		data &= ~pmu->global_ctrl_mask;
+		fallthrough;
+	case MSR_CORE_PERF_GLOBAL_CTRL:
+		if (!kvm_valid_perf_global_ctrl(pmu, data))
+			return 1;
+
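+		/*
+		 * Reprogram only the counters whose global enable bit
+		 * actually changed.
+		 */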
+		if (pmu->global_ctrl != data) {
+			diff = pmu->global_ctrl ^ data;
+			pmu->global_ctrl = data;
+			reprogram_counters(pmu, diff);
+		}
+		break;
+	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+		/*
+		 * GLOBAL_OVF_CTRL, a.k.a. GLOBAL STATUS_RESET, clears bits in
+		 * GLOBAL_STATUS, and so the set of reserved bits is the same.
+		 */
+		if (data & pmu->global_status_mask)
+			return 1;
+		fallthrough;
+	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
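+		/*
+		 * Guest writes clear the corresponding GLOBAL_STATUS bits;
+		 * host-initiated writes are ignored.
+		 */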
+		if (!msr_info->host_initiated)
+			pmu->global_status &= ~data;
+		break;
+	default:
+		kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
+		return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
+	}
+
+	return 0;
 }
 
 /* refresh PMU settings. This function generally is called when underlying