@@ -45,6 +45,22 @@ static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
4545 [7 ] = { 0xd1 , 0x00 , PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
4646};
4747
/*
 * Event table for family 17h+ guests, which renumbered several perfmon
 * events relative to the pre-17h table above (e.g. cache references/misses
 * and the stalled-cycles events use different select/unit-mask encodings).
 * Duplicated from amd_f17h_perfmon_event_map.
 */
static struct kvm_event_hw_type_mapping amd_f17h_event_mapping[] = {
	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x60, 0xff, PERF_COUNT_HW_CACHE_REFERENCES },
	[3] = { 0x64, 0x09, PERF_COUNT_HW_CACHE_MISSES },
	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[6] = { 0x87, 0x02, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	[7] = { 0x87, 0x01, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};

/*
 * amd_pmc_perf_hw_id() indexes whichever table it selects with the same
 * loop bound, so the two tables must stay the same size.
 */
static_assert(ARRAY_SIZE(amd_event_mapping) ==
	      ARRAY_SIZE(amd_f17h_event_mapping));
4864static unsigned int get_msr_base (struct kvm_pmu * pmu , enum pmu_type type )
4965{
5066 struct kvm_vcpu * vcpu = pmu_to_vcpu (pmu );
@@ -140,6 +156,7 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
140156
/*
 * Translate the guest-programmed event select + unit mask of @pmc into a
 * generic perf hardware event id, consulting the event table that matches
 * the guest's CPU family.  Returns PERF_COUNT_HW_MAX when the encoding has
 * no generic equivalent (or for fixed counters, which AMD does not have).
 */
static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
{
	struct kvm_event_hw_type_mapping *event_mapping;
	u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
	int i;

	/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
	if (WARN_ON(pmc_is_fixed(pmc)))
		return PERF_COUNT_HW_MAX;

	/*
	 * Family 17h renumbered several events, so pick the table that
	 * matches the *guest's* advertised family, not the host's.
	 */
	if (guest_cpuid_family(pmc->vcpu) >= 0x17)
		event_mapping = amd_f17h_event_mapping;
	else
		event_mapping = amd_event_mapping;

	/*
	 * Bounding the walk by amd_event_mapping's size is safe for either
	 * table: a static_assert above guarantees the two are the same size.
	 */
	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
		if (event_mapping[i].eventsel == event_select
		    && event_mapping[i].unit_mask == unit_mask)
			break;

	if (i == ARRAY_SIZE(amd_event_mapping))
		return PERF_COUNT_HW_MAX;

	return event_mapping[i].event_type;
}
161183
162184/* check if a PMC is enabled by comparing it against global_ctrl bits. Because
0 commit comments