#define AMD_ZEN_BR_RETIRED	EVENT(0xc2, 0)

/*
 * "Retired instructions", from Processor Programming Reference
 * (PPR) for AMD Family 17h Model 01h, Revision B1 Processors,
 * Preliminary Processor Programming Reference (PPR) for AMD Family
 * 17h Model 31h, Revision B0 Processors, and Preliminary Processor
 * Programming Reference (PPR) for AMD Family 19h Model 01h, Revision
 * B1 Processors Volume 1 of 2.
 *	--- and ---
 * "Instructions retired", from the Intel SDM, volume 3,
 * "Pre-defined Architectural Performance Events."
 */
#define INST_RETIRED	EVENT(0xc0, 0)
/*
 * This event list comprises Intel's eight architectural events plus
 * AMD's "retired branch instructions" for Zen[123] (and possibly
 * other AMD CPUs).
 */
6277static const uint64_t event_list [] = {
6378 EVENT (0x3c , 0 ),
64- EVENT ( 0xc0 , 0 ) ,
79+ INST_RETIRED ,
6580 EVENT (0x3c , 1 ),
6681 EVENT (0x2e , 0x4f ),
6782 EVENT (0x2e , 0x41 ),
@@ -76,6 +91,7 @@ struct {
7691 uint64_t stores ;
7792 uint64_t loads_stores ;
7893 uint64_t branches_retired ;
94+ uint64_t instructions_retired ;
7995} pmc_results ;
8096
8197/*
@@ -110,10 +126,12 @@ static void check_msr(uint32_t msr, uint64_t bits_to_flip)
110126static void run_and_measure_loop (uint32_t msr_base )
111127{
112128 const uint64_t branches_retired = rdmsr (msr_base + 0 );
129+ const uint64_t insn_retired = rdmsr (msr_base + 1 );
113130
114131 __asm__ __volatile__("loop ." : "+c" ((int ){NUM_BRANCHES }));
115132
116133 pmc_results .branches_retired = rdmsr (msr_base + 0 ) - branches_retired ;
134+ pmc_results .instructions_retired = rdmsr (msr_base + 1 ) - insn_retired ;
117135}
118136
119137static void intel_guest_code (void )
@@ -127,7 +145,9 @@ static void intel_guest_code(void)
127145 wrmsr (MSR_CORE_PERF_GLOBAL_CTRL , 0 );
128146 wrmsr (MSR_P6_EVNTSEL0 , ARCH_PERFMON_EVENTSEL_ENABLE |
129147 ARCH_PERFMON_EVENTSEL_OS | INTEL_BR_RETIRED );
130- wrmsr (MSR_CORE_PERF_GLOBAL_CTRL , 0x1 );
148+ wrmsr (MSR_P6_EVNTSEL1 , ARCH_PERFMON_EVENTSEL_ENABLE |
149+ ARCH_PERFMON_EVENTSEL_OS | INST_RETIRED );
150+ wrmsr (MSR_CORE_PERF_GLOBAL_CTRL , 0x3 );
131151
132152 run_and_measure_loop (MSR_IA32_PMC0 );
133153 GUEST_SYNC (0 );
@@ -149,6 +169,8 @@ static void amd_guest_code(void)
149169 wrmsr (MSR_K7_EVNTSEL0 , 0 );
150170 wrmsr (MSR_K7_EVNTSEL0 , ARCH_PERFMON_EVENTSEL_ENABLE |
151171 ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_BR_RETIRED );
172+ wrmsr (MSR_K7_EVNTSEL1 , ARCH_PERFMON_EVENTSEL_ENABLE |
173+ ARCH_PERFMON_EVENTSEL_OS | INST_RETIRED );
152174
153175 run_and_measure_loop (MSR_K7_PERFCTR0 );
154176 GUEST_SYNC (0 );
@@ -263,20 +285,26 @@ static struct kvm_pmu_event_filter *remove_event(struct kvm_pmu_event_filter *f,
/*
 * Assert the PMCs counted while the filter allowed the events: both the
 * branches-retired and instructions-retired counts must be non-zero.  A
 * branch count that doesn't exactly equal NUM_BRANCHES is only logged,
 * not fatal, since host activity (e.g. interrupts) can perturb it.
 */
#define ASSERT_PMC_COUNTING_INSTRUCTIONS()						\
do {											\
	uint64_t br = pmc_results.branches_retired;					\
	uint64_t ir = pmc_results.instructions_retired;					\
											\
	if (br && br != NUM_BRANCHES)							\
		pr_info("%s: Branch instructions retired = %lu (expected %u)\n",	\
			__func__, br, NUM_BRANCHES);					\
	TEST_ASSERT(br, "%s: Branch instructions retired = %lu (expected > 0)",		\
		    __func__, br);							\
	TEST_ASSERT(ir, "%s: Instructions retired = %lu (expected > 0)",		\
		    __func__, ir);							\
} while (0)
273298
/*
 * Assert the PMCs did NOT count, i.e. the filter blocked the events:
 * both the branches-retired and instructions-retired counts must be
 * exactly zero.
 */
#define ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS()						\
do {											\
	uint64_t br = pmc_results.branches_retired;					\
	uint64_t ir = pmc_results.instructions_retired;					\
											\
	TEST_ASSERT(!br, "%s: Branch instructions retired = %lu (expected 0)",		\
		    __func__, br);							\
	TEST_ASSERT(!ir, "%s: Instructions retired = %lu (expected 0)",			\
		    __func__, ir);							\
} while (0)
281309
282310static void test_without_filter (struct kvm_vcpu * vcpu )
@@ -329,6 +357,7 @@ static void test_not_member_deny_list(struct kvm_vcpu *vcpu)
329357{
330358 struct kvm_pmu_event_filter * f = event_filter (KVM_PMU_EVENT_DENY );
331359
360+ remove_event (f , INST_RETIRED );
332361 remove_event (f , INTEL_BR_RETIRED );
333362 remove_event (f , AMD_ZEN_BR_RETIRED );
334363 test_with_filter (vcpu , f );
@@ -341,6 +370,7 @@ static void test_not_member_allow_list(struct kvm_vcpu *vcpu)
341370{
342371 struct kvm_pmu_event_filter * f = event_filter (KVM_PMU_EVENT_ALLOW );
343372
373+ remove_event (f , INST_RETIRED );
344374 remove_event (f , INTEL_BR_RETIRED );
345375 remove_event (f , AMD_ZEN_BR_RETIRED );
346376 test_with_filter (vcpu , f );