1111 */
1212
1313#define _GNU_SOURCE /* for program_invocation_short_name */
14- #include "test_util.h"
14+
1515#include "kvm_util.h"
16+ #include "pmu.h"
1617#include "processor.h"
17-
18- /*
19- * In lieu of copying perf_event.h into tools...
20- */
21- #define ARCH_PERFMON_EVENTSEL_OS (1ULL << 17)
22- #define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22)
23-
24- /* End of stuff taken from perf_event.h. */
25-
26- /* Oddly, this isn't in perf_event.h. */
27- #define ARCH_PERFMON_BRANCHES_RETIRED 5
18+ #include "test_util.h"
2819
/* Iteration count for the guest's measured "loop ." sequence (ECX-driven). */
#define NUM_BRANCHES 42

/* Number of filter entries reserved for the masked-events test cases. */
#define MAX_TEST_EVENTS 10

/*
 * Deliberately out-of-range values for negative-testing
 * KVM_SET_PMU_EVENT_FILTER: one past the last valid action, flags outside
 * the valid mask, and one more event than the filter can hold.
 */
#define PMU_EVENT_FILTER_INVALID_ACTION		(KVM_PMU_EVENT_DENY + 1)
#define PMU_EVENT_FILTER_INVALID_FLAGS		(KVM_PMU_EVENT_FLAGS_VALID_MASK << 1)
#define PMU_EVENT_FILTER_INVALID_NEVENTS	(KVM_PMU_EVENT_FILTER_MAX_EVENTS + 1)
8026
8127struct __kvm_pmu_event_filter {
8228 __u32 action ;
8329 __u32 nevents ;
8430 __u32 fixed_counter_bitmap ;
8531 __u32 flags ;
8632 __u32 pad [4 ];
87- __u64 events [MAX_FILTER_EVENTS ];
33+ __u64 events [KVM_PMU_EVENT_FILTER_MAX_EVENTS ];
8834};
8935
9036/*
91- * This event list comprises Intel's eight architectural events plus
92- * AMD's "retired branch instructions" for Zen[123] (and possibly
93- * other AMD CPUs) .
37+ * This event list comprises Intel's known architectural events, plus AMD's
38+ * "retired branch instructions" for Zen1-Zen3 (and* possibly other AMD CPUs).
39+ * Note, AMD and Intel use the same encoding for instructions retired .
9440 */
41+ kvm_static_assert (INTEL_ARCH_INSTRUCTIONS_RETIRED == AMD_ZEN_INSTRUCTIONS_RETIRED );
42+
9543static const struct __kvm_pmu_event_filter base_event_filter = {
9644 .nevents = ARRAY_SIZE (base_event_filter .events ),
9745 .events = {
98- EVENT ( 0x3c , 0 ) ,
99- INST_RETIRED ,
100- EVENT ( 0x3c , 1 ) ,
101- EVENT ( 0x2e , 0x4f ) ,
102- EVENT ( 0x2e , 0x41 ) ,
103- EVENT ( 0xc4 , 0 ) ,
104- EVENT ( 0xc5 , 0 ) ,
105- EVENT ( 0xa4 , 1 ) ,
106- AMD_ZEN_BR_RETIRED ,
46+ INTEL_ARCH_CPU_CYCLES ,
47+ INTEL_ARCH_INSTRUCTIONS_RETIRED ,
48+ INTEL_ARCH_REFERENCE_CYCLES ,
49+ INTEL_ARCH_LLC_REFERENCES ,
50+ INTEL_ARCH_LLC_MISSES ,
51+ INTEL_ARCH_BRANCHES_RETIRED ,
52+ INTEL_ARCH_BRANCHES_MISPREDICTED ,
53+ INTEL_ARCH_TOPDOWN_SLOTS ,
54+ AMD_ZEN_BRANCHES_RETIRED ,
10755 },
10856};
10957
@@ -165,9 +113,9 @@ static void intel_guest_code(void)
165113 for (;;) {
166114 wrmsr (MSR_CORE_PERF_GLOBAL_CTRL , 0 );
167115 wrmsr (MSR_P6_EVNTSEL0 , ARCH_PERFMON_EVENTSEL_ENABLE |
168- ARCH_PERFMON_EVENTSEL_OS | INTEL_BR_RETIRED );
116+ ARCH_PERFMON_EVENTSEL_OS | INTEL_ARCH_BRANCHES_RETIRED );
169117 wrmsr (MSR_P6_EVNTSEL1 , ARCH_PERFMON_EVENTSEL_ENABLE |
170- ARCH_PERFMON_EVENTSEL_OS | INST_RETIRED );
118+ ARCH_PERFMON_EVENTSEL_OS | INTEL_ARCH_INSTRUCTIONS_RETIRED );
171119 wrmsr (MSR_CORE_PERF_GLOBAL_CTRL , 0x3 );
172120
173121 run_and_measure_loop (MSR_IA32_PMC0 );
@@ -189,9 +137,9 @@ static void amd_guest_code(void)
189137 for (;;) {
190138 wrmsr (MSR_K7_EVNTSEL0 , 0 );
191139 wrmsr (MSR_K7_EVNTSEL0 , ARCH_PERFMON_EVENTSEL_ENABLE |
192- ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_BR_RETIRED );
140+ ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_BRANCHES_RETIRED );
193141 wrmsr (MSR_K7_EVNTSEL1 , ARCH_PERFMON_EVENTSEL_ENABLE |
194- ARCH_PERFMON_EVENTSEL_OS | INST_RETIRED );
142+ ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_INSTRUCTIONS_RETIRED );
195143
196144 run_and_measure_loop (MSR_K7_PERFCTR0 );
197145 GUEST_SYNC (0 );
@@ -312,7 +260,7 @@ static void test_amd_deny_list(struct kvm_vcpu *vcpu)
312260 .action = KVM_PMU_EVENT_DENY ,
313261 .nevents = 1 ,
314262 .events = {
315- EVENT (0x1C2 , 0 ),
263+ RAW_EVENT (0x1C2 , 0 ),
316264 },
317265 };
318266
@@ -347,9 +295,9 @@ static void test_not_member_deny_list(struct kvm_vcpu *vcpu)
347295
348296 f .action = KVM_PMU_EVENT_DENY ;
349297
350- remove_event (& f , INST_RETIRED );
351- remove_event (& f , INTEL_BR_RETIRED );
352- remove_event (& f , AMD_ZEN_BR_RETIRED );
298+ remove_event (& f , INTEL_ARCH_INSTRUCTIONS_RETIRED );
299+ remove_event (& f , INTEL_ARCH_BRANCHES_RETIRED );
300+ remove_event (& f , AMD_ZEN_BRANCHES_RETIRED );
353301 test_with_filter (vcpu , & f );
354302
355303 ASSERT_PMC_COUNTING_INSTRUCTIONS ();
@@ -361,9 +309,9 @@ static void test_not_member_allow_list(struct kvm_vcpu *vcpu)
361309
362310 f .action = KVM_PMU_EVENT_ALLOW ;
363311
364- remove_event (& f , INST_RETIRED );
365- remove_event (& f , INTEL_BR_RETIRED );
366- remove_event (& f , AMD_ZEN_BR_RETIRED );
312+ remove_event (& f , INTEL_ARCH_INSTRUCTIONS_RETIRED );
313+ remove_event (& f , INTEL_ARCH_BRANCHES_RETIRED );
314+ remove_event (& f , AMD_ZEN_BRANCHES_RETIRED );
367315 test_with_filter (vcpu , & f );
368316
369317 ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS ();
@@ -452,9 +400,9 @@ static bool use_amd_pmu(void)
452400 * - Sapphire Rapids, Ice Lake, Cascade Lake, Skylake.
453401 */
/* Intel MEM_INST_RETIRED event select plus load/store/both umasks. */
#define MEM_INST_RETIRED		0xD0
#define MEM_INST_RETIRED_LOAD		RAW_EVENT(MEM_INST_RETIRED, 0x81)
#define MEM_INST_RETIRED_STORE		RAW_EVENT(MEM_INST_RETIRED, 0x82)
#define MEM_INST_RETIRED_LOAD_STORE	RAW_EVENT(MEM_INST_RETIRED, 0x83)
458406
459407static bool supports_event_mem_inst_retired (void )
460408{
@@ -486,9 +434,9 @@ static bool supports_event_mem_inst_retired(void)
486434 * B1 Processors Volume 1 of 2.
487435 */
/* AMD LS Dispatch event select plus load/store/load-op-store unit masks. */
#define LS_DISPATCH		0x29
#define LS_DISPATCH_LOAD	RAW_EVENT(LS_DISPATCH, BIT(0))
#define LS_DISPATCH_STORE	RAW_EVENT(LS_DISPATCH, BIT(1))
#define LS_DISPATCH_LOAD_STORE	RAW_EVENT(LS_DISPATCH, BIT(2))

/* Masked entry that matches (includes) the given event_select/mask/match. */
#define INCLUDE_MASKED_ENTRY(event_select, mask, match) \
	KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, false)
@@ -729,14 +677,14 @@ static void add_dummy_events(uint64_t *events, int nevents)
729677
730678static void test_masked_events (struct kvm_vcpu * vcpu )
731679{
732- int nevents = MAX_FILTER_EVENTS - MAX_TEST_EVENTS ;
733- uint64_t events [MAX_FILTER_EVENTS ];
680+ int nevents = KVM_PMU_EVENT_FILTER_MAX_EVENTS - MAX_TEST_EVENTS ;
681+ uint64_t events [KVM_PMU_EVENT_FILTER_MAX_EVENTS ];
734682
735683 /* Run the test cases against a sparse PMU event filter. */
736684 run_masked_events_tests (vcpu , events , 0 );
737685
738686 /* Run the test cases against a dense PMU event filter. */
739- add_dummy_events (events , MAX_FILTER_EVENTS );
687+ add_dummy_events (events , KVM_PMU_EVENT_FILTER_MAX_EVENTS );
740688 run_masked_events_tests (vcpu , events , nevents );
741689}
742690
@@ -809,20 +757,19 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu)
809757 TEST_ASSERT (!r , "Masking non-existent fixed counters should be allowed" );
810758}
811759
812- static void intel_run_fixed_counter_guest_code (uint8_t fixed_ctr_idx )
760+ static void intel_run_fixed_counter_guest_code (uint8_t idx )
813761{
814762 for (;;) {
815763 wrmsr (MSR_CORE_PERF_GLOBAL_CTRL , 0 );
816- wrmsr (MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx , 0 );
764+ wrmsr (MSR_CORE_PERF_FIXED_CTR0 + idx , 0 );
817765
818766 /* Only OS_EN bit is enabled for fixed counter[idx]. */
819- wrmsr (MSR_CORE_PERF_FIXED_CTR_CTRL , BIT_ULL (4 * fixed_ctr_idx ));
820- wrmsr (MSR_CORE_PERF_GLOBAL_CTRL ,
821- BIT_ULL (INTEL_PMC_IDX_FIXED + fixed_ctr_idx ));
767+ wrmsr (MSR_CORE_PERF_FIXED_CTR_CTRL , FIXED_PMC_CTRL (idx , FIXED_PMC_KERNEL ));
768+ wrmsr (MSR_CORE_PERF_GLOBAL_CTRL , FIXED_PMC_GLOBAL_CTRL_ENABLE (idx ));
822769 __asm__ __volatile__("loop ." : "+c" ((int ){NUM_BRANCHES }));
823770 wrmsr (MSR_CORE_PERF_GLOBAL_CTRL , 0 );
824771
825- GUEST_SYNC (rdmsr (MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx ));
772+ GUEST_SYNC (rdmsr (MSR_CORE_PERF_FIXED_CTR0 + idx ));
826773 }
827774}
828775
0 commit comments