2121
2222static uint8_t kvm_pmu_version ;
2323static bool kvm_has_perf_caps ;
24+ static bool is_forced_emulation_enabled ;
2425
2526static struct kvm_vm * pmu_vm_create_with_one_vcpu (struct kvm_vcpu * * vcpu ,
2627 void * guest_code ,
@@ -34,6 +35,7 @@ static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
3435 vcpu_init_descriptor_tables (* vcpu );
3536
3637 sync_global_to_guest (vm , kvm_pmu_version );
38+ sync_global_to_guest (vm , is_forced_emulation_enabled );
3739
3840 /*
3941 * Set PERF_CAPABILITIES before PMU version as KVM disallows enabling
@@ -138,37 +140,50 @@ static void guest_assert_event_count(uint8_t idx,
 * If CLFLUSH{,OPT} is supported, flush the cacheline containing (at least) the
 * start of the loop to force LLC references and misses, i.e. to allow testing
 * that those events actually count.
 *
 * If forced emulation is enabled (and specified), force emulation on a subset
 * of the measured code to verify that KVM correctly emulates instructions and
 * branches retired events in conjunction with hardware also counting said
 * events.
 */
142- #define GUEST_MEASURE_EVENT (_msr , _value , clflush ) \
149+ #define GUEST_MEASURE_EVENT (_msr , _value , clflush , FEP ) \
143150do { \
144151 __asm__ __volatile__("wrmsr\n\t" \
145152 clflush "\n\t" \
146153 "mfence\n\t" \
147154 "1: mov $" __stringify(NUM_BRANCHES) ", %%ecx\n\t" \
148- "loop .\n\t" \
149- "mov %%edi, %%ecx\n\t" \
150- "xor %%eax, %%eax\n\t" \
151- "xor %%edx, %%edx\n\t" \
155+ FEP "loop .\n\t" \
156+ FEP "mov %%edi, %%ecx\n\t" \
157+ FEP "xor %%eax, %%eax\n\t" \
158+ FEP "xor %%edx, %%edx\n\t" \
152159 "wrmsr\n\t" \
153160 :: "a"((uint32_t)_value), "d"(_value >> 32), \
154161 "c"(_msr), "D"(_msr) \
155162 ); \
156163} while (0)
157164
165+ #define GUEST_TEST_EVENT (_idx , _event , _pmc , _pmc_msr , _ctrl_msr , _value , FEP ) \
166+ do { \
167+ wrmsr(pmc_msr, 0); \
168+ \
169+ if (this_cpu_has(X86_FEATURE_CLFLUSHOPT)) \
170+ GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflushopt 1f", FEP); \
171+ else if (this_cpu_has(X86_FEATURE_CLFLUSH)) \
172+ GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflush 1f", FEP); \
173+ else \
174+ GUEST_MEASURE_EVENT(_ctrl_msr, _value, "nop", FEP); \
175+ \
176+ guest_assert_event_count(_idx, _event, _pmc, _pmc_msr); \
177+ } while (0)
178+
158179static void __guest_test_arch_event (uint8_t idx , struct kvm_x86_pmu_feature event ,
159180 uint32_t pmc , uint32_t pmc_msr ,
160181 uint32_t ctrl_msr , uint64_t ctrl_msr_value )
161182{
162- wrmsr (pmc_msr , 0 );
163-
164- if (this_cpu_has (X86_FEATURE_CLFLUSHOPT ))
165- GUEST_MEASURE_EVENT (ctrl_msr , ctrl_msr_value , "clflushopt 1f" );
166- else if (this_cpu_has (X86_FEATURE_CLFLUSH ))
167- GUEST_MEASURE_EVENT (ctrl_msr , ctrl_msr_value , "clflush 1f" );
168- else
169- GUEST_MEASURE_EVENT (ctrl_msr , ctrl_msr_value , "nop" );
183+ GUEST_TEST_EVENT (idx , event , pmc , pmc_msr , ctrl_msr , ctrl_msr_value , "" );
170184
171- guest_assert_event_count (idx , event , pmc , pmc_msr );
185+ if (is_forced_emulation_enabled )
186+ GUEST_TEST_EVENT (idx , event , pmc , pmc_msr , ctrl_msr , ctrl_msr_value , KVM_FEP );
172187}
173188
174189#define X86_PMU_FEATURE_NULL \
@@ -553,6 +568,7 @@ int main(int argc, char *argv[])
553568
554569 kvm_pmu_version = kvm_cpu_property (X86_PROPERTY_PMU_VERSION );
555570 kvm_has_perf_caps = kvm_cpu_has (X86_FEATURE_PDCM );
571+ is_forced_emulation_enabled = kvm_is_forced_emulation_enabled ();
556572
557573 test_intel_counters ();
558574
0 commit comments