Skip to content

Commit 20492a6

Browse files
author
Marc Zyngier
committed
KVM: arm64: pmu: Restore compilation when HW_PERF_EVENTS isn't selected
Moving kvm_pmu_events into the vcpu (and referring to it) broke the somewhat unusual case where the kernel has no support for a PMU at all. In order to solve this, move things around a bit so that we can easily avoid referring to the pmu structure outside of PMU-aware code. As a bonus, pmu.c isn't compiled in when HW_PERF_EVENTS isn't selected. Reported-by: kernel test robot <lkp@intel.com> Reviewed-by: Fuad Tabba <tabba@google.com> Signed-off-by: Marc Zyngier <maz@kernel.org> Link: https://lore.kernel.org/r/202205161814.KQHpOzsJ-lkp@intel.com
1 parent 722625c commit 20492a6

5 files changed

Lines changed: 31 additions & 21 deletions

File tree

arch/arm64/include/asm/kvm_host.h

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -789,10 +789,6 @@ void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);
789789
#ifdef CONFIG_KVM
790790
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
791791
void kvm_clr_pmu_events(u32 clr);
792-
793-
struct kvm_pmu_events *kvm_get_pmu_events(void);
794-
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
795-
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
796792
#else
797793
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
798794
static inline void kvm_clr_pmu_events(u32 clr) {}
@@ -824,8 +820,6 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
824820
#define kvm_has_mte(kvm) \
825821
(system_supports_mte() && \
826822
test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))
827-
#define kvm_vcpu_has_pmu(vcpu) \
828-
(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
829823

830824
int kvm_trng_call(struct kvm_vcpu *vcpu);
831825
#ifdef CONFIG_KVM

arch/arm64/kvm/Makefile

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ obj-$(CONFIG_KVM) += hyp/
1313
kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
1414
inject_fault.o va_layout.o handle_exit.o \
1515
guest.o debug.o reset.o sys_regs.o \
16-
vgic-sys-reg-v3.o fpsimd.o pmu.o pkvm.o \
16+
vgic-sys-reg-v3.o fpsimd.o pkvm.o \
1717
arch_timer.o trng.o vmid.o \
1818
vgic/vgic.o vgic/vgic-init.o \
1919
vgic/vgic-irqfd.o vgic/vgic-v2.o \
@@ -22,7 +22,7 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
2222
vgic/vgic-mmio-v3.o vgic/vgic-kvm-device.o \
2323
vgic/vgic-its.o vgic/vgic-debug.o
2424

25-
kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o
25+
kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o pmu.o
2626

2727
always-y := hyp_constants.h hyp-constants.s
2828

arch/arm64/kvm/arm.c

Lines changed: 0 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -751,19 +751,6 @@ static int noinstr kvm_arm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
751751
return ret;
752752
}
753753

754-
/*
755-
* Updates the vcpu's view of the pmu events for this cpu.
756-
* Must be called before every vcpu run after disabling interrupts, to ensure
757-
* that an interrupt cannot fire and update the structure.
758-
*/
759-
static void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu)
760-
{
761-
if (has_vhe() || !kvm_vcpu_has_pmu(vcpu))
762-
return;
763-
764-
vcpu->arch.pmu.events = *kvm_get_pmu_events();
765-
}
766-
767754
/**
768755
* kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
769756
* @vcpu: The VCPU pointer

arch/arm64/kvm/hyp/nvhe/switch.c

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -123,6 +123,7 @@ static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
123123
/**
124124
* Disable host events, enable guest events
125125
*/
126+
#ifdef CONFIG_HW_PERF_EVENTS
126127
static bool __pmu_switch_to_guest(struct kvm_vcpu *vcpu)
127128
{
128129
struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;
@@ -149,6 +150,10 @@ static void __pmu_switch_to_host(struct kvm_vcpu *vcpu)
149150
if (pmu->events_host)
150151
write_sysreg(pmu->events_host, pmcntenset_el0);
151152
}
153+
#else
154+
#define __pmu_switch_to_guest(v) ({ false; })
155+
#define __pmu_switch_to_host(v) do {} while (0)
156+
#endif
152157

153158
/**
154159
* Handler for protected VM MSR, MRS or System instruction execution in AArch64.

include/kvm/arm_pmu.h

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -72,6 +72,25 @@ int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
7272
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
7373
struct kvm_device_attr *attr);
7474
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
75+
76+
struct kvm_pmu_events *kvm_get_pmu_events(void);
77+
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
78+
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
79+
80+
#define kvm_vcpu_has_pmu(vcpu) \
81+
(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
82+
83+
/*
84+
* Updates the vcpu's view of the pmu events for this cpu.
85+
* Must be called before every vcpu run after disabling interrupts, to ensure
86+
* that an interrupt cannot fire and update the structure.
87+
*/
88+
#define kvm_pmu_update_vcpu_events(vcpu) \
89+
do { \
90+
if (!has_vhe() && kvm_vcpu_has_pmu(vcpu)) \
91+
vcpu->arch.pmu.events = *kvm_get_pmu_events(); \
92+
} while (0)
93+
7594
#else
7695
struct kvm_pmu {
7796
};
@@ -133,6 +152,11 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
133152
return 0;
134153
}
135154

155+
#define kvm_vcpu_has_pmu(vcpu) ({ false; })
156+
static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
157+
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
158+
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
159+
136160
#endif
137161

138162
#endif

0 commit comments

Comments
 (0)