
Commit 8794b4f

Author: Marc Zyngier (author and committer)
Merge branch kvm-arm64/per-vcpu-host-pmu-data into kvmarm-master/next
* kvm-arm64/per-vcpu-host-pmu-data:
  : .
  : Pass the host PMU state in the vcpu to avoid the use of additional
  : shared memory between EL1 and EL2 (this obviously only applies
  : to nVHE and Protected setups).
  :
  : Patches courtesy of Fuad Tabba.
  : .
  KVM: arm64: pmu: Restore compilation when HW_PERF_EVENTS isn't selected
  KVM: arm64: Reenable pmu in Protected Mode
  KVM: arm64: Pass pmu events to hyp via vcpu
  KVM: arm64: Repack struct kvm_pmu to reduce size
  KVM: arm64: Wrapper for getting pmu_events

Signed-off-by: Marc Zyngier <maz@kernel.org>
2 parents: ec2cff6 + 20492a6

7 files changed: 71 additions & 48 deletions
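
The core of the series: previously, the EL2 switch code recovered the host's PMU event masks by walking back (via container_of()) from the host context into kvm_host_data, per-CPU memory shared between EL1 and EL2. Now EL1 snapshots a private per-CPU copy into the vcpu before each run, and EL2 reads only the vcpu. A runnable toy model of the new flow, assuming nothing beyond the hunks below — identifiers like toy_vcpu and cpu_events are hypothetical stand-ins, and the real per-CPU storage, exception levels, and sysreg writes are all elided:

#include <stdio.h>

/* Simplified copy of the struct introduced in include/kvm/arm_pmu.h. */
struct kvm_pmu_events {
        unsigned int events_host;
        unsigned int events_guest;
};

/* Stand-in for the per-CPU variable owned by the host (EL1). */
static struct kvm_pmu_events cpu_events = { 0x1, 0x2 };

struct toy_vcpu {
        struct kvm_pmu_events events;   /* embedded copy, read by "EL2" */
};

/* "EL1" side: snapshot per-CPU state into the vcpu before every run
 * (in the kernel this is kvm_pmu_update_vcpu_events, with IRQs off). */
static void update_vcpu_events(struct toy_vcpu *vcpu)
{
        vcpu->events = cpu_events;      /* plain struct copy */
}

/* "EL2" side: consumes only the vcpu; no EL1/EL2 shared memory needed. */
static void pmu_switch_to_guest(struct toy_vcpu *vcpu)
{
        printf("disable host events 0x%x, enable guest events 0x%x\n",
               vcpu->events.events_host, vcpu->events.events_guest);
}

int main(void)
{
        struct toy_vcpu vcpu;

        update_vcpu_events(&vcpu);
        pmu_switch_to_guest(&vcpu);
        return 0;
}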

arch/arm64/include/asm/kvm_host.h

Lines changed: 0 additions & 11 deletions

@@ -273,14 +273,8 @@ struct kvm_cpu_context {
         struct kvm_vcpu *__hyp_running_vcpu;
 };
 
-struct kvm_pmu_events {
-        u32 events_host;
-        u32 events_guest;
-};
-
 struct kvm_host_data {
         struct kvm_cpu_context host_ctxt;
-        struct kvm_pmu_events pmu_events;
 };
 
 struct kvm_host_psci_config {
@@ -820,9 +814,6 @@ void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);
 #ifdef CONFIG_KVM
 void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
 void kvm_clr_pmu_events(u32 clr);
-
-void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
-void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
 #else
 static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
 static inline void kvm_clr_pmu_events(u32 clr) {}
@@ -854,8 +845,6 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
 #define kvm_has_mte(kvm)                                        \
         (system_supports_mte() &&                               \
          test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))
-#define kvm_vcpu_has_pmu(vcpu)                                  \
-        (test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
 
 int kvm_trng_call(struct kvm_vcpu *vcpu);
 #ifdef CONFIG_KVM

arch/arm64/kvm/Makefile

Lines changed: 2 additions & 2 deletions

@@ -13,7 +13,7 @@ obj-$(CONFIG_KVM) += hyp/
 kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
         inject_fault.o va_layout.o handle_exit.o \
         guest.o debug.o reset.o sys_regs.o \
-        vgic-sys-reg-v3.o fpsimd.o pmu.o pkvm.o \
+        vgic-sys-reg-v3.o fpsimd.o pkvm.o \
         arch_timer.o trng.o vmid.o \
         vgic/vgic.o vgic/vgic-init.o \
         vgic/vgic-irqfd.o vgic/vgic-v2.o \
@@ -22,7 +22,7 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
         vgic/vgic-mmio-v3.o vgic/vgic-kvm-device.o \
         vgic/vgic-its.o vgic/vgic-debug.o
 
-kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o
+kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o pmu.o
 
 always-y := hyp_constants.h hyp-constants.s

arch/arm64/kvm/arm.c

Lines changed: 2 additions & 0 deletions

@@ -882,6 +882,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 
                 kvm_vgic_flush_hwstate(vcpu);
 
+                kvm_pmu_update_vcpu_events(vcpu);
+
                 /*
                  * Ensure we set mode to IN_GUEST_MODE after we disable
                  * interrupts and before the final VCPU requests check.
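
For context on the placement: per the comment added in include/kvm/arm_pmu.h below, the snapshot must run after interrupts are disabled so that a perf interrupt cannot rewrite the per-CPU masks mid-copy. A condensed view of the run-loop ordering; the lines around the new call are paraphrased from memory of the surrounding function, not shown in this diff:

/* kvm_arch_vcpu_ioctl_run(), heavily condensed (paraphrase). */
local_irq_disable();            /* IRQs masked from here on */

kvm_vgic_flush_hwstate(vcpu);

/* New: snapshot this CPU's PMU event masks into the vcpu; safe
 * because nothing can update the per-CPU copy while IRQs are off. */
kvm_pmu_update_vcpu_events(vcpu);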

arch/arm64/kvm/hyp/nvhe/switch.c

Lines changed: 11 additions & 14 deletions

@@ -153,13 +153,10 @@ static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
 /**
  * Disable host events, enable guest events
  */
-static bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
+#ifdef CONFIG_HW_PERF_EVENTS
+static bool __pmu_switch_to_guest(struct kvm_vcpu *vcpu)
 {
-        struct kvm_host_data *host;
-        struct kvm_pmu_events *pmu;
-
-        host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
-        pmu = &host->pmu_events;
+        struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;
 
         if (pmu->events_host)
                 write_sysreg(pmu->events_host, pmcntenclr_el0);
@@ -173,20 +170,20 @@ static bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
 /**
  * Disable guest events, enable host events
  */
-static void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
+static void __pmu_switch_to_host(struct kvm_vcpu *vcpu)
 {
-        struct kvm_host_data *host;
-        struct kvm_pmu_events *pmu;
-
-        host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
-        pmu = &host->pmu_events;
+        struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;
 
         if (pmu->events_guest)
                 write_sysreg(pmu->events_guest, pmcntenclr_el0);
 
         if (pmu->events_host)
                 write_sysreg(pmu->events_host, pmcntenset_el0);
 }
+#else
+#define __pmu_switch_to_guest(v)        ({ false; })
+#define __pmu_switch_to_host(v)         do {} while (0)
+#endif
 
 /**
  * Handler for protected VM MSR, MRS or System instruction execution in AArch64.
@@ -304,7 +301,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
         host_ctxt->__hyp_running_vcpu = vcpu;
         guest_ctxt = &vcpu->arch.ctxt;
 
-        pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);
+        pmu_switch_needed = __pmu_switch_to_guest(vcpu);
 
         __sysreg_save_state_nvhe(host_ctxt);
         /*
@@ -366,7 +363,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
         __debug_restore_host_buffers_nvhe(vcpu);
 
         if (pmu_switch_needed)
-                __pmu_switch_to_host(host_ctxt);
+                __pmu_switch_to_host(vcpu);
 
         /* Returning to host will clear PSR.I, remask PMR if needed */
         if (system_uses_irq_prio_masking())
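
Because the Makefile change above now builds pmu.o (alongside pmu-emul.o) only when CONFIG_HW_PERF_EVENTS is set, the switch code grows stubs that keep both call sites well formed. `({ false; })` is a GNU C statement expression, so the guest-entry stub still yields a value assignable to pmu_switch_needed, while `do {} while (0)` gives the host-exit stub ordinary statement semantics. A minimal, self-contained illustration of the two idioms (toy code, not from the kernel; needs GNU C, i.e. gcc or clang):

#include <stdio.h>
#include <stdbool.h>

/* GNU C statement expression: the block evaluates to its last expression,
 * so the macro can sit on the right-hand side of an assignment. */
#define stub_returning_false()  ({ false; })
/* Classic no-op statement macro: safe even inside an un-braced if/else. */
#define stub_noop()             do { } while (0)

int main(void)
{
        bool switch_needed = stub_returning_false();

        if (!switch_needed)
                stub_noop();    /* expands to one well-formed statement */

        printf("switch_needed = %d\n", switch_needed);
        return 0;
}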

arch/arm64/kvm/pmu-emul.c

Lines changed: 1 addition & 2 deletions

@@ -774,8 +774,7 @@ void kvm_host_pmu_init(struct arm_pmu *pmu)
 {
         struct arm_pmu_entry *entry;
 
-        if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF ||
-            is_protected_kvm_enabled())
+        if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF)
                 return;
 
         mutex_lock(&arm_pmus_lock);

arch/arm64/kvm/pmu.c

Lines changed: 23 additions & 17 deletions

@@ -5,7 +5,8 @@
  */
 #include <linux/kvm_host.h>
 #include <linux/perf_event.h>
-#include <asm/kvm_hyp.h>
+
+static DEFINE_PER_CPU(struct kvm_pmu_events, kvm_pmu_events);
 
 /*
  * Given the perf event attributes and system type, determine
@@ -25,35 +26,40 @@ static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
         return (attr->exclude_host != attr->exclude_guest);
 }
 
+struct kvm_pmu_events *kvm_get_pmu_events(void)
+{
+        return this_cpu_ptr(&kvm_pmu_events);
+}
+
 /*
  * Add events to track that we may want to switch at guest entry/exit
  * time.
  */
 void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
 {
-        struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
+        struct kvm_pmu_events *pmu = kvm_get_pmu_events();
 
-        if (!kvm_arm_support_pmu_v3() || !ctx || !kvm_pmu_switch_needed(attr))
+        if (!kvm_arm_support_pmu_v3() || !pmu || !kvm_pmu_switch_needed(attr))
                 return;
 
         if (!attr->exclude_host)
-                ctx->pmu_events.events_host |= set;
+                pmu->events_host |= set;
         if (!attr->exclude_guest)
-                ctx->pmu_events.events_guest |= set;
+                pmu->events_guest |= set;
 }
 
 /*
  * Stop tracking events
  */
 void kvm_clr_pmu_events(u32 clr)
 {
-        struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
+        struct kvm_pmu_events *pmu = kvm_get_pmu_events();
 
-        if (!kvm_arm_support_pmu_v3() || !ctx)
+        if (!kvm_arm_support_pmu_v3() || !pmu)
                 return;
 
-        ctx->pmu_events.events_host &= ~clr;
-        ctx->pmu_events.events_guest &= ~clr;
+        pmu->events_host &= ~clr;
+        pmu->events_guest &= ~clr;
 }
 
 #define PMEVTYPER_READ_CASE(idx)                                \
@@ -169,16 +175,16 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
  */
 void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
 {
-        struct kvm_host_data *host;
+        struct kvm_pmu_events *pmu;
         u32 events_guest, events_host;
 
         if (!kvm_arm_support_pmu_v3() || !has_vhe())
                 return;
 
         preempt_disable();
-        host = this_cpu_ptr_hyp_sym(kvm_host_data);
-        events_guest = host->pmu_events.events_guest;
-        events_host = host->pmu_events.events_host;
+        pmu = kvm_get_pmu_events();
+        events_guest = pmu->events_guest;
+        events_host = pmu->events_host;
 
         kvm_vcpu_pmu_enable_el0(events_guest);
         kvm_vcpu_pmu_disable_el0(events_host);
@@ -190,15 +196,15 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
  */
 void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
 {
-        struct kvm_host_data *host;
+        struct kvm_pmu_events *pmu;
         u32 events_guest, events_host;
 
         if (!kvm_arm_support_pmu_v3() || !has_vhe())
                 return;
 
-        host = this_cpu_ptr_hyp_sym(kvm_host_data);
-        events_guest = host->pmu_events.events_guest;
-        events_host = host->pmu_events.events_host;
+        pmu = kvm_get_pmu_events();
+        events_guest = pmu->events_guest;
+        events_host = pmu->events_host;
 
         kvm_vcpu_pmu_enable_el0(events_host);
         kvm_vcpu_pmu_disable_el0(events_guest);
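
The state that used to live in the EL1/EL2-shared kvm_host_data is now an ordinary kernel per-CPU variable, which is why the asm/kvm_hyp.h include (and this_cpu_ptr_hyp_sym()) can go. The essence of the pattern, reduced from the hunks above — a sketch, not a standalone module:

#include <linux/percpu.h>

/* One private copy per physical CPU, visible to the host (EL1) only. */
static DEFINE_PER_CPU(struct kvm_pmu_events, kvm_pmu_events);

struct kvm_pmu_events *kvm_get_pmu_events(void)
{
        /*
         * The returned pointer is only meaningful while the caller is
         * pinned to this CPU; kvm_vcpu_pmu_restore_guest() above wraps
         * its use in preempt_disable() for exactly that reason.
         */
        return this_cpu_ptr(&kvm_pmu_events);
}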

include/kvm/arm_pmu.h

Lines changed: 32 additions & 2 deletions

@@ -20,13 +20,19 @@ struct kvm_pmc {
         struct perf_event *perf_event;
 };
 
+struct kvm_pmu_events {
+        u32 events_host;
+        u32 events_guest;
+};
+
 struct kvm_pmu {
-        int irq_num;
+        struct irq_work overflow_work;
+        struct kvm_pmu_events events;
         struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
         DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
+        int irq_num;
         bool created;
         bool irq_level;
-        struct irq_work overflow_work;
 };
 
 struct arm_pmu_entry {
@@ -66,6 +72,25 @@ int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
 int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
                             struct kvm_device_attr *attr);
 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
+
+struct kvm_pmu_events *kvm_get_pmu_events(void);
+void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
+void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
+
+#define kvm_vcpu_has_pmu(vcpu)                                  \
+        (test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
+
+/*
+ * Updates the vcpu's view of the pmu events for this cpu.
+ * Must be called before every vcpu run after disabling interrupts, to ensure
+ * that an interrupt cannot fire and update the structure.
+ */
+#define kvm_pmu_update_vcpu_events(vcpu)                        \
+        do {                                                    \
+                if (!has_vhe() && kvm_vcpu_has_pmu(vcpu))       \
+                        vcpu->arch.pmu.events = *kvm_get_pmu_events(); \
+        } while (0)
+
 #else
 struct kvm_pmu {
 };
@@ -127,6 +152,11 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
         return 0;
 }
 
+#define kvm_vcpu_has_pmu(vcpu)          ({ false; })
+static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
+
 #endif
 
 #endif
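
The member reshuffle in struct kvm_pmu implements the "Repack struct kvm_pmu to reduce size" patch: placing the pointer-sized, alignment-hungry members first and grouping int irq_num with the two bools removes internal padding, offsetting part of the cost of embedding the new events field. A runnable toy demonstration of the principle — hypothetical field names and simplified types, not the real kvm_pmu layout:

#include <stdio.h>

struct before {
        int     irq;            /* 4 bytes + 4 pad (next field is 8-aligned) */
        void    *counters;
        char    created;        /* 1 byte... */
        char    irq_level;      /* ...+ 6 pad to realign the next field */
        void    *work;
};

struct after {                  /* same fields, pointers first, small last */
        void    *counters;
        void    *work;
        int     irq;
        char    created;
        char    irq_level;      /* single 2-byte tail pad on LP64 */
};

int main(void)
{
        printf("before: %zu, after: %zu\n",
               sizeof(struct before), sizeof(struct after));
        return 0;
}

On an LP64 target this prints "before: 32, after: 24" — an 8-byte saving purely from member ordering.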
