Skip to content

Commit 85d3ccc

Browse files
oupton authored and Marc Zyngier committed
KVM: arm64: vgic-debug: Use an xarray mark for debug iterator
The vgic debug iterator is the final user of vgic_copy_lpi_list(), but is a bit more complicated to transition to something else. Use a mark in the LPI xarray to record the indices 'known' to the debug iterator. Protect against the LPIs from being freed by associating an additional reference with the xarray mark. Rework iter_next() to let the xarray walk 'drive' the iteration after visiting all of the SGIs, PPIs, and SPIs. Signed-off-by: Oliver Upton <oliver.upton@linux.dev> Link: https://lore.kernel.org/r/20240422200158.2606761-6-oliver.upton@linux.dev Signed-off-by: Marc Zyngier <maz@kernel.org>
1 parent 11f4f8f commit 85d3ccc

4 files changed

Lines changed: 64 additions & 25 deletions

File tree

arch/arm64/kvm/vgic/vgic-debug.c

Lines changed: 60 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -28,27 +28,65 @@ struct vgic_state_iter {
2828
int nr_lpis;
2929
int dist_id;
3030
int vcpu_id;
31-
int intid;
31+
unsigned long intid;
3232
int lpi_idx;
33-
u32 *lpi_array;
3433
};
3534

36-
static void iter_next(struct vgic_state_iter *iter)
35+
static void iter_next(struct kvm *kvm, struct vgic_state_iter *iter)
3736
{
37+
struct vgic_dist *dist = &kvm->arch.vgic;
38+
3839
if (iter->dist_id == 0) {
3940
iter->dist_id++;
4041
return;
4142
}
4243

44+
/*
45+
* Let the xarray drive the iterator after the last SPI, as the iterator
46+
* has exhausted the sequentially-allocated INTID space.
47+
*/
48+
if (iter->intid >= (iter->nr_spis + VGIC_NR_PRIVATE_IRQS - 1)) {
49+
if (iter->lpi_idx < iter->nr_lpis)
50+
xa_find_after(&dist->lpi_xa, &iter->intid,
51+
VGIC_LPI_MAX_INTID,
52+
LPI_XA_MARK_DEBUG_ITER);
53+
iter->lpi_idx++;
54+
return;
55+
}
56+
4357
iter->intid++;
4458
if (iter->intid == VGIC_NR_PRIVATE_IRQS &&
4559
++iter->vcpu_id < iter->nr_cpus)
4660
iter->intid = 0;
61+
}
4762

48-
if (iter->intid >= (iter->nr_spis + VGIC_NR_PRIVATE_IRQS)) {
49-
if (iter->lpi_idx < iter->nr_lpis)
50-
iter->intid = iter->lpi_array[iter->lpi_idx];
51-
iter->lpi_idx++;
63+
static int iter_mark_lpis(struct kvm *kvm)
64+
{
65+
struct vgic_dist *dist = &kvm->arch.vgic;
66+
struct vgic_irq *irq;
67+
unsigned long intid;
68+
int nr_lpis = 0;
69+
70+
xa_for_each(&dist->lpi_xa, intid, irq) {
71+
if (!vgic_try_get_irq_kref(irq))
72+
continue;
73+
74+
xa_set_mark(&dist->lpi_xa, intid, LPI_XA_MARK_DEBUG_ITER);
75+
nr_lpis++;
76+
}
77+
78+
return nr_lpis;
79+
}
80+
81+
static void iter_unmark_lpis(struct kvm *kvm)
82+
{
83+
struct vgic_dist *dist = &kvm->arch.vgic;
84+
struct vgic_irq *irq;
85+
unsigned long intid;
86+
87+
xa_for_each(&dist->lpi_xa, intid, irq) {
88+
xa_clear_mark(&dist->lpi_xa, intid, LPI_XA_MARK_DEBUG_ITER);
89+
vgic_put_irq(kvm, irq);
5290
}
5391
}
5492

@@ -61,15 +99,12 @@ static void iter_init(struct kvm *kvm, struct vgic_state_iter *iter,
6199

62100
iter->nr_cpus = nr_cpus;
63101
iter->nr_spis = kvm->arch.vgic.nr_spis;
64-
if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
65-
iter->nr_lpis = vgic_copy_lpi_list(kvm, NULL, &iter->lpi_array);
66-
if (iter->nr_lpis < 0)
67-
iter->nr_lpis = 0;
68-
}
102+
if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
103+
iter->nr_lpis = iter_mark_lpis(kvm);
69104

70105
/* Fast forward to the right position if needed */
71106
while (pos--)
72-
iter_next(iter);
107+
iter_next(kvm, iter);
73108
}
74109

75110
static bool end_of_vgic(struct vgic_state_iter *iter)
@@ -114,7 +149,7 @@ static void *vgic_debug_next(struct seq_file *s, void *v, loff_t *pos)
114149
struct vgic_state_iter *iter = kvm->arch.vgic.iter;
115150

116151
++*pos;
117-
iter_next(iter);
152+
iter_next(kvm, iter);
118153
if (end_of_vgic(iter))
119154
iter = NULL;
120155
return iter;
@@ -134,13 +169,14 @@ static void vgic_debug_stop(struct seq_file *s, void *v)
134169

135170
mutex_lock(&kvm->arch.config_lock);
136171
iter = kvm->arch.vgic.iter;
137-
kfree(iter->lpi_array);
172+
iter_unmark_lpis(kvm);
138173
kfree(iter);
139174
kvm->arch.vgic.iter = NULL;
140175
mutex_unlock(&kvm->arch.config_lock);
141176
}
142177

143-
static void print_dist_state(struct seq_file *s, struct vgic_dist *dist)
178+
static void print_dist_state(struct seq_file *s, struct vgic_dist *dist,
179+
struct vgic_state_iter *iter)
144180
{
145181
bool v3 = dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3;
146182

@@ -149,7 +185,7 @@ static void print_dist_state(struct seq_file *s, struct vgic_dist *dist)
149185
seq_printf(s, "vgic_model:\t%s\n", v3 ? "GICv3" : "GICv2");
150186
seq_printf(s, "nr_spis:\t%d\n", dist->nr_spis);
151187
if (v3)
152-
seq_printf(s, "nr_lpis:\t%d\n", atomic_read(&dist->lpi_count));
188+
seq_printf(s, "nr_lpis:\t%d\n", iter->nr_lpis);
153189
seq_printf(s, "enabled:\t%d\n", dist->enabled);
154190
seq_printf(s, "\n");
155191

@@ -236,7 +272,7 @@ static int vgic_debug_show(struct seq_file *s, void *v)
236272
unsigned long flags;
237273

238274
if (iter->dist_id == 0) {
239-
print_dist_state(s, &kvm->arch.vgic);
275+
print_dist_state(s, &kvm->arch.vgic, iter);
240276
return 0;
241277
}
242278

@@ -246,11 +282,13 @@ static int vgic_debug_show(struct seq_file *s, void *v)
246282
if (iter->vcpu_id < iter->nr_cpus)
247283
vcpu = kvm_get_vcpu(kvm, iter->vcpu_id);
248284

285+
/*
286+
* Expect this to succeed, as iter_mark_lpis() takes a reference on
287+
* every LPI to be visited.
288+
*/
249289
irq = vgic_get_irq(kvm, vcpu, iter->intid);
250-
if (!irq) {
251-
seq_printf(s, " LPI %4d freed\n", iter->intid);
252-
return 0;
253-
}
290+
if (WARN_ON_ONCE(!irq))
291+
return -EINVAL;
254292

255293
raw_spin_lock_irqsave(&irq->irq_lock, flags);
256294
print_irq_state(s, irq, vcpu);

arch/arm64/kvm/vgic/vgic-its.c

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -316,8 +316,6 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
316316
return 0;
317317
}
318318

319-
#define GIC_LPI_MAX_INTID ((1 << INTERRUPT_ID_BITS_ITS) - 1)
320-
321319
/*
322320
* Create a snapshot of the current LPIs targeting @vcpu, so that we can
323321
* enumerate those LPIs without holding any lock.
@@ -347,7 +345,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
347345
raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
348346
rcu_read_lock();
349347

350-
xas_for_each(&xas, irq, GIC_LPI_MAX_INTID) {
348+
xas_for_each(&xas, irq, VGIC_LPI_MAX_INTID) {
351349
if (i == irq_count)
352350
break;
353351
/* We don't need to "get" the IRQ, as we hold the list lock. */

arch/arm64/kvm/vgic/vgic.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@
1616

1717
#define INTERRUPT_ID_BITS_SPIS 10
1818
#define INTERRUPT_ID_BITS_ITS 16
19+
#define VGIC_LPI_MAX_INTID ((1 << INTERRUPT_ID_BITS_ITS) - 1)
1920
#define VGIC_PRI_BITS 5
2021

2122
#define vgic_irq_is_sgi(intid) ((intid) < VGIC_NR_SGIS)

include/kvm/arm_vgic.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -276,6 +276,8 @@ struct vgic_dist {
276276

277277
/* Protects the lpi_list. */
278278
raw_spinlock_t lpi_list_lock;
279+
280+
#define LPI_XA_MARK_DEBUG_ITER XA_MARK_0
279281
struct xarray lpi_xa;
280282
atomic_t lpi_count;
281283

0 commit comments

Comments (0)