@@ -28,27 +28,65 @@ struct vgic_state_iter {
2828 int nr_lpis ;
2929 int dist_id ;
3030 int vcpu_id ;
31- int intid ;
31+ unsigned long intid ;
3232 int lpi_idx ;
33- u32 * lpi_array ;
3433};
3534
36- static void iter_next (struct vgic_state_iter * iter )
35+ static void iter_next (struct kvm * kvm , struct vgic_state_iter * iter )
3736{
37+ struct vgic_dist * dist = & kvm -> arch .vgic ;
38+
3839 if (iter -> dist_id == 0 ) {
3940 iter -> dist_id ++ ;
4041 return ;
4142 }
4243
44+ /*
45+ * Let the xarray drive the iterator after the last SPI, as the iterator
46+ * has exhausted the sequentially-allocated INTID space.
47+ */
48+ if (iter -> intid >= (iter -> nr_spis + VGIC_NR_PRIVATE_IRQS - 1 )) {
49+ if (iter -> lpi_idx < iter -> nr_lpis )
50+ xa_find_after (& dist -> lpi_xa , & iter -> intid ,
51+ VGIC_LPI_MAX_INTID ,
52+ LPI_XA_MARK_DEBUG_ITER );
53+ iter -> lpi_idx ++ ;
54+ return ;
55+ }
56+
4357 iter -> intid ++ ;
4458 if (iter -> intid == VGIC_NR_PRIVATE_IRQS &&
4559 ++ iter -> vcpu_id < iter -> nr_cpus )
4660 iter -> intid = 0 ;
61+ }
4762
48- if (iter -> intid >= (iter -> nr_spis + VGIC_NR_PRIVATE_IRQS )) {
49- if (iter -> lpi_idx < iter -> nr_lpis )
50- iter -> intid = iter -> lpi_array [iter -> lpi_idx ];
51- iter -> lpi_idx ++ ;
63+ static int iter_mark_lpis (struct kvm * kvm )
64+ {
65+ struct vgic_dist * dist = & kvm -> arch .vgic ;
66+ struct vgic_irq * irq ;
67+ unsigned long intid ;
68+ int nr_lpis = 0 ;
69+
70+ xa_for_each (& dist -> lpi_xa , intid , irq ) {
71+ if (!vgic_try_get_irq_kref (irq ))
72+ continue ;
73+
74+ xa_set_mark (& dist -> lpi_xa , intid , LPI_XA_MARK_DEBUG_ITER );
75+ nr_lpis ++ ;
76+ }
77+
78+ return nr_lpis ;
79+ }
80+
81+ static void iter_unmark_lpis (struct kvm * kvm )
82+ {
83+ struct vgic_dist * dist = & kvm -> arch .vgic ;
84+ struct vgic_irq * irq ;
85+ unsigned long intid ;
86+
87+ xa_for_each (& dist -> lpi_xa , intid , irq ) {
88+ xa_clear_mark (& dist -> lpi_xa , intid , LPI_XA_MARK_DEBUG_ITER );
89+ vgic_put_irq (kvm , irq );
5290 }
5391}
5492
@@ -61,15 +99,12 @@ static void iter_init(struct kvm *kvm, struct vgic_state_iter *iter,
6199
62100 iter -> nr_cpus = nr_cpus ;
63101 iter -> nr_spis = kvm -> arch .vgic .nr_spis ;
64- if (kvm -> arch .vgic .vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ) {
65- iter -> nr_lpis = vgic_copy_lpi_list (kvm , NULL , & iter -> lpi_array );
66- if (iter -> nr_lpis < 0 )
67- iter -> nr_lpis = 0 ;
68- }
102+ if (kvm -> arch .vgic .vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 )
103+ iter -> nr_lpis = iter_mark_lpis (kvm );
69104
70105 /* Fast forward to the right position if needed */
71106 while (pos -- )
72- iter_next (iter );
107+ iter_next (kvm , iter );
73108}
74109
75110static bool end_of_vgic (struct vgic_state_iter * iter )
@@ -114,7 +149,7 @@ static void *vgic_debug_next(struct seq_file *s, void *v, loff_t *pos)
114149 struct vgic_state_iter * iter = kvm -> arch .vgic .iter ;
115150
116151 ++ * pos ;
117- iter_next (iter );
152+ iter_next (kvm , iter );
118153 if (end_of_vgic (iter ))
119154 iter = NULL ;
120155 return iter ;
@@ -134,13 +169,14 @@ static void vgic_debug_stop(struct seq_file *s, void *v)
134169
135170 mutex_lock (& kvm -> arch .config_lock );
136171 iter = kvm -> arch .vgic .iter ;
137- kfree ( iter -> lpi_array );
172+ iter_unmark_lpis ( kvm );
138173 kfree (iter );
139174 kvm -> arch .vgic .iter = NULL ;
140175 mutex_unlock (& kvm -> arch .config_lock );
141176}
142177
143- static void print_dist_state (struct seq_file * s , struct vgic_dist * dist )
178+ static void print_dist_state (struct seq_file * s , struct vgic_dist * dist ,
179+ struct vgic_state_iter * iter )
144180{
145181 bool v3 = dist -> vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ;
146182
@@ -149,7 +185,7 @@ static void print_dist_state(struct seq_file *s, struct vgic_dist *dist)
149185 seq_printf (s , "vgic_model:\t%s\n" , v3 ? "GICv3" : "GICv2" );
150186 seq_printf (s , "nr_spis:\t%d\n" , dist -> nr_spis );
151187 if (v3 )
152- seq_printf (s , "nr_lpis:\t%d\n" , atomic_read ( & dist -> lpi_count ) );
188+ seq_printf (s , "nr_lpis:\t%d\n" , iter -> nr_lpis );
153189 seq_printf (s , "enabled:\t%d\n" , dist -> enabled );
154190 seq_printf (s , "\n" );
155191
@@ -236,7 +272,7 @@ static int vgic_debug_show(struct seq_file *s, void *v)
236272 unsigned long flags ;
237273
238274 if (iter -> dist_id == 0 ) {
239- print_dist_state (s , & kvm -> arch .vgic );
275+ print_dist_state (s , & kvm -> arch .vgic , iter );
240276 return 0 ;
241277 }
242278
@@ -246,11 +282,13 @@ static int vgic_debug_show(struct seq_file *s, void *v)
246282 if (iter -> vcpu_id < iter -> nr_cpus )
247283 vcpu = kvm_get_vcpu (kvm , iter -> vcpu_id );
248284
285+ /*
286+ * Expect this to succeed, as iter_mark_lpis() takes a reference on
287+ * every LPI to be visited.
288+ */
249289 irq = vgic_get_irq (kvm , vcpu , iter -> intid );
250- if (!irq ) {
251- seq_printf (s , " LPI %4d freed\n" , iter -> intid );
252- return 0 ;
253- }
290+ if (WARN_ON_ONCE (!irq ))
291+ return - EINVAL ;
254292
255293 raw_spin_lock_irqsave (& irq -> irq_lock , flags );
256294 print_irq_state (s , irq , vcpu );