Skip to content

Commit ec2cff6

Browse files
author
Marc Zyngier
committed
Merge branch kvm-arm64/vgic-invlpir into kvmarm-master/next
* kvm-arm64/vgic-invlpir: : . : Implement MMIO-based LPI invalidation for vGICv3. : . KVM: arm64: vgic-v3: Advertise GICR_CTLR.{IR, CES} as a new GICD_IIDR revision KVM: arm64: vgic-v3: Implement MMIO-based LPI invalidation KVM: arm64: vgic-v3: Expose GICR_CTLR.RWP when disabling LPIs irqchip/gic-v3: Exposes bit values for GICR_CTLR.{IR, CES} Signed-off-by: Marc Zyngier <maz@kernel.org>
2 parents 3b8e21e + 49a1a2c commit ec2cff6

7 files changed

Lines changed: 195 additions & 39 deletions

File tree

arch/arm64/kvm/vgic/vgic-init.c

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -319,7 +319,12 @@ int vgic_init(struct kvm *kvm)
319319

320320
vgic_debug_init(kvm);
321321

322-
dist->implementation_rev = 2;
322+
/*
323+
* If userspace didn't set the GIC implementation revision,
324+
* default to the latest and greatest. You know you want it.
325+
*/
326+
if (!dist->implementation_rev)
327+
dist->implementation_rev = KVM_VGIC_IMP_REV_LATEST;
323328
dist->initialized = true;
324329

325330
out:

arch/arm64/kvm/vgic/vgic-its.c

Lines changed: 42 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -683,7 +683,7 @@ int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
683683
if (!vcpu)
684684
return E_ITS_INT_UNMAPPED_INTERRUPT;
685685

686-
if (!vcpu->arch.vgic_cpu.lpis_enabled)
686+
if (!vgic_lpis_enabled(vcpu))
687687
return -EBUSY;
688688

689689
vgic_its_cache_translation(kvm, its, devid, eventid, ite->irq);
@@ -1272,6 +1272,11 @@ static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
12721272
return 0;
12731273
}
12741274

1275+
int vgic_its_inv_lpi(struct kvm *kvm, struct vgic_irq *irq)
1276+
{
1277+
return update_lpi_config(kvm, irq, NULL, true);
1278+
}
1279+
12751280
/*
12761281
* The INV command syncs the configuration bits from the memory table.
12771282
* Must be called with the its_lock mutex held.
@@ -1288,7 +1293,41 @@ static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
12881293
if (!ite)
12891294
return E_ITS_INV_UNMAPPED_INTERRUPT;
12901295

1291-
return update_lpi_config(kvm, ite->irq, NULL, true);
1296+
return vgic_its_inv_lpi(kvm, ite->irq);
1297+
}
1298+
1299+
/**
1300+
* vgic_its_invall - invalidate all LPIs targeting a given vcpu
1301+
* @vcpu: the vcpu for which the RD is targeted by an invalidation
1302+
*
1303+
* Contrary to the INVALL command, this targets a RD instead of a
1304+
* collection, and we don't need to hold the its_lock, since no ITS is
1305+
* involved here.
1306+
*/
1307+
int vgic_its_invall(struct kvm_vcpu *vcpu)
1308+
{
1309+
struct kvm *kvm = vcpu->kvm;
1310+
int irq_count, i = 0;
1311+
u32 *intids;
1312+
1313+
irq_count = vgic_copy_lpi_list(kvm, vcpu, &intids);
1314+
if (irq_count < 0)
1315+
return irq_count;
1316+
1317+
for (i = 0; i < irq_count; i++) {
1318+
struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intids[i]);
1319+
if (!irq)
1320+
continue;
1321+
update_lpi_config(kvm, irq, vcpu, false);
1322+
vgic_put_irq(kvm, irq);
1323+
}
1324+
1325+
kfree(intids);
1326+
1327+
if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
1328+
its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);
1329+
1330+
return 0;
12921331
}
12931332

12941333
/*
@@ -1305,32 +1344,13 @@ static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
13051344
u32 coll_id = its_cmd_get_collection(its_cmd);
13061345
struct its_collection *collection;
13071346
struct kvm_vcpu *vcpu;
1308-
struct vgic_irq *irq;
1309-
u32 *intids;
1310-
int irq_count, i;
13111347

13121348
collection = find_collection(its, coll_id);
13131349
if (!its_is_collection_mapped(collection))
13141350
return E_ITS_INVALL_UNMAPPED_COLLECTION;
13151351

13161352
vcpu = kvm_get_vcpu(kvm, collection->target_addr);
1317-
1318-
irq_count = vgic_copy_lpi_list(kvm, vcpu, &intids);
1319-
if (irq_count < 0)
1320-
return irq_count;
1321-
1322-
for (i = 0; i < irq_count; i++) {
1323-
irq = vgic_get_irq(kvm, NULL, intids[i]);
1324-
if (!irq)
1325-
continue;
1326-
update_lpi_config(kvm, irq, vcpu, false);
1327-
vgic_put_irq(kvm, irq);
1328-
}
1329-
1330-
kfree(intids);
1331-
1332-
if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
1333-
its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);
1353+
vgic_its_invall(vcpu);
13341354

13351355
return 0;
13361356
}

arch/arm64/kvm/vgic/vgic-mmio-v2.c

Lines changed: 15 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -73,9 +73,13 @@ static int vgic_mmio_uaccess_write_v2_misc(struct kvm_vcpu *vcpu,
7373
gpa_t addr, unsigned int len,
7474
unsigned long val)
7575
{
76+
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
77+
u32 reg;
78+
7679
switch (addr & 0x0c) {
7780
case GIC_DIST_IIDR:
78-
if (val != vgic_mmio_read_v2_misc(vcpu, addr, len))
81+
reg = vgic_mmio_read_v2_misc(vcpu, addr, len);
82+
if ((reg ^ val) & ~GICD_IIDR_REVISION_MASK)
7983
return -EINVAL;
8084

8185
/*
@@ -87,8 +91,16 @@ static int vgic_mmio_uaccess_write_v2_misc(struct kvm_vcpu *vcpu,
8791
* migration from old kernels to new kernels with legacy
8892
* userspace.
8993
*/
90-
vcpu->kvm->arch.vgic.v2_groups_user_writable = true;
91-
return 0;
94+
reg = FIELD_GET(GICD_IIDR_REVISION_MASK, reg);
95+
switch (reg) {
96+
case KVM_VGIC_IMP_REV_2:
97+
case KVM_VGIC_IMP_REV_3:
98+
vcpu->kvm->arch.vgic.v2_groups_user_writable = true;
99+
dist->implementation_rev = reg;
100+
return 0;
101+
default:
102+
return -EINVAL;
103+
}
92104
}
93105

94106
vgic_mmio_write_v2_misc(vcpu, addr, len, val);

arch/arm64/kvm/vgic/vgic-mmio-v3.c

Lines changed: 114 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -155,13 +155,27 @@ static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu,
155155
unsigned long val)
156156
{
157157
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
158+
u32 reg;
158159

159160
switch (addr & 0x0c) {
160161
case GICD_TYPER2:
161-
case GICD_IIDR:
162162
if (val != vgic_mmio_read_v3_misc(vcpu, addr, len))
163163
return -EINVAL;
164164
return 0;
165+
case GICD_IIDR:
166+
reg = vgic_mmio_read_v3_misc(vcpu, addr, len);
167+
if ((reg ^ val) & ~GICD_IIDR_REVISION_MASK)
168+
return -EINVAL;
169+
170+
reg = FIELD_GET(GICD_IIDR_REVISION_MASK, reg);
171+
switch (reg) {
172+
case KVM_VGIC_IMP_REV_2:
173+
case KVM_VGIC_IMP_REV_3:
174+
dist->implementation_rev = reg;
175+
return 0;
176+
default:
177+
return -EINVAL;
178+
}
165179
case GICD_CTLR:
166180
/* Not a GICv4.1? No HW SGIs */
167181
if (!kvm_vgic_global_state.has_gicv4_1)
@@ -221,34 +235,58 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
221235
vgic_put_irq(vcpu->kvm, irq);
222236
}
223237

238+
bool vgic_lpis_enabled(struct kvm_vcpu *vcpu)
239+
{
240+
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
241+
242+
return atomic_read(&vgic_cpu->ctlr) == GICR_CTLR_ENABLE_LPIS;
243+
}
244+
224245
static unsigned long vgic_mmio_read_v3r_ctlr(struct kvm_vcpu *vcpu,
225246
gpa_t addr, unsigned int len)
226247
{
227248
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
249+
unsigned long val;
228250

229-
return vgic_cpu->lpis_enabled ? GICR_CTLR_ENABLE_LPIS : 0;
230-
}
251+
val = atomic_read(&vgic_cpu->ctlr);
252+
if (vgic_get_implementation_rev(vcpu) >= KVM_VGIC_IMP_REV_3)
253+
val |= GICR_CTLR_IR | GICR_CTLR_CES;
231254

255+
return val;
256+
}
232257

233258
static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
234259
gpa_t addr, unsigned int len,
235260
unsigned long val)
236261
{
237262
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
238-
bool was_enabled = vgic_cpu->lpis_enabled;
263+
u32 ctlr;
239264

240265
if (!vgic_has_its(vcpu->kvm))
241266
return;
242267

243-
vgic_cpu->lpis_enabled = val & GICR_CTLR_ENABLE_LPIS;
268+
if (!(val & GICR_CTLR_ENABLE_LPIS)) {
269+
/*
270+
* Don't disable if RWP is set, as there is already an
271+
* ongoing disable. Funky guest...
272+
*/
273+
ctlr = atomic_cmpxchg_acquire(&vgic_cpu->ctlr,
274+
GICR_CTLR_ENABLE_LPIS,
275+
GICR_CTLR_RWP);
276+
if (ctlr != GICR_CTLR_ENABLE_LPIS)
277+
return;
244278

245-
if (was_enabled && !vgic_cpu->lpis_enabled) {
246279
vgic_flush_pending_lpis(vcpu);
247280
vgic_its_invalidate_cache(vcpu->kvm);
248-
}
281+
atomic_set_release(&vgic_cpu->ctlr, 0);
282+
} else {
283+
ctlr = atomic_cmpxchg_acquire(&vgic_cpu->ctlr, 0,
284+
GICR_CTLR_ENABLE_LPIS);
285+
if (ctlr != 0)
286+
return;
249287

250-
if (!was_enabled && vgic_cpu->lpis_enabled)
251288
vgic_enable_lpis(vcpu);
289+
}
252290
}
253291

254292
static bool vgic_mmio_vcpu_rdist_is_last(struct kvm_vcpu *vcpu)
@@ -478,11 +516,10 @@ static void vgic_mmio_write_propbase(struct kvm_vcpu *vcpu,
478516
unsigned long val)
479517
{
480518
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
481-
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
482519
u64 old_propbaser, propbaser;
483520

484521
/* Storing a value with LPIs already enabled is undefined */
485-
if (vgic_cpu->lpis_enabled)
522+
if (vgic_lpis_enabled(vcpu))
486523
return;
487524

488525
do {
@@ -513,7 +550,7 @@ static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
513550
u64 old_pendbaser, pendbaser;
514551

515552
/* Storing a value with LPIs already enabled is undefined */
516-
if (vgic_cpu->lpis_enabled)
553+
if (vgic_lpis_enabled(vcpu))
517554
return;
518555

519556
do {
@@ -525,6 +562,63 @@ static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
525562
pendbaser) != old_pendbaser);
526563
}
527564

565+
static unsigned long vgic_mmio_read_sync(struct kvm_vcpu *vcpu,
566+
gpa_t addr, unsigned int len)
567+
{
568+
return !!atomic_read(&vcpu->arch.vgic_cpu.syncr_busy);
569+
}
570+
571+
static void vgic_set_rdist_busy(struct kvm_vcpu *vcpu, bool busy)
572+
{
573+
if (busy) {
574+
atomic_inc(&vcpu->arch.vgic_cpu.syncr_busy);
575+
smp_mb__after_atomic();
576+
} else {
577+
smp_mb__before_atomic();
578+
atomic_dec(&vcpu->arch.vgic_cpu.syncr_busy);
579+
}
580+
}
581+
582+
static void vgic_mmio_write_invlpi(struct kvm_vcpu *vcpu,
583+
gpa_t addr, unsigned int len,
584+
unsigned long val)
585+
{
586+
struct vgic_irq *irq;
587+
588+
/*
589+
* If the guest wrote only to the upper 32bit part of the
590+
* register, drop the write on the floor, as it is only for
591+
* vPEs (which we don't support for obvious reasons).
592+
*
593+
* Also discard the access if LPIs are not enabled.
594+
*/
595+
if ((addr & 4) || !vgic_lpis_enabled(vcpu))
596+
return;
597+
598+
vgic_set_rdist_busy(vcpu, true);
599+
600+
irq = vgic_get_irq(vcpu->kvm, NULL, lower_32_bits(val));
601+
if (irq) {
602+
vgic_its_inv_lpi(vcpu->kvm, irq);
603+
vgic_put_irq(vcpu->kvm, irq);
604+
}
605+
606+
vgic_set_rdist_busy(vcpu, false);
607+
}
608+
609+
static void vgic_mmio_write_invall(struct kvm_vcpu *vcpu,
610+
gpa_t addr, unsigned int len,
611+
unsigned long val)
612+
{
613+
/* See vgic_mmio_write_invlpi() for the early return rationale */
614+
if ((addr & 4) || !vgic_lpis_enabled(vcpu))
615+
return;
616+
617+
vgic_set_rdist_busy(vcpu, true);
618+
vgic_its_invall(vcpu);
619+
vgic_set_rdist_busy(vcpu, false);
620+
}
621+
528622
/*
529623
* The GICv3 per-IRQ registers are split to control PPIs and SGIs in the
530624
* redistributors, while SPIs are covered by registers in the distributor
@@ -630,6 +724,15 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
630724
REGISTER_DESC_WITH_LENGTH(GICR_PENDBASER,
631725
vgic_mmio_read_pendbase, vgic_mmio_write_pendbase, 8,
632726
VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
727+
REGISTER_DESC_WITH_LENGTH(GICR_INVLPIR,
728+
vgic_mmio_read_raz, vgic_mmio_write_invlpi, 8,
729+
VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
730+
REGISTER_DESC_WITH_LENGTH(GICR_INVALLR,
731+
vgic_mmio_read_raz, vgic_mmio_write_invall, 8,
732+
VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
733+
REGISTER_DESC_WITH_LENGTH(GICR_SYNCR,
734+
vgic_mmio_read_sync, vgic_mmio_write_wi, 4,
735+
VGIC_ACCESS_32bit),
633736
REGISTER_DESC_WITH_LENGTH(GICR_IDREGS,
634737
vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
635738
VGIC_ACCESS_32bit),

arch/arm64/kvm/vgic/vgic.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -98,6 +98,11 @@
9898
#define DEBUG_SPINLOCK_BUG_ON(p)
9999
#endif
100100

101+
static inline u32 vgic_get_implementation_rev(struct kvm_vcpu *vcpu)
102+
{
103+
return vcpu->kvm->arch.vgic.implementation_rev;
104+
}
105+
101106
/* Requires the irq_lock to be held by the caller. */
102107
static inline bool irq_is_pending(struct vgic_irq *irq)
103108
{
@@ -308,6 +313,7 @@ static inline bool vgic_dist_overlap(struct kvm *kvm, gpa_t base, size_t size)
308313
(base < d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE);
309314
}
310315

316+
bool vgic_lpis_enabled(struct kvm_vcpu *vcpu);
311317
int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr);
312318
int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
313319
u32 devid, u32 eventid, struct vgic_irq **irq);
@@ -317,6 +323,10 @@ void vgic_lpi_translation_cache_init(struct kvm *kvm);
317323
void vgic_lpi_translation_cache_destroy(struct kvm *kvm);
318324
void vgic_its_invalidate_cache(struct kvm *kvm);
319325

326+
/* GICv4.1 MMIO interface */
327+
int vgic_its_inv_lpi(struct kvm *kvm, struct vgic_irq *irq);
328+
int vgic_its_invall(struct kvm_vcpu *vcpu);
329+
320330
bool vgic_supports_direct_msis(struct kvm *kvm);
321331
int vgic_v4_init(struct kvm *kvm);
322332
void vgic_v4_teardown(struct kvm *kvm);

include/kvm/arm_vgic.h

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -231,6 +231,9 @@ struct vgic_dist {
231231

232232
/* Implementation revision as reported in the GICD_IIDR */
233233
u32 implementation_rev;
234+
#define KVM_VGIC_IMP_REV_2 2 /* GICv2 restorable groups */
235+
#define KVM_VGIC_IMP_REV_3 3 /* GICv3 GICR_CTLR.{IR,CES,RWP} */
236+
#define KVM_VGIC_IMP_REV_LATEST KVM_VGIC_IMP_REV_3
234237

235238
/* Userspace can write to GICv2 IGROUPR */
236239
bool v2_groups_user_writable;
@@ -344,11 +347,12 @@ struct vgic_cpu {
344347
struct vgic_io_device rd_iodev;
345348
struct vgic_redist_region *rdreg;
346349
u32 rdreg_index;
350+
atomic_t syncr_busy;
347351

348352
/* Contains the attributes and gpa of the LPI pending tables. */
349353
u64 pendbaser;
350-
351-
bool lpis_enabled;
354+
/* GICR_CTLR.{ENABLE_LPIS,RWP} */
355+
atomic_t ctlr;
352356

353357
/* Cache guest priority bits */
354358
u32 num_pri_bits;

0 commit comments

Comments
 (0)