Skip to content

Commit 295593a

Browse files
author
Marc Zyngier
committed
Merge branch kvm-arm64/mmio-rcu into kvmarm-master/next
* kvm-arm64/mmio-rcu:
  Speed up MMIO registration by avoiding unnecessary RCU synchronisation,
  courtesy of Keir Fraser (20250909100007.3136249-1-keirf@google.com).

  KVM: Avoid synchronize_srcu() in kvm_io_bus_register_dev()
  KVM: Implement barriers before accessing kvm->buses[] on SRCU read paths
  KVM: arm64: vgic: Explicitly implement vgic_dist::ready ordering
  KVM: arm64: vgic-init: Remove vgic_ready() macro

Signed-off-by: Marc Zyngier <maz@kernel.org>
2 parents 3064cee + 7d9a027 commit 295593a

5 files changed

Lines changed: 53 additions & 23 deletions

File tree

arch/arm64/kvm/vgic/vgic-init.c

Lines changed: 3 additions & 11 deletions
Original file line number | Diff line number | Diff line change
@@ -554,7 +554,6 @@ int vgic_lazy_init(struct kvm *kvm)
554554
* Also map the virtual CPU interface into the VM.
555555
* v2 calls vgic_init() if not already done.
556556
* v3 and derivatives return an error if the VGIC is not initialized.
557-
* vgic_ready() returns true if this function has succeeded.
558557
*/
559558
int kvm_vgic_map_resources(struct kvm *kvm)
560559
{
@@ -563,12 +562,12 @@ int kvm_vgic_map_resources(struct kvm *kvm)
563562
gpa_t dist_base;
564563
int ret = 0;
565564

566-
if (likely(vgic_ready(kvm)))
565+
if (likely(smp_load_acquire(&dist->ready)))
567566
return 0;
568567

569568
mutex_lock(&kvm->slots_lock);
570569
mutex_lock(&kvm->arch.config_lock);
571-
if (vgic_ready(kvm))
570+
if (dist->ready)
572571
goto out;
573572

574573
if (!irqchip_in_kernel(kvm))
@@ -594,14 +593,7 @@ int kvm_vgic_map_resources(struct kvm *kvm)
594593
goto out_slots;
595594
}
596595

597-
/*
598-
* kvm_io_bus_register_dev() guarantees all readers see the new MMIO
599-
* registration before returning through synchronize_srcu(), which also
600-
* implies a full memory barrier. As such, marking the distributor as
601-
* 'ready' here is guaranteed to be ordered after all vCPUs having seen
602-
* a completely configured distributor.
603-
*/
604-
dist->ready = true;
596+
smp_store_release(&dist->ready, true);
605597
goto out_slots;
606598
out:
607599
mutex_unlock(&kvm->arch.config_lock);

arch/x86/kvm/vmx/vmx.c

Lines changed: 7 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -5785,6 +5785,13 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
57855785
if (kvm_test_request(KVM_REQ_EVENT, vcpu))
57865786
return 1;
57875787

5788+
/*
5789+
* Ensure that any updates to kvm->buses[] observed by the
5790+
* previous instruction (emulated or otherwise) are also
5791+
* visible to the instruction KVM is about to emulate.
5792+
*/
5793+
smp_rmb();
5794+
57885795
if (!kvm_emulate_instruction(vcpu, 0))
57895796
return 0;
57905797

include/kvm/arm_vgic.h

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -406,7 +406,6 @@ u64 vgic_v3_get_misr(struct kvm_vcpu *vcpu);
406406

407407
#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
408408
#define vgic_initialized(k) ((k)->arch.vgic.initialized)
409-
#define vgic_ready(k) ((k)->arch.vgic.ready)
410409
#define vgic_valid_spi(k, i) (((i) >= VGIC_NR_PRIVATE_IRQS) && \
411410
((i) < (k)->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS))
412411

include/linux/kvm_host.h

Lines changed: 8 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -206,6 +206,7 @@ struct kvm_io_range {
206206
struct kvm_io_bus {
207207
int dev_count;
208208
int ioeventfd_count;
209+
struct rcu_head rcu;
209210
struct kvm_io_range range[];
210211
};
211212

@@ -966,11 +967,15 @@ static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
966967
return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
967968
}
968969

970+
/*
971+
* Get a bus reference under the update-side lock. No long-term SRCU reader
972+
* references are permitted, to avoid stale reads vs concurrent IO
973+
* registrations.
974+
*/
969975
static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
970976
{
971-
return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
972-
lockdep_is_held(&kvm->slots_lock) ||
973-
!refcount_read(&kvm->users_count));
977+
return rcu_dereference_protected(kvm->buses[idx],
978+
lockdep_is_held(&kvm->slots_lock));
974979
}
975980

976981
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)

virt/kvm/kvm_main.c

Lines changed: 35 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -1103,6 +1103,14 @@ void __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
11031103
{
11041104
}
11051105

1106+
/* Called only on cleanup and destruction paths when there are no users. */
1107+
static inline struct kvm_io_bus *kvm_get_bus_for_destruction(struct kvm *kvm,
1108+
enum kvm_bus idx)
1109+
{
1110+
return rcu_dereference_protected(kvm->buses[idx],
1111+
!refcount_read(&kvm->users_count));
1112+
}
1113+
11061114
static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
11071115
{
11081116
struct kvm *kvm = kvm_arch_alloc_vm();
@@ -1228,7 +1236,7 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
12281236
out_err_no_arch_destroy_vm:
12291237
WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
12301238
for (i = 0; i < KVM_NR_BUSES; i++)
1231-
kfree(kvm_get_bus(kvm, i));
1239+
kfree(kvm_get_bus_for_destruction(kvm, i));
12321240
kvm_free_irq_routing(kvm);
12331241
out_err_no_irq_routing:
12341242
cleanup_srcu_struct(&kvm->irq_srcu);
@@ -1276,7 +1284,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
12761284

12771285
kvm_free_irq_routing(kvm);
12781286
for (i = 0; i < KVM_NR_BUSES; i++) {
1279-
struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
1287+
struct kvm_io_bus *bus = kvm_get_bus_for_destruction(kvm, i);
12801288

12811289
if (bus)
12821290
kvm_io_bus_destroy(bus);
@@ -1312,6 +1320,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
13121320
kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
13131321
}
13141322
cleanup_srcu_struct(&kvm->irq_srcu);
1323+
srcu_barrier(&kvm->srcu);
13151324
cleanup_srcu_struct(&kvm->srcu);
13161325
#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
13171326
xa_destroy(&kvm->mem_attr_array);
@@ -5843,6 +5852,18 @@ static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
58435852
return -EOPNOTSUPP;
58445853
}
58455854

5855+
static struct kvm_io_bus *kvm_get_bus_srcu(struct kvm *kvm, enum kvm_bus idx)
5856+
{
5857+
/*
5858+
* Ensure that any updates to kvm_buses[] observed by the previous vCPU
5859+
* machine instruction are also visible to the vCPU machine instruction
5860+
* that triggered this call.
5861+
*/
5862+
smp_mb__after_srcu_read_lock();
5863+
5864+
return srcu_dereference(kvm->buses[idx], &kvm->srcu);
5865+
}
5866+
58465867
int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
58475868
int len, const void *val)
58485869
{
@@ -5855,7 +5876,7 @@ int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
58555876
.len = len,
58565877
};
58575878

5858-
bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5879+
bus = kvm_get_bus_srcu(vcpu->kvm, bus_idx);
58595880
if (!bus)
58605881
return -ENOMEM;
58615882
r = __kvm_io_bus_write(vcpu, bus, &range, val);
@@ -5874,7 +5895,7 @@ int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
58745895
.len = len,
58755896
};
58765897

5877-
bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5898+
bus = kvm_get_bus_srcu(vcpu->kvm, bus_idx);
58785899
if (!bus)
58795900
return -ENOMEM;
58805901

@@ -5924,14 +5945,21 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
59245945
.len = len,
59255946
};
59265947

5927-
bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5948+
bus = kvm_get_bus_srcu(vcpu->kvm, bus_idx);
59285949
if (!bus)
59295950
return -ENOMEM;
59305951
r = __kvm_io_bus_read(vcpu, bus, &range, val);
59315952
return r < 0 ? r : 0;
59325953
}
59335954
EXPORT_SYMBOL_GPL(kvm_io_bus_read);
59345955

5956+
static void __free_bus(struct rcu_head *rcu)
5957+
{
5958+
struct kvm_io_bus *bus = container_of(rcu, struct kvm_io_bus, rcu);
5959+
5960+
kfree(bus);
5961+
}
5962+
59355963
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
59365964
int len, struct kvm_io_device *dev)
59375965
{
@@ -5970,8 +5998,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
59705998
memcpy(new_bus->range + i + 1, bus->range + i,
59715999
(bus->dev_count - i) * sizeof(struct kvm_io_range));
59726000
rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
5973-
synchronize_srcu_expedited(&kvm->srcu);
5974-
kfree(bus);
6001+
call_srcu(&kvm->srcu, &bus->rcu, __free_bus);
59756002

59766003
return 0;
59776004
}
@@ -6033,7 +6060,7 @@ struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
60336060

60346061
srcu_idx = srcu_read_lock(&kvm->srcu);
60356062

6036-
bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
6063+
bus = kvm_get_bus_srcu(kvm, bus_idx);
60376064
if (!bus)
60386065
goto out_unlock;
60396066

0 commit comments

Comments (0)