Skip to content

Commit b44a1dd

Browse files
committed
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Paolo Bonzini: "Fixes for PPC and s390"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: PPC: Book3S HV: Restore SPRG3 in kvmhv_p9_guest_entry()
  KVM: PPC: Book3S HV: Fix lockdep warning when entering guest on POWER9
  KVM: PPC: Book3S HV: XIVE: Fix page offset when clearing ESB pages
  KVM: PPC: Book3S HV: XIVE: Take the srcu read lock when accessing memslots
  KVM: PPC: Book3S HV: XIVE: Do not clear IRQ data of passthrough interrupts
  KVM: PPC: Book3S HV: XIVE: Introduce a new mutex for the XIVE device
  KVM: PPC: Book3S HV: XIVE: Fix the enforced limit on the vCPU identifier
  KVM: PPC: Book3S HV: XIVE: Do not test the EQ flag validity when resetting
  KVM: PPC: Book3S HV: XIVE: Clear file mapping when device is released
  KVM: PPC: Book3S HV: Don't take kvm->lock around kvm_for_each_vcpu
  KVM: PPC: Book3S: Use new mutex to synchronize access to rtas token list
  KVM: PPC: Book3S HV: Use new mutex to synchronize MMU setup
  KVM: PPC: Book3S HV: Avoid touching arch.mmu_ready in XIVE release functions
  KVM: s390: Do not report unusabled IDs via KVM_CAP_MAX_VCPU_ID
  kvm: fix compile on s390 part 2
2 parents 38baf0b + f8d221d commit b44a1dd

14 files changed

Lines changed: 157 additions & 117 deletions

File tree

arch/mips/kvm/mips.c

Lines changed: 3 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -1122,6 +1122,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
11221122
case KVM_CAP_MAX_VCPUS:
11231123
r = KVM_MAX_VCPUS;
11241124
break;
1125+
case KVM_CAP_MAX_VCPU_ID:
1126+
r = KVM_MAX_VCPU_ID;
1127+
break;
11251128
case KVM_CAP_MIPS_FPU:
11261129
/* We don't handle systems with inconsistent cpu_has_fpu */
11271130
r = !!raw_cpu_has_fpu;

arch/powerpc/include/asm/kvm_host.h

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -309,6 +309,7 @@ struct kvm_arch {
309309
#ifdef CONFIG_PPC_BOOK3S_64
310310
struct list_head spapr_tce_tables;
311311
struct list_head rtas_tokens;
312+
struct mutex rtas_token_lock;
312313
DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
313314
#endif
314315
#ifdef CONFIG_KVM_MPIC
@@ -325,6 +326,7 @@ struct kvm_arch {
325326
#endif
326327
struct kvmppc_ops *kvm_ops;
327328
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
329+
struct mutex mmu_setup_lock; /* nests inside vcpu mutexes */
328330
u64 l1_ptcr;
329331
int max_nested_lpid;
330332
struct kvm_nested_guest *nested_guests[KVM_MAX_NESTED_GUESTS];

arch/powerpc/kvm/book3s.c

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -902,6 +902,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
902902
#ifdef CONFIG_PPC64
903903
INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
904904
INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
905+
mutex_init(&kvm->arch.rtas_token_lock);
905906
#endif
906907

907908
return kvm->arch.kvm_ops->init_vm(kvm);

arch/powerpc/kvm/book3s_64_mmu_hv.c

Lines changed: 18 additions & 18 deletions
Original file line number | Diff line number | Diff line change
@@ -63,7 +63,7 @@ struct kvm_resize_hpt {
6363
struct work_struct work;
6464
u32 order;
6565

66-
/* These fields protected by kvm->lock */
66+
/* These fields protected by kvm->arch.mmu_setup_lock */
6767

6868
/* Possible values and their usage:
6969
* <0 an error occurred during allocation,
@@ -73,7 +73,7 @@ struct kvm_resize_hpt {
7373
int error;
7474

7575
/* Private to the work thread, until error != -EBUSY,
76-
* then protected by kvm->lock.
76+
* then protected by kvm->arch.mmu_setup_lock.
7777
*/
7878
struct kvm_hpt_info hpt;
7979
};
@@ -139,7 +139,7 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
139139
long err = -EBUSY;
140140
struct kvm_hpt_info info;
141141

142-
mutex_lock(&kvm->lock);
142+
mutex_lock(&kvm->arch.mmu_setup_lock);
143143
if (kvm->arch.mmu_ready) {
144144
kvm->arch.mmu_ready = 0;
145145
/* order mmu_ready vs. vcpus_running */
@@ -183,7 +183,7 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
183183
/* Ensure that each vcpu will flush its TLB on next entry. */
184184
cpumask_setall(&kvm->arch.need_tlb_flush);
185185

186-
mutex_unlock(&kvm->lock);
186+
mutex_unlock(&kvm->arch.mmu_setup_lock);
187187
return err;
188188
}
189189

@@ -1447,7 +1447,7 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize)
14471447

14481448
static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize)
14491449
{
1450-
if (WARN_ON(!mutex_is_locked(&kvm->lock)))
1450+
if (WARN_ON(!mutex_is_locked(&kvm->arch.mmu_setup_lock)))
14511451
return;
14521452

14531453
if (!resize)
@@ -1474,14 +1474,14 @@ static void resize_hpt_prepare_work(struct work_struct *work)
14741474
if (WARN_ON(resize->error != -EBUSY))
14751475
return;
14761476

1477-
mutex_lock(&kvm->lock);
1477+
mutex_lock(&kvm->arch.mmu_setup_lock);
14781478

14791479
/* Request is still current? */
14801480
if (kvm->arch.resize_hpt == resize) {
14811481
/* We may request large allocations here:
1482-
* do not sleep with kvm->lock held for a while.
1482+
* do not sleep with kvm->arch.mmu_setup_lock held for a while.
14831483
*/
1484-
mutex_unlock(&kvm->lock);
1484+
mutex_unlock(&kvm->arch.mmu_setup_lock);
14851485

14861486
resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
14871487
resize->order);
@@ -1494,9 +1494,9 @@ static void resize_hpt_prepare_work(struct work_struct *work)
14941494
if (WARN_ON(err == -EBUSY))
14951495
err = -EINPROGRESS;
14961496

1497-
mutex_lock(&kvm->lock);
1497+
mutex_lock(&kvm->arch.mmu_setup_lock);
14981498
/* It is possible that kvm->arch.resize_hpt != resize
1499-
* after we grab kvm->lock again.
1499+
* after we grab kvm->arch.mmu_setup_lock again.
15001500
*/
15011501
}
15021502

@@ -1505,7 +1505,7 @@ static void resize_hpt_prepare_work(struct work_struct *work)
15051505
if (kvm->arch.resize_hpt != resize)
15061506
resize_hpt_release(kvm, resize);
15071507

1508-
mutex_unlock(&kvm->lock);
1508+
mutex_unlock(&kvm->arch.mmu_setup_lock);
15091509
}
15101510

15111511
long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
@@ -1522,7 +1522,7 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
15221522
if (shift && ((shift < 18) || (shift > 46)))
15231523
return -EINVAL;
15241524

1525-
mutex_lock(&kvm->lock);
1525+
mutex_lock(&kvm->arch.mmu_setup_lock);
15261526

15271527
resize = kvm->arch.resize_hpt;
15281528

@@ -1565,7 +1565,7 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
15651565
ret = 100; /* estimated time in ms */
15661566

15671567
out:
1568-
mutex_unlock(&kvm->lock);
1568+
mutex_unlock(&kvm->arch.mmu_setup_lock);
15691569
return ret;
15701570
}
15711571

@@ -1588,7 +1588,7 @@ long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
15881588
if (shift && ((shift < 18) || (shift > 46)))
15891589
return -EINVAL;
15901590

1591-
mutex_lock(&kvm->lock);
1591+
mutex_lock(&kvm->arch.mmu_setup_lock);
15921592

15931593
resize = kvm->arch.resize_hpt;
15941594

@@ -1625,7 +1625,7 @@ long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
16251625
smp_mb();
16261626
out_no_hpt:
16271627
resize_hpt_release(kvm, resize);
1628-
mutex_unlock(&kvm->lock);
1628+
mutex_unlock(&kvm->arch.mmu_setup_lock);
16291629
return ret;
16301630
}
16311631

@@ -1868,15 +1868,15 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
18681868
return -EINVAL;
18691869

18701870
/* lock out vcpus from running while we're doing this */
1871-
mutex_lock(&kvm->lock);
1871+
mutex_lock(&kvm->arch.mmu_setup_lock);
18721872
mmu_ready = kvm->arch.mmu_ready;
18731873
if (mmu_ready) {
18741874
kvm->arch.mmu_ready = 0; /* temporarily */
18751875
/* order mmu_ready vs. vcpus_running */
18761876
smp_mb();
18771877
if (atomic_read(&kvm->arch.vcpus_running)) {
18781878
kvm->arch.mmu_ready = 1;
1879-
mutex_unlock(&kvm->lock);
1879+
mutex_unlock(&kvm->arch.mmu_setup_lock);
18801880
return -EBUSY;
18811881
}
18821882
}
@@ -1963,7 +1963,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
19631963
/* Order HPTE updates vs. mmu_ready */
19641964
smp_wmb();
19651965
kvm->arch.mmu_ready = mmu_ready;
1966-
mutex_unlock(&kvm->lock);
1966+
mutex_unlock(&kvm->arch.mmu_setup_lock);
19671967

19681968
if (err)
19691969
return err;

arch/powerpc/kvm/book3s_hv.c

Lines changed: 30 additions & 18 deletions
Original file line number | Diff line number | Diff line change
@@ -446,12 +446,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
446446

447447
static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
448448
{
449-
struct kvm_vcpu *ret;
450-
451-
mutex_lock(&kvm->lock);
452-
ret = kvm_get_vcpu_by_id(kvm, id);
453-
mutex_unlock(&kvm->lock);
454-
return ret;
449+
return kvm_get_vcpu_by_id(kvm, id);
455450
}
456451

457452
static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
@@ -1583,7 +1578,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
15831578
struct kvmppc_vcore *vc = vcpu->arch.vcore;
15841579
u64 mask;
15851580

1586-
mutex_lock(&kvm->lock);
15871581
spin_lock(&vc->lock);
15881582
/*
15891583
* If ILE (interrupt little-endian) has changed, update the
@@ -1623,7 +1617,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
16231617
mask &= 0xFFFFFFFF;
16241618
vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
16251619
spin_unlock(&vc->lock);
1626-
mutex_unlock(&kvm->lock);
16271620
}
16281621

16291622
static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
@@ -2338,11 +2331,17 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
23382331
pr_devel("KVM: collision on id %u", id);
23392332
vcore = NULL;
23402333
} else if (!vcore) {
2334+
/*
2335+
* Take mmu_setup_lock for mutual exclusion
2336+
* with kvmppc_update_lpcr().
2337+
*/
23412338
err = -ENOMEM;
23422339
vcore = kvmppc_vcore_create(kvm,
23432340
id & ~(kvm->arch.smt_mode - 1));
2341+
mutex_lock(&kvm->arch.mmu_setup_lock);
23442342
kvm->arch.vcores[core] = vcore;
23452343
kvm->arch.online_vcores++;
2344+
mutex_unlock(&kvm->arch.mmu_setup_lock);
23462345
}
23472346
}
23482347
mutex_unlock(&kvm->lock);
@@ -3663,6 +3662,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
36633662
vc->in_guest = 0;
36643663

36653664
mtspr(SPRN_DEC, local_paca->kvm_hstate.dec_expires - mftb());
3665+
mtspr(SPRN_SPRG_VDSO_WRITE, local_paca->sprg_vdso);
36663666

36673667
kvmhv_load_host_pmu();
36683668

@@ -3859,7 +3859,7 @@ static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
38593859
int r = 0;
38603860
struct kvm *kvm = vcpu->kvm;
38613861

3862-
mutex_lock(&kvm->lock);
3862+
mutex_lock(&kvm->arch.mmu_setup_lock);
38633863
if (!kvm->arch.mmu_ready) {
38643864
if (!kvm_is_radix(kvm))
38653865
r = kvmppc_hv_setup_htab_rma(vcpu);
@@ -3869,7 +3869,7 @@ static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
38693869
kvm->arch.mmu_ready = 1;
38703870
}
38713871
}
3872-
mutex_unlock(&kvm->lock);
3872+
mutex_unlock(&kvm->arch.mmu_setup_lock);
38733873
return r;
38743874
}
38753875

@@ -4091,16 +4091,20 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
40914091
kvmppc_check_need_tlb_flush(kvm, pcpu, nested);
40924092
}
40934093

4094-
trace_hardirqs_on();
40954094
guest_enter_irqoff();
40964095

40974096
srcu_idx = srcu_read_lock(&kvm->srcu);
40984097

40994098
this_cpu_disable_ftrace();
41004099

4100+
/* Tell lockdep that we're about to enable interrupts */
4101+
trace_hardirqs_on();
4102+
41014103
trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr);
41024104
vcpu->arch.trap = trap;
41034105

4106+
trace_hardirqs_off();
4107+
41044108
this_cpu_enable_ftrace();
41054109

41064110
srcu_read_unlock(&kvm->srcu, srcu_idx);
@@ -4110,7 +4114,6 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
41104114
isync();
41114115
}
41124116

4113-
trace_hardirqs_off();
41144117
set_irq_happened(trap);
41154118

41164119
kvmppc_set_host_core(pcpu);
@@ -4478,7 +4481,8 @@ static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
44784481

44794482
/*
44804483
* Update LPCR values in kvm->arch and in vcores.
4481-
* Caller must hold kvm->lock.
4484+
* Caller must hold kvm->arch.mmu_setup_lock (for mutual exclusion
4485+
* of kvm->arch.lpcr update).
44824486
*/
44834487
void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
44844488
{
@@ -4530,7 +4534,7 @@ void kvmppc_setup_partition_table(struct kvm *kvm)
45304534

45314535
/*
45324536
* Set up HPT (hashed page table) and RMA (real-mode area).
4533-
* Must be called with kvm->lock held.
4537+
* Must be called with kvm->arch.mmu_setup_lock held.
45344538
*/
45354539
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
45364540
{
@@ -4618,7 +4622,10 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
46184622
goto out_srcu;
46194623
}
46204624

4621-
/* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */
4625+
/*
4626+
* Must be called with kvm->arch.mmu_setup_lock held and
4627+
* mmu_ready = 0 and no vcpus running.
4628+
*/
46224629
int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
46234630
{
46244631
if (nesting_enabled(kvm))
@@ -4635,7 +4642,10 @@ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
46354642
return 0;
46364643
}
46374644

4638-
/* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */
4645+
/*
4646+
* Must be called with kvm->arch.mmu_setup_lock held and
4647+
* mmu_ready = 0 and no vcpus running.
4648+
*/
46394649
int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
46404650
{
46414651
int err;
@@ -4740,6 +4750,8 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
47404750
char buf[32];
47414751
int ret;
47424752

4753+
mutex_init(&kvm->arch.mmu_setup_lock);
4754+
47434755
/* Allocate the guest's logical partition ID */
47444756

47454757
lpid = kvmppc_alloc_lpid();
@@ -5265,7 +5277,7 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
52655277
if (kvmhv_on_pseries() && !radix)
52665278
return -EINVAL;
52675279

5268-
mutex_lock(&kvm->lock);
5280+
mutex_lock(&kvm->arch.mmu_setup_lock);
52695281
if (radix != kvm_is_radix(kvm)) {
52705282
if (kvm->arch.mmu_ready) {
52715283
kvm->arch.mmu_ready = 0;
@@ -5293,7 +5305,7 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
52935305
err = 0;
52945306

52955307
out_unlock:
5296-
mutex_unlock(&kvm->lock);
5308+
mutex_unlock(&kvm->arch.mmu_setup_lock);
52975309
return err;
52985310
}
52995311

0 commit comments

Comments (0)