Skip to content

Commit 0c2c7c0

Browse files
pgonda authored and bonzini committed
KVM: SEV: Mark nested locking of vcpu->lock
svm_vm_migrate_from() uses sev_lock_vcpus_for_migration() to lock all source and target vcpu->locks. Unfortunately there is an 8 subclass limit, so a new subclass cannot be used for each vCPU. Instead maintain ownership of the first vcpu's mutex.dep_map using a role specific subclass: source vs target. Release the other vcpu's mutex.dep_maps. Fixes: b566393 ("KVM: SEV: Add support for SEV intra host migration") Reported-by: John Sperbeck <jsperbeck@google.com> Suggested-by: David Rientjes <rientjes@google.com> Suggested-by: Sean Christopherson <seanjc@google.com> Suggested-by: Paolo Bonzini <pbonzini@redhat.com> Cc: Hillf Danton <hdanton@sina.com> Cc: kvm@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Peter Gonda <pgonda@google.com> Message-Id: <20220502165807.529624-1-pgonda@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 0414410 commit 0c2c7c0

1 file changed

Lines changed: 38 additions & 4 deletions

File tree

arch/x86/kvm/svm/sev.c

Lines changed: 38 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1594,24 +1594,51 @@ static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
15941594
atomic_set_release(&src_sev->migration_in_progress, 0);
15951595
}
15961596

/*
 * vCPU mutex lockdep subclasses, one per migration role.  Lockdep has an
 * 8-subclass limit, so a distinct subclass per vCPU is not possible;
 * instead only the first vCPU of each VM keeps a role-specific subclass
 * (source vs. target) and the remaining vCPUs release their dep_map
 * ownership after locking (see sev_lock_vcpus_for_migration()).
 */
enum sev_migration_role {
	SEV_MIGRATION_SOURCE = 0,
	SEV_MIGRATION_TARGET,
	/* Sentinel: also the subclass used for every non-first vCPU. */
	SEV_NR_MIGRATION_ROLES,
};

1598-
static int sev_lock_vcpus_for_migration(struct kvm *kvm)
1604+
static int sev_lock_vcpus_for_migration(struct kvm *kvm,
1605+
enum sev_migration_role role)
15991606
{
16001607
struct kvm_vcpu *vcpu;
16011608
unsigned long i, j;
1609+
bool first = true;
16021610

16031611
kvm_for_each_vcpu(i, vcpu, kvm) {
1604-
if (mutex_lock_killable(&vcpu->mutex))
1612+
if (mutex_lock_killable_nested(&vcpu->mutex, role))
16051613
goto out_unlock;
1614+
1615+
if (first) {
1616+
/*
1617+
* Reset the role to one that avoids colliding with
1618+
* the role used for the first vcpu mutex.
1619+
*/
1620+
role = SEV_NR_MIGRATION_ROLES;
1621+
first = false;
1622+
} else {
1623+
mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
1624+
}
16061625
}
16071626

16081627
return 0;
16091628

16101629
out_unlock:
1630+
1631+
first = true;
16111632
kvm_for_each_vcpu(j, vcpu, kvm) {
16121633
if (i == j)
16131634
break;
16141635

1636+
if (first)
1637+
first = false;
1638+
else
1639+
mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
1640+
1641+
16151642
mutex_unlock(&vcpu->mutex);
16161643
}
16171644
return -EINTR;
@@ -1621,8 +1648,15 @@ static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
16211648
{
16221649
struct kvm_vcpu *vcpu;
16231650
unsigned long i;
1651+
bool first = true;
16241652

16251653
kvm_for_each_vcpu(i, vcpu, kvm) {
1654+
if (first)
1655+
first = false;
1656+
else
1657+
mutex_acquire(&vcpu->mutex.dep_map,
1658+
SEV_NR_MIGRATION_ROLES, 0, _THIS_IP_);
1659+
16261660
mutex_unlock(&vcpu->mutex);
16271661
}
16281662
}
@@ -1748,10 +1782,10 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
17481782
charged = true;
17491783
}
17501784

1751-
ret = sev_lock_vcpus_for_migration(kvm);
1785+
ret = sev_lock_vcpus_for_migration(kvm, SEV_MIGRATION_SOURCE);
17521786
if (ret)
17531787
goto out_dst_cgroup;
1754-
ret = sev_lock_vcpus_for_migration(source_kvm);
1788+
ret = sev_lock_vcpus_for_migration(source_kvm, SEV_MIGRATION_TARGET);
17551789
if (ret)
17561790
goto out_dst_vcpu;
17571791

0 commit comments

Comments
 (0)