Skip to content

Commit ba8ec27

Browse files
Maxim Levitsky authored and bonzini committed
KVM: x86: SVM: drop preempt-safe wrappers for avic_vcpu_load/put
Now that these functions are always called with preemption disabled, remove the preempt_disable()/preempt_enable() pair inside them. No functional change intended.

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20220606180829.102503-8-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 18869f2 commit ba8ec27

3 files changed

Lines changed: 8 additions & 27 deletions

File tree

arch/x86/kvm/svm/avic.c

Lines changed: 4 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -946,7 +946,7 @@ avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
946946
return ret;
947947
}
948948

949-
void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
949+
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
950950
{
951951
u64 entry;
952952
int h_physical_id = kvm_cpu_get_apicid(cpu);
@@ -978,7 +978,7 @@ void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
978978
avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
979979
}
980980

981-
void __avic_vcpu_put(struct kvm_vcpu *vcpu)
981+
void avic_vcpu_put(struct kvm_vcpu *vcpu)
982982
{
983983
u64 entry;
984984
struct vcpu_svm *svm = to_svm(vcpu);
@@ -997,25 +997,6 @@ void __avic_vcpu_put(struct kvm_vcpu *vcpu)
997997
WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
998998
}
999999

1000-
static void avic_vcpu_load(struct kvm_vcpu *vcpu)
1001-
{
1002-
int cpu = get_cpu();
1003-
1004-
WARN_ON(cpu != vcpu->cpu);
1005-
1006-
__avic_vcpu_load(vcpu, cpu);
1007-
1008-
put_cpu();
1009-
}
1010-
1011-
static void avic_vcpu_put(struct kvm_vcpu *vcpu)
1012-
{
1013-
preempt_disable();
1014-
1015-
__avic_vcpu_put(vcpu);
1016-
1017-
preempt_enable();
1018-
}
10191000

10201001
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
10211002
{
@@ -1042,7 +1023,7 @@ void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
10421023
vmcb_mark_dirty(vmcb, VMCB_AVIC);
10431024

10441025
if (activated)
1045-
avic_vcpu_load(vcpu);
1026+
avic_vcpu_load(vcpu, vcpu->cpu);
10461027
else
10471028
avic_vcpu_put(vcpu);
10481029

@@ -1075,5 +1056,5 @@ void avic_vcpu_unblocking(struct kvm_vcpu *vcpu)
10751056
if (!kvm_vcpu_apicv_active(vcpu))
10761057
return;
10771058

1078-
avic_vcpu_load(vcpu);
1059+
avic_vcpu_load(vcpu, vcpu->cpu);
10791060
}

arch/x86/kvm/svm/svm.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1400,13 +1400,13 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
14001400
indirect_branch_prediction_barrier();
14011401
}
14021402
if (kvm_vcpu_apicv_active(vcpu))
1403-
__avic_vcpu_load(vcpu, cpu);
1403+
avic_vcpu_load(vcpu, cpu);
14041404
}
14051405

14061406
static void svm_vcpu_put(struct kvm_vcpu *vcpu)
14071407
{
14081408
if (kvm_vcpu_apicv_active(vcpu))
1409-
__avic_vcpu_put(vcpu);
1409+
avic_vcpu_put(vcpu);
14101410

14111411
svm_prepare_host_switch(vcpu);
14121412

arch/x86/kvm/svm/svm.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -610,8 +610,8 @@ void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb);
610610
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
611611
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
612612
int avic_init_vcpu(struct vcpu_svm *svm);
613-
void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
614-
void __avic_vcpu_put(struct kvm_vcpu *vcpu);
613+
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
614+
void avic_vcpu_put(struct kvm_vcpu *vcpu);
615615
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
616616
void avic_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
617617
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);

0 commit comments

Comments (0)