Skip to content

Commit 000d75b

Browse files
committed
KVM: x86: Update APICv ISR (a.k.a. SVI) as part of kvm_apic_update_apicv()
Fold the calls to .hwapic_isr_update() in kvm_apic_set_state(), kvm_lapic_reset(), and __kvm_vcpu_update_apicv() into kvm_apic_update_apicv(), as updating SVI is directly related to updating KVM's own cache of ISR information, e.g. SVI is more or less the APICv equivalent of highest_isr_cache. Note, calling .hwapic_isr_update() during kvm_apic_update_apicv() has benign side effects, as doing so changes the orders of the calls in kvm_lapic_reset() and kvm_apic_set_state(), specifically with respect to the order between .hwapic_isr_update() and .apicv_post_state_restore(). However, the changes in ordering are glorified nops as the former hook is VMX-only and the latter is SVM-only. Reviewed-by: Chao Gao <chao.gao@intel.com> Link: https://patch.msgid.link/20260109034532.1012993-9-seanjc@google.com Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 249cc1a commit 000d75b

3 files changed

Lines changed: 12 additions & 27 deletions

File tree

arch/x86/kvm/lapic.c

Lines changed: 12 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -760,17 +760,6 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
760760
}
761761
}
762762

763-
void kvm_apic_update_hwapic_isr(struct kvm_vcpu *vcpu)
764-
{
765-
struct kvm_lapic *apic = vcpu->arch.apic;
766-
767-
if (WARN_ON_ONCE(!lapic_in_kernel(vcpu)) || !apic->apicv_active)
768-
return;
769-
770-
kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
771-
}
772-
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apic_update_hwapic_isr);
773-
774763
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
775764
{
776765
/* This may race with setting of irr in __apic_accept_irq() and
@@ -2783,10 +2772,18 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
27832772
*/
27842773
apic->irr_pending = true;
27852774

2786-
if (apic->apicv_active)
2775+
/*
2776+
* Update SVI when APICv gets enabled, otherwise SVI won't reflect the
2777+
* highest bit in vISR and the next accelerated EOI in the guest won't
2778+
* be virtualized correctly (the CPU uses SVI to determine which vISR
2779+
* vector to clear).
2780+
*/
2781+
if (apic->apicv_active) {
27872782
apic->isr_count = 1;
2788-
else
2783+
kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
2784+
} else {
27892785
apic->isr_count = count_vectors(apic->regs + APIC_ISR);
2786+
}
27902787

27912788
apic->highest_isr_cache = -1;
27922789
}
@@ -2914,10 +2911,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
29142911

29152912
vcpu->arch.pv_eoi.msr_val = 0;
29162913
apic_update_ppr(apic);
2917-
if (apic->apicv_active) {
2914+
if (apic->apicv_active)
29182915
kvm_x86_call(apicv_post_state_restore)(vcpu);
2919-
kvm_x86_call(hwapic_isr_update)(vcpu, -1);
2920-
}
29212916

29222917
vcpu->arch.apic_arb_prio = 0;
29232918
vcpu->arch.apic_attention = 0;
@@ -3228,10 +3223,8 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
32283223
__start_apic_timer(apic, APIC_TMCCT);
32293224
kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
32303225
kvm_apic_update_apicv(vcpu);
3231-
if (apic->apicv_active) {
3226+
if (apic->apicv_active)
32323227
kvm_x86_call(apicv_post_state_restore)(vcpu);
3233-
kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
3234-
}
32353228
kvm_make_request(KVM_REQ_EVENT, vcpu);
32363229

32373230
#ifdef CONFIG_KVM_IOAPIC

arch/x86/kvm/lapic.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -134,7 +134,6 @@ void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high);
134134
int kvm_apic_set_base(struct kvm_vcpu *vcpu, u64 value, bool host_initiated);
135135
int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
136136
int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
137-
void kvm_apic_update_hwapic_isr(struct kvm_vcpu *vcpu);
138137
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
139138

140139
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu);

arch/x86/kvm/x86.c

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -10886,16 +10886,9 @@ void __kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
1088610886
* pending. At the same time, KVM_REQ_EVENT may not be set as APICv was
1088710887
* still active when the interrupt got accepted. Make sure
1088810888
* kvm_check_and_inject_events() is called to check for that.
10889-
*
10890-
* Update SVI when APICv gets enabled, otherwise SVI won't reflect the
10891-
* highest bit in vISR and the next accelerated EOI in the guest won't
10892-
* be virtualized correctly (the CPU uses SVI to determine which vISR
10893-
* vector to clear).
1089410889
*/
1089510890
if (!apic->apicv_active)
1089610891
kvm_make_request(KVM_REQ_EVENT, vcpu);
10897-
else
10898-
kvm_apic_update_hwapic_isr(vcpu);
1089910892

1090010893
out:
1090110894
preempt_enable();

0 commit comments

Comments
 (0)