@@ -3442,7 +3442,7 @@ static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
34423442 if (!nested_cpu_has_pml (vmcs12 ))
34433443 return 0 ;
34443444
3445- if (vmcs12 -> guest_pml_index >= PML_ENTITY_NUM ) {
3445+ if (vmcs12 -> guest_pml_index >= PML_LOG_NR_ENTRIES ) {
34463446 vmx -> nested .pml_full = true;
34473447 return 1 ;
34483448 }
@@ -3481,14 +3481,6 @@ static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
34813481 return 1 ;
34823482}
34833483
3484- static u8 vmx_has_apicv_interrupt (struct kvm_vcpu * vcpu )
3485- {
3486- u8 rvi = vmx_get_rvi ();
3487- u8 vppr = kvm_lapic_get_reg (vcpu -> arch .apic , APIC_PROCPRI );
3488-
3489- return ((rvi & 0xf0 ) > (vppr & 0xf0 ));
3490- }
3491-
34923484static void load_vmcs12_host_state (struct kvm_vcpu * vcpu ,
34933485 struct vmcs12 * vmcs12 );
34943486
@@ -3508,7 +3500,6 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
35083500 struct vcpu_vmx * vmx = to_vmx (vcpu );
35093501 struct vmcs12 * vmcs12 = get_vmcs12 (vcpu );
35103502 enum vm_entry_failure_code entry_failure_code ;
3511- bool evaluate_pending_interrupts ;
35123503 union vmx_exit_reason exit_reason = {
35133504 .basic = EXIT_REASON_INVALID_STATE ,
35143505 .failed_vmentry = 1 ,
@@ -3527,13 +3518,6 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
35273518
35283519 kvm_service_local_tlb_flush_requests (vcpu );
35293520
3530- evaluate_pending_interrupts = exec_controls_get (vmx ) &
3531- (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING );
3532- if (likely (!evaluate_pending_interrupts ) && kvm_vcpu_apicv_active (vcpu ))
3533- evaluate_pending_interrupts |= vmx_has_apicv_interrupt (vcpu );
3534- if (!evaluate_pending_interrupts )
3535- evaluate_pending_interrupts |= kvm_apic_has_pending_init_or_sipi (vcpu );
3536-
35373521 if (!vmx -> nested .nested_run_pending ||
35383522 !(vmcs12 -> vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS ))
35393523 vmx -> nested .pre_vmenter_debugctl = vmcs_read64 (GUEST_IA32_DEBUGCTL );
@@ -3616,9 +3600,13 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
36163600 * Re-evaluate pending events if L1 had a pending IRQ/NMI/INIT/SIPI
36173601 * when it executed VMLAUNCH/VMRESUME, as entering non-root mode can
36183602 * effectively unblock various events, e.g. INIT/SIPI cause VM-Exit
3619- * unconditionally.
3603+ * unconditionally. Take care to pull data from vmcs01 as appropriate,
3604+ * e.g. when checking for interrupt windows, as vmcs02 is now loaded.
36203605 */
3621- if (unlikely (evaluate_pending_interrupts ))
3606+ if ((__exec_controls_get (& vmx -> vmcs01 ) & (CPU_BASED_INTR_WINDOW_EXITING |
3607+ CPU_BASED_NMI_WINDOW_EXITING )) ||
3608+ kvm_apic_has_pending_init_or_sipi (vcpu ) ||
3609+ kvm_apic_has_interrupt (vcpu ))
36223610 kvm_make_request (KVM_REQ_EVENT , vcpu );
36233611
36243612 /*
@@ -3751,14 +3739,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
37513739 if (unlikely (status != NVMX_VMENTRY_SUCCESS ))
37523740 goto vmentry_failed ;
37533741
3754- /* Emulate processing of posted interrupts on VM-Enter. */
3755- if (nested_cpu_has_posted_intr (vmcs12 ) &&
3756- kvm_apic_has_interrupt (vcpu ) == vmx -> nested .posted_intr_nv ) {
3757- vmx -> nested .pi_pending = true;
3758- kvm_make_request (KVM_REQ_EVENT , vcpu );
3759- kvm_apic_clear_irr (vcpu , vmx -> nested .posted_intr_nv );
3760- }
3761-
37623742 /* Hide L1D cache contents from the nested guest. */
37633743 vmx -> vcpu .arch .l1tf_flush_l1d = true;
37643744
@@ -4220,13 +4200,25 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
42204200 */
42214201 bool block_nested_exceptions = vmx -> nested .nested_run_pending ;
42224202 /*
4223- * New events (not exceptions) are only recognized at instruction
4203+ * Events that don't require injection, i.e. that are virtualized by
4204+ * hardware, aren't blocked by a pending VM-Enter as KVM doesn't need
4205+ * to regain control in order to deliver the event, and hardware will
4206+ * handle event ordering, e.g. with respect to injected exceptions.
4207+ *
4208+ * But, new events (not exceptions) are only recognized at instruction
42244209 * boundaries. If an event needs reinjection, then KVM is handling a
4225- * VM-Exit that occurred _during_ instruction execution; new events are
4226- * blocked until the instruction completes.
4210+ * VM-Exit that occurred _during_ instruction execution; new events,
4211+ * irrespective of whether or not they're injected, are blocked until
4212+ * the instruction completes.
4213+ */
4214+ bool block_non_injected_events = kvm_event_needs_reinjection (vcpu );
4215+ /*
4216+ * Injected events are blocked by nested VM-Enter, as KVM is responsible
4217+ * for managing priority between concurrent events, i.e. KVM needs to
4218+ * wait until after VM-Enter completes to deliver injected events.
4219+ */
42284220 bool block_nested_events = block_nested_exceptions ||
4229- kvm_event_needs_reinjection ( vcpu ) ;
4221+ block_non_injected_events ;
42304222
42314223 if (lapic_in_kernel (vcpu ) &&
42324224 test_bit (KVM_APIC_INIT , & apic -> pending_events )) {
@@ -4338,18 +4330,26 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
43384330 if (kvm_cpu_has_interrupt (vcpu ) && !vmx_interrupt_blocked (vcpu )) {
43394331 int irq ;
43404332
4341- if (block_nested_events )
4342- return - EBUSY ;
4343- if (!nested_exit_on_intr (vcpu ))
4333+ if (!nested_exit_on_intr (vcpu )) {
4334+ if (block_nested_events )
4335+ return - EBUSY ;
4336+
43444337 goto no_vmexit ;
4338+ }
43454339
43464340 if (!nested_exit_intr_ack_set (vcpu )) {
4341+ if (block_nested_events )
4342+ return - EBUSY ;
4343+
43474344 nested_vmx_vmexit (vcpu , EXIT_REASON_EXTERNAL_INTERRUPT , 0 , 0 );
43484345 return 0 ;
43494346 }
43504347
43514348 irq = kvm_cpu_get_extint (vcpu );
43524349 if (irq != -1 ) {
4350+ if (block_nested_events )
4351+ return - EBUSY ;
4352+
43534353 nested_vmx_vmexit (vcpu , EXIT_REASON_EXTERNAL_INTERRUPT ,
43544354 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR | irq , 0 );
43554355 return 0 ;
@@ -4368,11 +4368,22 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
43684368 * and enabling posted interrupts requires ACK-on-exit.
43694369 */
43704370 if (irq == vmx -> nested .posted_intr_nv ) {
4371+ /*
4372+ * Nested posted interrupts are delivered via RVI, i.e.
4373+ * aren't injected by KVM, and so can be queued even if
4374+ * manual event injection is disallowed.
4375+ */
4376+ if (block_non_injected_events )
4377+ return - EBUSY ;
4378+
43714379 vmx -> nested .pi_pending = true;
43724380 kvm_apic_clear_irr (vcpu , irq );
43734381 goto no_vmexit ;
43744382 }
43754383
4384+ if (block_nested_events )
4385+ return - EBUSY ;
4386+
43764387 nested_vmx_vmexit (vcpu , EXIT_REASON_EXTERNAL_INTERRUPT ,
43774388 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR | irq , 0 );
43784389
@@ -5050,6 +5061,11 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
50505061 kvm_make_request (KVM_REQ_APICV_UPDATE , vcpu );
50515062 }
50525063
5064+ if (vmx -> nested .update_vmcs01_hwapic_isr ) {
5065+ vmx -> nested .update_vmcs01_hwapic_isr = false;
5066+ kvm_apic_update_hwapic_isr (vcpu );
5067+ }
5068+
50535069 if ((vm_exit_reason != -1 ) &&
50545070 (enable_shadow_vmcs || nested_vmx_is_evmptr12_valid (vmx )))
50555071 vmx -> nested .need_vmcs12_to_shadow_sync = true;
0 commit comments