@@ -1372,7 +1372,6 @@ void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	vmx_vcpu_pi_load(vcpu, cpu);
 
-	vmx->host_pkru = read_pkru();
 	vmx->host_debugctlmsr = get_debugctlmsr();
 }
 
@@ -4677,15 +4676,13 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
 		dr6 = vmcs_readl(EXIT_QUALIFICATION);
 		if (!(vcpu->guest_debug &
 		      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
-			vcpu->arch.dr6 &= ~DR_TRAP_BITS;
-			vcpu->arch.dr6 |= dr6 | DR6_RTM;
 			if (is_icebp(intr_info))
 				WARN_ON(!skip_emulated_instruction(vcpu));
 
-			kvm_queue_exception(vcpu, DB_VECTOR);
+			kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
 			return 1;
 		}
-		kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
+		kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
 		kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
 		/* fall through */
 	case BP_VECTOR:
@@ -4929,16 +4926,14 @@ static int handle_dr(struct kvm_vcpu *vcpu)
 		 * guest debugging itself.
 		 */
 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
-			vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
+			vcpu->run->debug.arch.dr6 = DR6_BD | DR6_RTM | DR6_FIXED_1;
 			vcpu->run->debug.arch.dr7 = dr7;
 			vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
 			vcpu->run->debug.arch.exception = DB_VECTOR;
 			vcpu->run->exit_reason = KVM_EXIT_DEBUG;
 			return 0;
 		} else {
-			vcpu->arch.dr6 &= ~DR_TRAP_BITS;
-			vcpu->arch.dr6 |= DR6_BD | DR6_RTM;
-			kvm_queue_exception(vcpu, DB_VECTOR);
+			kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BD);
 			return 1;
 		}
 	}
@@ -4969,15 +4964,6 @@ static int handle_dr(struct kvm_vcpu *vcpu)
 	return kvm_skip_emulated_instruction(vcpu);
 }
 
-static u64 vmx_get_dr6(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.dr6;
-}
-
-static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
-{
-}
-
 static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
 {
 	get_debugreg(vcpu->arch.db[0], 0);
@@ -6577,11 +6563,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
 	kvm_load_guest_xsave_state(vcpu);
 
-	if (static_cpu_has(X86_FEATURE_PKU) &&
-	    kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
-	    vcpu->arch.pkru != vmx->host_pkru)
-		__write_pkru(vcpu->arch.pkru);
-
 	pt_guest_enter(vmx);
 
 	if (vcpu_to_pmu(vcpu)->version)
@@ -6671,18 +6652,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
 	pt_guest_exit(vmx);
 
-	/*
-	 * eager fpu is enabled if PKEY is supported and CR4 is switched
-	 * back on host, so it is safe to read guest PKRU from current
-	 * XSAVE.
-	 */
-	if (static_cpu_has(X86_FEATURE_PKU) &&
-	    kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
-		vcpu->arch.pkru = rdpkru();
-		if (vcpu->arch.pkru != vmx->host_pkru)
-			__write_pkru(vmx->host_pkru);
-	}
-
 	kvm_load_host_xsave_state(vcpu);
 
 	vmx->nested.nested_run_pending = 0;
@@ -7740,8 +7709,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.set_idt = vmx_set_idt,
 	.get_gdt = vmx_get_gdt,
 	.set_gdt = vmx_set_gdt,
-	.get_dr6 = vmx_get_dr6,
-	.set_dr6 = vmx_set_dr6,
 	.set_dr7 = vmx_set_dr7,
 	.sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
 	.cache_reg = vmx_cache_reg,
0 commit comments