@@ -721,6 +721,24 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
721721 nested_vmx_set_intercept_for_msr (vmx , msr_bitmap_l1 , msr_bitmap_l0 ,
722722 MSR_IA32_MPERF , MSR_TYPE_R );
723723
724+ nested_vmx_set_intercept_for_msr (vmx , msr_bitmap_l1 , msr_bitmap_l0 ,
725+ MSR_IA32_U_CET , MSR_TYPE_RW );
726+
727+ nested_vmx_set_intercept_for_msr (vmx , msr_bitmap_l1 , msr_bitmap_l0 ,
728+ MSR_IA32_S_CET , MSR_TYPE_RW );
729+
730+ nested_vmx_set_intercept_for_msr (vmx , msr_bitmap_l1 , msr_bitmap_l0 ,
731+ MSR_IA32_PL0_SSP , MSR_TYPE_RW );
732+
733+ nested_vmx_set_intercept_for_msr (vmx , msr_bitmap_l1 , msr_bitmap_l0 ,
734+ MSR_IA32_PL1_SSP , MSR_TYPE_RW );
735+
736+ nested_vmx_set_intercept_for_msr (vmx , msr_bitmap_l1 , msr_bitmap_l0 ,
737+ MSR_IA32_PL2_SSP , MSR_TYPE_RW );
738+
739+ nested_vmx_set_intercept_for_msr (vmx , msr_bitmap_l1 , msr_bitmap_l0 ,
740+ MSR_IA32_PL3_SSP , MSR_TYPE_RW );
741+
724742 kvm_vcpu_unmap (vcpu , & map );
725743
726744 vmx -> nested .force_msr_bitmap_recalc = false;
@@ -2521,6 +2539,32 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs0
25212539 }
25222540}
25232541
2542+ static void vmcs_read_cet_state (struct kvm_vcpu * vcpu , u64 * s_cet ,
2543+ u64 * ssp , u64 * ssp_tbl )
2544+ {
2545+ if (guest_cpu_cap_has (vcpu , X86_FEATURE_IBT ) ||
2546+ guest_cpu_cap_has (vcpu , X86_FEATURE_SHSTK ))
2547+ * s_cet = vmcs_readl (GUEST_S_CET );
2548+
2549+ if (guest_cpu_cap_has (vcpu , X86_FEATURE_SHSTK )) {
2550+ * ssp = vmcs_readl (GUEST_SSP );
2551+ * ssp_tbl = vmcs_readl (GUEST_INTR_SSP_TABLE );
2552+ }
2553+ }
2554+
2555+ static void vmcs_write_cet_state (struct kvm_vcpu * vcpu , u64 s_cet ,
2556+ u64 ssp , u64 ssp_tbl )
2557+ {
2558+ if (guest_cpu_cap_has (vcpu , X86_FEATURE_IBT ) ||
2559+ guest_cpu_cap_has (vcpu , X86_FEATURE_SHSTK ))
2560+ vmcs_writel (GUEST_S_CET , s_cet );
2561+
2562+ if (guest_cpu_cap_has (vcpu , X86_FEATURE_SHSTK )) {
2563+ vmcs_writel (GUEST_SSP , ssp );
2564+ vmcs_writel (GUEST_INTR_SSP_TABLE , ssp_tbl );
2565+ }
2566+ }
2567+
25242568static void prepare_vmcs02_rare (struct vcpu_vmx * vmx , struct vmcs12 * vmcs12 )
25252569{
25262570 struct hv_enlightened_vmcs * hv_evmcs = nested_vmx_evmcs (vmx );
@@ -2637,6 +2681,10 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
26372681 vmcs_write32 (VM_EXIT_MSR_LOAD_COUNT , vmx -> msr_autoload .host .nr );
26382682 vmcs_write32 (VM_ENTRY_MSR_LOAD_COUNT , vmx -> msr_autoload .guest .nr );
26392683
2684+ if (vmcs12 -> vm_entry_controls & VM_ENTRY_LOAD_CET_STATE )
2685+ vmcs_write_cet_state (& vmx -> vcpu , vmcs12 -> guest_s_cet ,
2686+ vmcs12 -> guest_ssp , vmcs12 -> guest_ssp_tbl );
2687+
26402688 set_cr4_guest_host_mask (vmx );
26412689}
26422690
@@ -2676,6 +2724,13 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
26762724 kvm_set_dr (vcpu , 7 , vcpu -> arch .dr7 );
26772725 vmx_guest_debugctl_write (vcpu , vmx -> nested .pre_vmenter_debugctl );
26782726 }
2727+
2728+ if (!vmx -> nested .nested_run_pending ||
2729+ !(vmcs12 -> vm_entry_controls & VM_ENTRY_LOAD_CET_STATE ))
2730+ vmcs_write_cet_state (vcpu , vmx -> nested .pre_vmenter_s_cet ,
2731+ vmx -> nested .pre_vmenter_ssp ,
2732+ vmx -> nested .pre_vmenter_ssp_tbl );
2733+
26792734 if (kvm_mpx_supported () && (!vmx -> nested .nested_run_pending ||
26802735 !(vmcs12 -> vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS )))
26812736 vmcs_write64 (GUEST_BNDCFGS , vmx -> nested .pre_vmenter_bndcfgs );
@@ -3551,6 +3606,12 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
35513606 !(vmcs12 -> vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS )))
35523607 vmx -> nested .pre_vmenter_bndcfgs = vmcs_read64 (GUEST_BNDCFGS );
35533608
3609+ if (!vmx -> nested .nested_run_pending ||
3610+ !(vmcs12 -> vm_entry_controls & VM_ENTRY_LOAD_CET_STATE ))
3611+ vmcs_read_cet_state (vcpu , & vmx -> nested .pre_vmenter_s_cet ,
3612+ & vmx -> nested .pre_vmenter_ssp ,
3613+ & vmx -> nested .pre_vmenter_ssp_tbl );
3614+
35543615 /*
35553616 * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and*
35563617 * nested early checks are disabled. In the event of a "late" VM-Fail,
@@ -4634,6 +4695,10 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
46344695
46354696 if (vmcs12 -> vm_exit_controls & VM_EXIT_SAVE_IA32_EFER )
46364697 vmcs12 -> guest_ia32_efer = vcpu -> arch .efer ;
4698+
4699+ vmcs_read_cet_state (& vmx -> vcpu , & vmcs12 -> guest_s_cet ,
4700+ & vmcs12 -> guest_ssp ,
4701+ & vmcs12 -> guest_ssp_tbl );
46374702}
46384703
46394704/*
@@ -4759,6 +4824,18 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
47594824 if (vmcs12 -> vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS )
47604825 vmcs_write64 (GUEST_BNDCFGS , 0 );
47614826
4827+	/*
4828+	 * Load CET state from the vmcs12 host fields if VM_EXIT_LOAD_CET_STATE
4829+	 * is set; otherwise CET state is retained across VM-exit, i.e. the
4830+	 * guest values should be propagated from vmcs12 to vmcs01.
4831+	 */
4832+ if (vmcs12 -> vm_exit_controls & VM_EXIT_LOAD_CET_STATE )
4833+ vmcs_write_cet_state (vcpu , vmcs12 -> host_s_cet , vmcs12 -> host_ssp ,
4834+ vmcs12 -> host_ssp_tbl );
4835+ else
4836+ vmcs_write_cet_state (vcpu , vmcs12 -> guest_s_cet , vmcs12 -> guest_ssp ,
4837+ vmcs12 -> guest_ssp_tbl );
4838+
47624839 if (vmcs12 -> vm_exit_controls & VM_EXIT_LOAD_IA32_PAT ) {
47634840 vmcs_write64 (GUEST_IA32_PAT , vmcs12 -> host_ia32_pat );
47644841 vcpu -> arch .pat = vmcs12 -> host_ia32_pat ;
0 commit comments