@@ -10,6 +10,10 @@
 #include "tdx.h"
 #include "tdx_arch.h"
 
+#ifdef CONFIG_KVM_INTEL_TDX
+static_assert(offsetof(struct vcpu_vmx, vt) == offsetof(struct vcpu_tdx, vt));
+#endif
+
 static void vt_disable_virtualization_cpu(void)
 {
 	/* Note, TDX *and* VMX need to be disabled if TDX is enabled. */
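The `static_assert` added above is what makes the rest of this patch safe: code shared between VMX and TDX reaches the common `vt` state through a single container layout, so `struct vcpu_vmx` and `struct vcpu_tdx` must embed `vt` at the same offset. Below is a minimal, self-contained sketch of what the assert protects, using hypothetical simplified stand-in types (the real structs live in the VMX/TDX headers, and the kernel's shared accessor is `container_of()`-based rather than a bare cast):

```c
/* Hypothetical, simplified stand-ins for the real KVM structures. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct kvm_vcpu { int id; };
struct vcpu_vt  { unsigned long flags; };	/* state common to VMX and TDX */

struct vcpu_vmx { struct kvm_vcpu vcpu; struct vcpu_vt vt; /* VMX-only fields */ };
struct vcpu_tdx { struct kvm_vcpu vcpu; struct vcpu_vt vt; /* TDX-only fields */ };

/* Mirrors the build-time check added by the patch. */
static_assert(offsetof(struct vcpu_vmx, vt) == offsetof(struct vcpu_tdx, vt),
	      "vt must sit at the same offset in both containers");

/*
 * Shared accessor: it casts through one container type, so it is only
 * correct for a TDX vCPU because of the assert above.
 */
static struct vcpu_vt *to_vt(struct kvm_vcpu *vcpu)
{
	return &((struct vcpu_vmx *)vcpu)->vt;
}

int main(void)
{
	struct vcpu_tdx tdx = { .vcpu = { .id = 1 }, .vt = { .flags = 42 } };

	/* The vcpu_vmx-shaped cast still lands on the TDX vCPU's vt. */
	printf("flags = %lu\n", to_vt(&tdx.vcpu)->flags);
	return 0;
}
```

If either container ever gains a field ahead of `vt`, the build fails instead of the shared accessor silently reading the wrong memory.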
@@ -141,6 +145,42 @@ static void vt_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
 	vmx_update_cpu_dirty_logging(vcpu);
 }
 
+static void vt_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
+{
+	if (is_td_vcpu(vcpu)) {
+		tdx_prepare_switch_to_guest(vcpu);
+		return;
+	}
+
+	vmx_prepare_switch_to_guest(vcpu);
+}
+
+static void vt_vcpu_put(struct kvm_vcpu *vcpu)
+{
+	if (is_td_vcpu(vcpu)) {
+		tdx_vcpu_put(vcpu);
+		return;
+	}
+
+	vmx_vcpu_put(vcpu);
+}
+
+static int vt_vcpu_pre_run(struct kvm_vcpu *vcpu)
+{
+	if (is_td_vcpu(vcpu))
+		return tdx_vcpu_pre_run(vcpu);
+
+	return vmx_vcpu_pre_run(vcpu);
+}
+
+static fastpath_t vt_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
+{
+	if (is_td_vcpu(vcpu))
+		return tdx_vcpu_run(vcpu, force_immediate_exit);
+
+	return vmx_vcpu_run(vcpu, force_immediate_exit);
+}
+
 static void vt_flush_tlb_all(struct kvm_vcpu *vcpu)
 {
 	if (is_td_vcpu(vcpu)) {
@@ -245,9 +285,9 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 	.vcpu_free = vt_vcpu_free,
 	.vcpu_reset = vt_vcpu_reset,
 
-	.prepare_switch_to_guest = vmx_prepare_switch_to_guest,
+	.prepare_switch_to_guest = vt_prepare_switch_to_guest,
 	.vcpu_load = vt_vcpu_load,
-	.vcpu_put = vmx_vcpu_put,
+	.vcpu_put = vt_vcpu_put,
 
 	.update_exception_bitmap = vmx_update_exception_bitmap,
 	.get_feature_msr = vmx_get_feature_msr,
@@ -281,8 +321,8 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 	.flush_tlb_gva = vt_flush_tlb_gva,
 	.flush_tlb_guest = vt_flush_tlb_guest,
 
-	.vcpu_pre_run = vmx_vcpu_pre_run,
-	.vcpu_run = vmx_vcpu_run,
+	.vcpu_pre_run = vt_vcpu_pre_run,
+	.vcpu_run = vt_vcpu_run,
 	.handle_exit = vmx_handle_exit,
 	.skip_emulated_instruction = vmx_skip_emulated_instruction,
 	.update_emulated_instruction = vmx_update_emulated_instruction,
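Why wrappers at all, rather than swapping in a whole TDX ops table? `kvm_x86_ops` is a single table installed once for the module and shared by every guest on the host, so the VMX-vs-TDX decision cannot be made at table-selection time; it has to be a per-vCPU branch inside each callback. A minimal sketch of that pattern, with hypothetical simplified types:

```c
/*
 * Minimal sketch (hypothetical names) of the dispatch pattern above:
 * one global ops table serves all guests, so the per-vCPU VMX-vs-TDX
 * split happens inside each vt_* wrapper.
 */
#include <stdbool.h>
#include <stdio.h>

struct kvm_vcpu { bool is_td; };

static bool is_td_vcpu(struct kvm_vcpu *vcpu) { return vcpu->is_td; }

static void tdx_vcpu_put(struct kvm_vcpu *vcpu) { puts("tdx put"); }
static void vmx_vcpu_put(struct kvm_vcpu *vcpu) { puts("vmx put"); }

/* The wrapper pattern from the patch: branch once, then delegate. */
static void vt_vcpu_put(struct kvm_vcpu *vcpu)
{
	if (is_td_vcpu(vcpu)) {
		tdx_vcpu_put(vcpu);
		return;
	}

	vmx_vcpu_put(vcpu);
}

/* One table for every vCPU, regardless of guest type. */
struct x86_ops { void (*vcpu_put)(struct kvm_vcpu *); };
static const struct x86_ops ops = { .vcpu_put = vt_vcpu_put };

int main(void)
{
	struct kvm_vcpu vmx_vcpu = { .is_td = false };
	struct kvm_vcpu tdx_vcpu = { .is_td = true };

	ops.vcpu_put(&vmx_vcpu);	/* -> vmx put */
	ops.vcpu_put(&tdx_vcpu);	/* -> tdx put */
	return 0;
}
```

Keeping the branch in a dedicated `vt_*` wrapper, rather than sprinkling `is_td_vcpu()` checks through the `vmx_*` implementations, leaves the VMX paths untouched for non-TDX guests.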