@@ -256,6 +256,20 @@ static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
 	return regs;
 }
 
+static unsigned long get_guest_cr3(struct kvm_vcpu *vcpu)
+{
+	return kvm_read_cr3(vcpu);
+}
+
+static inline unsigned long kvm_mmu_get_guest_pgd(struct kvm_vcpu *vcpu,
+						  struct kvm_mmu *mmu)
+{
+	if (IS_ENABLED(CONFIG_RETPOLINE) && mmu->get_guest_pgd == get_guest_cr3)
+		return kvm_read_cr3(vcpu);
+
+	return mmu->get_guest_pgd(vcpu);
+}
+
 static inline bool kvm_available_flush_tlb_with_range(void)
 {
 	return kvm_x86_ops.tlb_remote_flush_with_range;
@@ -3801,7 +3815,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	int quadrant, i, r;
 	hpa_t root;
 
-	root_pgd = mmu->get_guest_pgd(vcpu);
+	root_pgd = kvm_mmu_get_guest_pgd(vcpu, mmu);
 	root_gfn = root_pgd >> PAGE_SHIFT;
 
 	if (mmu_check_root(vcpu, root_gfn))
@@ -4251,7 +4265,7 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	arch.token = alloc_apf_token(vcpu);
 	arch.gfn = gfn;
 	arch.direct_map = vcpu->arch.mmu->root_role.direct;
-	arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
+	arch.cr3 = kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu);
 
 	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
 				  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
@@ -4270,7 +4284,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 		return;
 
 	if (!vcpu->arch.mmu->root_role.direct &&
-	    work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu))
+	    work->arch.cr3 != kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu))
 		return;
 
 	kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true, NULL);
@@ -4673,11 +4687,6 @@ void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
 
-static unsigned long get_cr3(struct kvm_vcpu *vcpu)
-{
-	return kvm_read_cr3(vcpu);
-}
-
 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
 			   unsigned int access)
 {
@@ -5223,7 +5232,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
 	context->root_role.word = root_role.word;
 	context->page_fault = kvm_tdp_page_fault;
 	context->sync_spte = NULL;
-	context->get_guest_pgd = get_cr3;
+	context->get_guest_pgd = get_guest_cr3;
 	context->get_pdptr = kvm_pdptr_read;
 	context->inject_page_fault = kvm_inject_page_fault;
 
@@ -5372,7 +5381,7 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
 
 	kvm_init_shadow_mmu(vcpu, cpu_role);
 
-	context->get_guest_pgd = get_cr3;
+	context->get_guest_pgd = get_guest_cr3;
 	context->get_pdptr = kvm_pdptr_read;
 	context->inject_page_fault = kvm_inject_page_fault;
 }
@@ -5386,7 +5395,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu,
 		return;
 
 	g_context->cpu_role.as_u64 = new_mode.as_u64;
-	g_context->get_guest_pgd = get_cr3;
+	g_context->get_guest_pgd = get_guest_cr3;
 	g_context->get_pdptr = kvm_pdptr_read;
 	g_context->inject_page_fault = kvm_inject_page_fault;
 
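
The diff above follows a common retpoline-avoidance idiom: when an indirect-call slot is known to usually hold one well-known default target (here, get_guest_cr3), compare the pointer against that target and make a direct call on a match, falling back to the indirect call otherwise. On CONFIG_RETPOLINE kernels this skips the retpoline thunk in the common case. Below is a minimal, self-contained userspace sketch of the same pattern; it is illustration only, and the names (struct ops, default_read, read_value, IS_ENABLED_RETPOLINE) are hypothetical, not KVM code.

/*
 * Standalone sketch of the retpoline-avoidance pattern used by
 * kvm_mmu_get_guest_pgd() above. All names here are hypothetical.
 */
#include <stdio.h>

/* Stand-in for IS_ENABLED(CONFIG_RETPOLINE). */
#define IS_ENABLED_RETPOLINE 1

struct ops {
	/* Indirect-call slot, analogous to mmu->get_guest_pgd. */
	unsigned long (*read)(void);
};

/* The common default target, analogous to get_guest_cr3(). */
static unsigned long default_read(void)
{
	return 0x1000;
}

/*
 * Wrapper mirroring kvm_mmu_get_guest_pgd(): if the slot still holds
 * the well-known default, call it directly so the compiler emits a
 * plain call instead of going through a retpoline thunk; otherwise
 * fall back to the indirect call.
 */
static inline unsigned long read_value(struct ops *o)
{
	if (IS_ENABLED_RETPOLINE && o->read == default_read)
		return default_read();	/* direct call */

	return o->read();		/* indirect call */
}

int main(void)
{
	struct ops o = { .read = default_read };

	printf("%lx\n", read_value(&o));
	return 0;
}

The comparison is cheap relative to a retpoline, and because every context in the patch is initialized with get_guest_cr3, the direct-call path is taken whenever nested paging has not overridden the hook.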