@@ -11,13 +11,23 @@
 #include <nvhe/mem_protect.h>
 
 struct tlb_inv_context {
-	u64		tcr;
+	struct kvm_s2_mmu	*mmu;
+	u64			tcr;
+	u64			sctlr;
 };
 
-static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
-				  struct tlb_inv_context *cxt,
-				  bool nsh)
+static void enter_vmid_context(struct kvm_s2_mmu *mmu,
+			       struct tlb_inv_context *cxt,
+			       bool nsh)
 {
+	struct kvm_s2_mmu *host_s2_mmu = &host_mmu.arch.mmu;
+	struct kvm_cpu_context *host_ctxt;
+	struct kvm_vcpu *vcpu;
+
+	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+	vcpu = host_ctxt->__hyp_running_vcpu;
+	cxt->mmu = NULL;
+
 	/*
 	 * We have two requirements:
 	 *
@@ -40,20 +50,55 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
 	else
 		dsb(ish);
 
+	/*
+	 * If we're already in the desired context, then there's nothing to do.
+	 */
+	if (vcpu) {
+		/*
+		 * We're in guest context. However, for this to work, this needs
+		 * to be called from within __kvm_vcpu_run(), which ensures that
+		 * __hyp_running_vcpu is set to the current guest vcpu.
+		 */
+		if (mmu == vcpu->arch.hw_mmu || WARN_ON(mmu != host_s2_mmu))
+			return;
+
+		cxt->mmu = vcpu->arch.hw_mmu;
+	} else {
+		/* We're in host context. */
+		if (mmu == host_s2_mmu)
+			return;
+
+		cxt->mmu = host_s2_mmu;
+	}
+
 	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
 		u64 val;
 
 		/*
 		 * For CPUs that are affected by ARM 1319367, we need to
-		 * avoid a host Stage-1 walk while we have the guest's
-		 * VMID set in the VTTBR in order to invalidate TLBs.
-		 * We're guaranteed that the S1 MMU is enabled, so we can
-		 * simply set the EPD bits to avoid any further TLB fill.
+		 * avoid a Stage-1 walk with the old VMID while we have
+		 * the new VMID set in the VTTBR in order to invalidate TLBs.
+		 * We're guaranteed that the host S1 MMU is enabled, so
+		 * we can simply set the EPD bits to avoid any further
+		 * TLB fill. For guests, we ensure that the S1 MMU is
+		 * temporarily enabled in the next context.
 		 */
 		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
 		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
 		write_sysreg_el1(val, SYS_TCR);
 		isb();
+
+		if (vcpu) {
+			val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
+			if (!(val & SCTLR_ELx_M)) {
+				val |= SCTLR_ELx_M;
+				write_sysreg_el1(val, SYS_SCTLR);
+				isb();
+			}
+		} else {
+			/* The host S1 MMU is always enabled. */
+			cxt->sctlr = SCTLR_ELx_M;
+		}
 	}
 
 	/*
@@ -62,18 +107,40 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
 	 * ensuring that we always have an ISB, but not two ISBs back
 	 * to back.
 	 */
-	__load_stage2(mmu, kern_hyp_va(mmu->arch));
+	if (vcpu)
+		__load_host_stage2();
+	else
+		__load_stage2(mmu, kern_hyp_va(mmu->arch));
+
 	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
 }
 
-static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
+static void exit_vmid_context(struct tlb_inv_context *cxt)
 {
-	__load_host_stage2();
+	struct kvm_s2_mmu *mmu = cxt->mmu;
+	struct kvm_cpu_context *host_ctxt;
+	struct kvm_vcpu *vcpu;
+
+	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+	vcpu = host_ctxt->__hyp_running_vcpu;
+
+	if (!mmu)
+		return;
+
+	if (vcpu)
+		__load_stage2(mmu, kern_hyp_va(mmu->arch));
+	else
+		__load_host_stage2();
 
 	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
-		/* Ensure write of the host VMID */
+		/* Ensure write of the old VMID */
 		isb();
-		/* Restore the host's TCR_EL1 */
+
+		if (!(cxt->sctlr & SCTLR_ELx_M)) {
+			write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
+			isb();
+		}
+
 		write_sysreg_el1(cxt->tcr, SYS_TCR);
 	}
 }
@@ -84,7 +151,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
 	struct tlb_inv_context cxt;
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(mmu, &cxt, false);
+	enter_vmid_context(mmu, &cxt, false);
 
 	/*
 	 * We could do so much better if we had the VA as well.
@@ -105,7 +172,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
 	dsb(ish);
 	isb();
 
-	__tlb_switch_to_host(&cxt);
+	exit_vmid_context(&cxt);
 }
 
 void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
@@ -114,7 +181,7 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
 	struct tlb_inv_context cxt;
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(mmu, &cxt, true);
+	enter_vmid_context(mmu, &cxt, true);
 
 	/*
 	 * We could do so much better if we had the VA as well.
@@ -135,7 +202,7 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
 	dsb(nsh);
 	isb();
 
-	__tlb_switch_to_host(&cxt);
+	exit_vmid_context(&cxt);
 }
 
 void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
@@ -152,7 +219,7 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
 	start = round_down(start, stride);
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(mmu, &cxt, false);
+	enter_vmid_context(mmu, &cxt, false);
 
 	__flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);
 
@@ -161,41 +228,41 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
 	dsb(ish);
 	isb();
 
-	__tlb_switch_to_host(&cxt);
+	exit_vmid_context(&cxt);
 }
 
 void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
 {
 	struct tlb_inv_context cxt;
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(mmu, &cxt, false);
+	enter_vmid_context(mmu, &cxt, false);
 
 	__tlbi(vmalls12e1is);
 	dsb(ish);
 	isb();
 
-	__tlb_switch_to_host(&cxt);
+	exit_vmid_context(&cxt);
 }
 
 void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
 {
 	struct tlb_inv_context cxt;
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(mmu, &cxt, false);
+	enter_vmid_context(mmu, &cxt, false);
 
 	__tlbi(vmalle1);
 	asm volatile("ic iallu");
 	dsb(nsh);
 	isb();
 
-	__tlb_switch_to_host(&cxt);
+	exit_vmid_context(&cxt);
 }
 
 void __kvm_flush_vm_context(void)
 {
-	/* Same remark as in __tlb_switch_to_guest() */
+	/* Same remark as in enter_vmid_context() */
 	dsb(ish);
 	__tlbi(alle1is);
 	dsb(ish);
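
For reference, a minimal sketch of the calling convention this rename establishes (the helper name __kvm_tlb_flush_example is hypothetical, not part of the commit): every invalidation routine brackets its TLBI sequence with enter_vmid_context()/exit_vmid_context(), and the pair now tolerates being entered from either host or guest context, restoring whichever stage-2 configuration was live on entry.

void __kvm_tlb_flush_example(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to the requested VMID, from host or guest context alike. */
	enter_vmid_context(mmu, &cxt, false);

	/* Invalidate all stage-1 and stage-2 entries for this VMID. */
	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	/* Return to whichever context (host or guest) was live on entry. */
	exit_vmid_context(&cxt);
}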