@@ -48,7 +48,14 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
4848 if (!gpc -> active )
4949 return false;
5050
51- if (gpc -> generation != slots -> generation || kvm_is_error_hva (gpc -> uhva ))
51+ /*
52+ * If the page was cached from a memslot, make sure the memslots have
53+ * not been re-configured.
54+ */
55+ if (!kvm_is_error_gpa (gpc -> gpa ) && gpc -> generation != slots -> generation )
56+ return false;
57+
58+ if (kvm_is_error_hva (gpc -> uhva ))
5259 return false;
5360
5461 if (offset_in_page (gpc -> uhva ) + len > PAGE_SIZE )
@@ -209,22 +216,27 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
209216 return - EFAULT ;
210217}
211218
212- static int __kvm_gpc_refresh (struct gfn_to_pfn_cache * gpc , gpa_t gpa ,
219+ static int __kvm_gpc_refresh (struct gfn_to_pfn_cache * gpc , gpa_t gpa , unsigned long uhva ,
213220 unsigned long len )
214221{
215- struct kvm_memslots * slots = kvm_memslots (gpc -> kvm );
216- unsigned long page_offset = offset_in_page (gpa );
222+ unsigned long page_offset ;
217223 bool unmap_old = false;
218224 unsigned long old_uhva ;
219225 kvm_pfn_t old_pfn ;
220226 bool hva_change = false;
221227 void * old_khva ;
222228 int ret ;
223229
230+ /* Either gpa or uhva must be valid, but not both */
231+ if (WARN_ON_ONCE (kvm_is_error_gpa (gpa ) == kvm_is_error_hva (uhva )))
232+ return - EINVAL ;
233+
224234 /*
225- * If must fit within a single page. The 'len' argument is
226- * only to enforce that.
235+ * The cached access must fit within a single page. The 'len' argument
236+ * exists only to enforce that.
227237 */
238+ page_offset = kvm_is_error_gpa (gpa ) ? offset_in_page (uhva ) :
239+ offset_in_page (gpa );
228240 if (page_offset + len > PAGE_SIZE )
229241 return - EINVAL ;
230242
@@ -246,29 +258,39 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
246258 old_khva = (void * )PAGE_ALIGN_DOWN ((uintptr_t )gpc -> khva );
247259 old_uhva = PAGE_ALIGN_DOWN (gpc -> uhva );
248260
249- /* Refresh the userspace HVA if necessary */
250- if (gpc -> gpa != gpa || gpc -> generation != slots -> generation ||
251- kvm_is_error_hva (gpc -> uhva )) {
252- gfn_t gfn = gpa_to_gfn (gpa );
253-
254- gpc -> gpa = gpa ;
255- gpc -> generation = slots -> generation ;
256- gpc -> memslot = __gfn_to_memslot (slots , gfn );
257- gpc -> uhva = gfn_to_hva_memslot (gpc -> memslot , gfn );
261+ if (kvm_is_error_gpa (gpa )) {
262+ gpc -> gpa = INVALID_GPA ;
263+ gpc -> memslot = NULL ;
264+ gpc -> uhva = PAGE_ALIGN_DOWN (uhva );
258265
259- if (kvm_is_error_hva (gpc -> uhva )) {
260- ret = - EFAULT ;
261- goto out ;
262- }
263-
264- /*
265- * Even if the GPA and/or the memslot generation changed, the
266- * HVA may still be the same.
267- */
268266 if (gpc -> uhva != old_uhva )
269267 hva_change = true;
270268 } else {
271- gpc -> uhva = old_uhva ;
269+ struct kvm_memslots * slots = kvm_memslots (gpc -> kvm );
270+
271+ if (gpc -> gpa != gpa || gpc -> generation != slots -> generation ||
272+ kvm_is_error_hva (gpc -> uhva )) {
273+ gfn_t gfn = gpa_to_gfn (gpa );
274+
275+ gpc -> gpa = gpa ;
276+ gpc -> generation = slots -> generation ;
277+ gpc -> memslot = __gfn_to_memslot (slots , gfn );
278+ gpc -> uhva = gfn_to_hva_memslot (gpc -> memslot , gfn );
279+
280+ if (kvm_is_error_hva (gpc -> uhva )) {
281+ ret = - EFAULT ;
282+ goto out ;
283+ }
284+
285+ /*
286+ * Even if the GPA and/or the memslot generation changed, the
287+ * HVA may still be the same.
288+ */
289+ if (gpc -> uhva != old_uhva )
290+ hva_change = true;
291+ } else {
292+ gpc -> uhva = old_uhva ;
293+ }
272294 }
273295
274296 /* Note: the offset must be correct before calling hva_to_pfn_retry() */
@@ -319,7 +341,15 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
319341
320342int kvm_gpc_refresh (struct gfn_to_pfn_cache * gpc , unsigned long len )
321343{
322- return __kvm_gpc_refresh (gpc , gpc -> gpa , len );
344+ /*
345+ * If the GPA is valid then ignore the HVA, as a cache can be GPA-based
346+ * or HVA-based, not both. For GPA-based caches, the HVA will be
347+ * recomputed during refresh if necessary.
348+ */
349+ unsigned long uhva = kvm_is_error_gpa (gpc -> gpa ) ? gpc -> uhva :
350+ KVM_HVA_ERR_BAD ;
351+
352+ return __kvm_gpc_refresh (gpc , gpc -> gpa , uhva , len );
323353}
324354
325355void kvm_gpc_init (struct gfn_to_pfn_cache * gpc , struct kvm * kvm )
@@ -329,10 +359,12 @@ void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
329359
330360 gpc -> kvm = kvm ;
331361 gpc -> pfn = KVM_PFN_ERR_FAULT ;
362+ gpc -> gpa = INVALID_GPA ;
332363 gpc -> uhva = KVM_HVA_ERR_BAD ;
333364}
334365
335- int kvm_gpc_activate (struct gfn_to_pfn_cache * gpc , gpa_t gpa , unsigned long len )
366+ static int __kvm_gpc_activate (struct gfn_to_pfn_cache * gpc , gpa_t gpa , unsigned long uhva ,
367+ unsigned long len )
336368{
337369 struct kvm * kvm = gpc -> kvm ;
338370
@@ -353,7 +385,17 @@ int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
353385 gpc -> active = true;
354386 write_unlock_irq (& gpc -> lock );
355387 }
356- return __kvm_gpc_refresh (gpc , gpa , len );
388+ return __kvm_gpc_refresh (gpc , gpa , uhva , len );
389+ }
390+
391+ int kvm_gpc_activate (struct gfn_to_pfn_cache * gpc , gpa_t gpa , unsigned long len )
392+ {
393+ return __kvm_gpc_activate (gpc , gpa , KVM_HVA_ERR_BAD , len );
394+ }
395+
396+ int kvm_gpc_activate_hva (struct gfn_to_pfn_cache * gpc , unsigned long uhva , unsigned long len )
397+ {
398+ return __kvm_gpc_activate (gpc , INVALID_GPA , uhva , len );
357399}
358400
359401void kvm_gpc_deactivate (struct gfn_to_pfn_cache * gpc )
0 commit comments