@@ -13,14 +13,50 @@ struct kvm_gmem {
1313 struct list_head entry ;
1414};
1515
16- static struct folio * kvm_gmem_get_folio (struct inode * inode , pgoff_t index )
/*
 * Give each arch a chance to prepare (e.g. validate/measure) the backing
 * page of @folio at @index before it is handed to the guest.
 *
 * Walks every kvm_gmem instance attached to @inode's mapping; for each VM
 * that needs preparation and has a memslot bound at @index, translates the
 * index to a gfn and invokes the arch hook.
 *
 * Returns 0 on success (or when CONFIG_HAVE_KVM_GMEM_PREPARE is not set),
 * or the first non-zero error from kvm_arch_gmem_prepare().
 */
static int kvm_gmem_prepare_folio(struct inode *inode, pgoff_t index, struct folio *folio)
{
#ifdef CONFIG_HAVE_KVM_GMEM_PREPARE
	struct list_head *gmem_list = &inode->i_mapping->i_private_list;
	struct kvm_gmem *gmem;

	list_for_each_entry(gmem, gmem_list, entry) {
		struct kvm_memory_slot *slot;
		struct kvm *kvm = gmem->kvm;
		struct page *page;
		kvm_pfn_t pfn;
		gfn_t gfn;
		int rc;

		/* Skip VMs whose arch does not require preparation. */
		if (!kvm_arch_gmem_prepare_needed(kvm))
			continue;

		/* No memslot bound at this index for this VM: nothing to do. */
		slot = xa_load(&gmem->bindings, index);
		if (!slot)
			continue;

		page = folio_file_page(folio, index);
		pfn = page_to_pfn(page);
		/* Translate the file index into the guest frame number. */
		gfn = slot->base_gfn + index - slot->gmem.pgoff;
		/* Order of the (possibly huge) head page covers the whole folio. */
		rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, compound_order(compound_head(page)));
		if (rc) {
			pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx, error %d.\n",
					    index, rc);
			/*
			 * NOTE(review): bailing out here leaves earlier VMs in
			 * the list prepared — presumably the caller discards
			 * the folio on error; confirm against callers.
			 */
			return rc;
		}
	}

#endif
	return 0;
}
51+
52+ static struct folio * kvm_gmem_get_folio (struct inode * inode , pgoff_t index , bool prepare )
1753{
1854 struct folio * folio ;
1955
2056 /* TODO: Support huge pages. */
2157 folio = filemap_grab_folio (inode -> i_mapping , index );
22- if (IS_ERR_OR_NULL (folio ))
23- return NULL ;
58+ if (IS_ERR (folio ))
59+ return folio ;
2460
2561 /*
2662 * Use the up-to-date flag to track whether or not the memory has been
@@ -41,6 +77,15 @@ static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
4177 folio_mark_uptodate (folio );
4278 }
4379
80+ if (prepare ) {
81+ int r = kvm_gmem_prepare_folio (inode , index , folio );
82+ if (r < 0 ) {
83+ folio_unlock (folio );
84+ folio_put (folio );
85+ return ERR_PTR (r );
86+ }
87+ }
88+
4489 /*
4590 * Ignore accessed, referenced, and dirty flags. The memory is
4691 * unevictable and there is no storage to write back to.
@@ -145,9 +190,9 @@ static long kvm_gmem_allocate(struct inode *inode, loff_t offset, loff_t len)
145190 break ;
146191 }
147192
148- folio = kvm_gmem_get_folio (inode , index );
149- if (! folio ) {
150- r = - ENOMEM ;
193+ folio = kvm_gmem_get_folio (inode , index , true );
194+ if (IS_ERR ( folio ) ) {
195+ r = PTR_ERR ( folio ) ;
151196 break ;
152197 }
153198
@@ -298,10 +343,24 @@ static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *fol
298343 return MF_DELAYED ;
299344}
300345
#ifdef CONFIG_HAVE_KVM_GMEM_INVALIDATE
/*
 * ->free_folio callback: let the arch invalidate the pfn range backing a
 * guest_memfd folio as it is released from the page cache.
 */
static void kvm_gmem_free_folio(struct folio *folio)
{
	kvm_pfn_t start = page_to_pfn(folio_page(folio, 0));
	kvm_pfn_t end = start + (1ul << folio_order(folio));

	kvm_arch_gmem_invalidate(start, end);
}
#endif
356+
/* Address-space operations for guest_memfd backing memory. */
static const struct address_space_operations kvm_gmem_aops = {
	.dirty_folio = noop_dirty_folio,
	.migrate_folio	= kvm_gmem_migrate_folio,
	.error_remove_folio = kvm_gmem_error_folio,
#ifdef CONFIG_HAVE_KVM_GMEM_INVALIDATE
	/* Arch invalidation hook, only wired up when the arch supports it. */
	.free_folio = kvm_gmem_free_folio,
#endif
};
306365
307366static int kvm_gmem_getattr (struct mnt_idmap * idmap , const struct path * path ,
@@ -357,6 +416,7 @@ static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags)
357416 inode -> i_private = (void * )(unsigned long )flags ;
358417 inode -> i_op = & kvm_gmem_iops ;
359418 inode -> i_mapping -> a_ops = & kvm_gmem_aops ;
419+ inode -> i_mapping -> flags |= AS_INACCESSIBLE ;
360420 inode -> i_mode |= S_IFREG ;
361421 inode -> i_size = size ;
362422 mapping_set_gfp_mask (inode -> i_mapping , GFP_HIGHUSER );
@@ -482,32 +542,29 @@ void kvm_gmem_unbind(struct kvm_memory_slot *slot)
482542 fput (file );
483543}
484544
485- int kvm_gmem_get_pfn (struct kvm * kvm , struct kvm_memory_slot * slot ,
486- gfn_t gfn , kvm_pfn_t * pfn , int * max_order )
545+ static int __kvm_gmem_get_pfn (struct file * file , struct kvm_memory_slot * slot ,
546+ gfn_t gfn , kvm_pfn_t * pfn , int * max_order , bool prepare )
487547{
488548 pgoff_t index = gfn - slot -> base_gfn + slot -> gmem .pgoff ;
489- struct kvm_gmem * gmem ;
549+ struct kvm_gmem * gmem = file -> private_data ;
490550 struct folio * folio ;
491551 struct page * page ;
492- struct file * file ;
493552 int r ;
494553
495- file = kvm_gmem_get_file ( slot );
496- if (! file )
554+ if ( file != slot -> gmem . file ) {
555+ WARN_ON_ONCE ( slot -> gmem . file );
497556 return - EFAULT ;
557+ }
498558
499559 gmem = file -> private_data ;
500-
501- if (WARN_ON_ONCE (xa_load (& gmem -> bindings , index ) != slot )) {
502- r = - EIO ;
503- goto out_fput ;
560+ if (xa_load (& gmem -> bindings , index ) != slot ) {
561+ WARN_ON_ONCE (xa_load (& gmem -> bindings , index ));
562+ return - EIO ;
504563 }
505564
506- folio = kvm_gmem_get_folio (file_inode (file ), index );
507- if (!folio ) {
508- r = - ENOMEM ;
509- goto out_fput ;
510- }
565+ folio = kvm_gmem_get_folio (file_inode (file ), index , prepare );
566+ if (IS_ERR (folio ))
567+ return PTR_ERR (folio );
511568
512569 if (folio_test_hwpoison (folio )) {
513570 r = - EHWPOISON ;
@@ -524,9 +581,73 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
524581
525582out_unlock :
526583 folio_unlock (folio );
527- out_fput :
528- fput (file );
529584
530585 return r ;
531586}
587+
588+ int kvm_gmem_get_pfn (struct kvm * kvm , struct kvm_memory_slot * slot ,
589+ gfn_t gfn , kvm_pfn_t * pfn , int * max_order )
590+ {
591+ struct file * file = kvm_gmem_get_file (slot );
592+ int r ;
593+
594+ if (!file )
595+ return - EFAULT ;
596+
597+ r = __kvm_gmem_get_pfn (file , slot , gfn , pfn , max_order , true);
598+ fput (file );
599+ return r ;
600+ }
532601EXPORT_SYMBOL_GPL (kvm_gmem_get_pfn );
602+
/*
 * Populate up to @npages pages of guest_memfd starting at @start_gfn,
 * invoking @post_populate on each chunk (e.g. so the arch can copy in and
 * encrypt initial guest payload from @src).
 *
 * @src may be NULL, in which case NULL is passed through to the callback.
 * Must be called with kvm->slots_lock held.
 *
 * Returns the number of pages processed (possibly fewer than @npages if the
 * range is clamped to the memslot or an error occurred mid-way), or a
 * negative errno if nothing was processed.
 */
long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long npages,
		       kvm_gmem_populate_cb post_populate, void *opaque)
{
	struct file *file;
	struct kvm_memory_slot *slot;
	void __user *p;

	int ret = 0, max_order;
	long i;

	lockdep_assert_held(&kvm->slots_lock);
	if (npages < 0)
		return -EINVAL;

	slot = gfn_to_memslot(kvm, start_gfn);
	if (!kvm_slot_can_be_private(slot))
		return -EINVAL;

	file = kvm_gmem_get_file(slot);
	if (!file)
		return -EFAULT;

	/* Block truncation/hole-punching of the range while populating. */
	filemap_invalidate_lock(file->f_mapping);

	/* Clamp the request to the tail of the memslot. */
	npages = min_t(ulong, slot->npages - (start_gfn - slot->base_gfn), npages);
	for (i = 0; i < npages; i += (1 << max_order)) {
		gfn_t gfn = start_gfn + i;
		kvm_pfn_t pfn;

		/* prepare == false: the callback, not the arch hook, readies the page. */
		ret = __kvm_gmem_get_pfn(file, slot, gfn, &pfn, &max_order, false);
		if (ret)
			break;

		/*
		 * Fall back to a single page when the gfn is not aligned to
		 * the folio order or the remaining range is too short.
		 */
		if (!IS_ALIGNED(gfn, (1 << max_order)) ||
		    (npages - i) < (1 << max_order))
			max_order = 0;

		p = src ? src + i * PAGE_SIZE : NULL;
		ret = post_populate(kvm, gfn, pfn, p, max_order, opaque);

		/* Drop the reference taken by __kvm_gmem_get_pfn(). */
		put_page(pfn_to_page(pfn));
		if (ret)
			break;
	}

	filemap_invalidate_unlock(file->f_mapping);

	fput(file);
	/* Report partial progress; only surface the error if nothing was done. */
	return ret && !i ? ret : i;
}
EXPORT_SYMBOL_GPL(kvm_gmem_populate);
0 commit comments