Skip to content

Commit 8622ef0

Browse files
mdroth authored and sean-jc committed
KVM: guest_memfd: Remove preparation tracking
guest_memfd currently uses the folio uptodate flag to track: 1) whether or not a page has been cleared before initial usage 2) whether or not the architecture hooks have been issued to put the page in a private state as defined by the architecture In practice, (2) is only actually being tracked for SEV-SNP VMs, and there do not seem to be any plans/reasons that would suggest this will change in the future, so this additional tracking/complexity is not really providing any general benefit to guest_memfd users. On the other hand, future plans around in-place conversion and hugepage support will make the burden of tracking this information within guest_memfd even more complex. With in-place conversion and hugepage support, the plan is to use the per-folio uptodate flag purely to track the initial clearing of folios, whereas conversion operations could trigger multiple transitions between 'prepared' and 'unprepared' and thus need separate tracking. Since preparation generally happens during fault time, i.e. on the "read-side" of any VM-wide locks that might protect state tracked by guest_memfd, supporting concurrent handling of page faults would likely require more complex locking schemes if the "preparedness" state were tracked by guest_memfd, i.e. if it needs to be updated as part of handling the fault. Instead of keeping this current/future complexity within guest_memfd for what is essentially just SEV-SNP, just drop the tracking for (2) and have the arch-specific preparation hooks get triggered unconditionally on every fault so the arch-specific hooks can check the preparation state directly and decide whether or not a folio still needs additional preparation. In the case of SEV-SNP, the preparation state is already checked again via the preparation hooks to avoid double-preparation, so nothing extra needs to be done to update the handling of things there. 
Reviewed-by: Vishal Annapurve <vannapurve@google.com> Tested-by: Vishal Annapurve <vannapurve@google.com> Reviewed-by: Pankaj Gupta <pankaj.gupta@amd.com> Tested-by: Kai Huang <kai.huang@intel.com> Signed-off-by: Michael Roth <michael.roth@amd.com> Link: https://patch.msgid.link/20260108214622.1084057-4-michael.roth@amd.com [sean: massage changelog] Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 6538b62 commit 8622ef0

1 file changed

Lines changed: 12 additions & 32 deletions

File tree

virt/kvm/guest_memfd.c

Lines changed: 12 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -76,11 +76,6 @@ static int __kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slo
7676
return 0;
7777
}
7878

79-
static inline void kvm_gmem_mark_prepared(struct folio *folio)
80-
{
81-
folio_mark_uptodate(folio);
82-
}
83-
8479
/*
8580
* Process @folio, which contains @gfn, so that the guest can use it.
8681
* The folio must be locked and the gfn must be contained in @slot.
@@ -90,13 +85,7 @@ static inline void kvm_gmem_mark_prepared(struct folio *folio)
9085
static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
9186
gfn_t gfn, struct folio *folio)
9287
{
93-
unsigned long nr_pages, i;
9488
pgoff_t index;
95-
int r;
96-
97-
nr_pages = folio_nr_pages(folio);
98-
for (i = 0; i < nr_pages; i++)
99-
clear_highpage(folio_page(folio, i));
10089

10190
/*
10291
* Preparing huge folios should always be safe, since it should
@@ -114,11 +103,8 @@ static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
114103
WARN_ON(!IS_ALIGNED(slot->gmem.pgoff, folio_nr_pages(folio)));
115104
index = kvm_gmem_get_index(slot, gfn);
116105
index = ALIGN_DOWN(index, folio_nr_pages(folio));
117-
r = __kvm_gmem_prepare_folio(kvm, slot, index, folio);
118-
if (!r)
119-
kvm_gmem_mark_prepared(folio);
120106

121-
return r;
107+
return __kvm_gmem_prepare_folio(kvm, slot, index, folio);
122108
}
123109

124110
/*
@@ -429,7 +415,7 @@ static vm_fault_t kvm_gmem_fault_user_mapping(struct vm_fault *vmf)
429415

430416
if (!folio_test_uptodate(folio)) {
431417
clear_highpage(folio_page(folio, 0));
432-
kvm_gmem_mark_prepared(folio);
418+
folio_mark_uptodate(folio);
433419
}
434420

435421
vmf->page = folio_file_page(folio, vmf->pgoff);
@@ -766,7 +752,7 @@ void kvm_gmem_unbind(struct kvm_memory_slot *slot)
766752
static struct folio *__kvm_gmem_get_pfn(struct file *file,
767753
struct kvm_memory_slot *slot,
768754
pgoff_t index, kvm_pfn_t *pfn,
769-
bool *is_prepared, int *max_order)
755+
int *max_order)
770756
{
771757
struct file *slot_file = READ_ONCE(slot->gmem.file);
772758
struct gmem_file *f = file->private_data;
@@ -796,7 +782,6 @@ static struct folio *__kvm_gmem_get_pfn(struct file *file,
796782
if (max_order)
797783
*max_order = 0;
798784

799-
*is_prepared = folio_test_uptodate(folio);
800785
return folio;
801786
}
802787

@@ -806,19 +791,22 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
806791
{
807792
pgoff_t index = kvm_gmem_get_index(slot, gfn);
808793
struct folio *folio;
809-
bool is_prepared = false;
810794
int r = 0;
811795

812796
CLASS(gmem_get_file, file)(slot);
813797
if (!file)
814798
return -EFAULT;
815799

816-
folio = __kvm_gmem_get_pfn(file, slot, index, pfn, &is_prepared, max_order);
800+
folio = __kvm_gmem_get_pfn(file, slot, index, pfn, max_order);
817801
if (IS_ERR(folio))
818802
return PTR_ERR(folio);
819803

820-
if (!is_prepared)
821-
r = kvm_gmem_prepare_folio(kvm, slot, gfn, folio);
804+
if (!folio_test_uptodate(folio)) {
805+
clear_highpage(folio_page(folio, 0));
806+
folio_mark_uptodate(folio);
807+
}
808+
809+
r = kvm_gmem_prepare_folio(kvm, slot, gfn, folio);
822810

823811
folio_unlock(folio);
824812

@@ -861,27 +849,19 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
861849
struct folio *folio;
862850
gfn_t gfn = start_gfn + i;
863851
pgoff_t index = kvm_gmem_get_index(slot, gfn);
864-
bool is_prepared = false;
865852
kvm_pfn_t pfn;
866853

867854
if (signal_pending(current)) {
868855
ret = -EINTR;
869856
break;
870857
}
871858

872-
folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, &is_prepared, NULL);
859+
folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, NULL);
873860
if (IS_ERR(folio)) {
874861
ret = PTR_ERR(folio);
875862
break;
876863
}
877864

878-
if (is_prepared) {
879-
folio_unlock(folio);
880-
folio_put(folio);
881-
ret = -EEXIST;
882-
break;
883-
}
884-
885865
folio_unlock(folio);
886866

887867
ret = -EINVAL;
@@ -893,7 +873,7 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
893873
p = src ? src + i * PAGE_SIZE : NULL;
894874
ret = post_populate(kvm, gfn, pfn, p, opaque);
895875
if (!ret)
896-
kvm_gmem_mark_prepared(folio);
876+
folio_mark_uptodate(folio);
897877

898878
put_folio_and_exit:
899879
folio_put(folio);

0 commit comments

Comments
 (0)