
Commit 631426b

davidhildenbrand authored and akpm00 committed
mm/madvise: make MADV_POPULATE_(READ|WRITE) handle VM_FAULT_RETRY properly
Darrick reports that in some cases where pread() would fail with -EIO and mmap()+access would generate a SIGBUS signal, MADV_POPULATE_READ / MADV_POPULATE_WRITE will keep retrying forever and not fail with -EFAULT.

While the madvise() call can be interrupted by a signal, this is not the desired behavior. MADV_POPULATE_READ / MADV_POPULATE_WRITE should behave like page faults in that case: fail and not retry forever. A reproducer can be found at [1].

The reason is that __get_user_pages(), as called by faultin_vma_page_range(), will not handle VM_FAULT_RETRY in a proper way: it will simply return 0 when VM_FAULT_RETRY happened, making madvise_populate()->faultin_vma_page_range() retry again and again, never setting FOLL_TRIED->FAULT_FLAG_TRIED for __get_user_pages().

__get_user_pages_locked() does what we want, but duplicating that logic in faultin_vma_page_range() feels wrong. So let's use __get_user_pages_locked() instead, which will detect VM_FAULT_RETRY and set FOLL_TRIED when retrying, making the fault handler return VM_FAULT_SIGBUS (VM_FAULT_ERROR) at some point, propagating -EFAULT from faultin_page() to __get_user_pages(), all the way to madvise_populate().

But there is an issue: __get_user_pages_locked() will end up re-taking the MM lock and then __get_user_pages() will do another VMA lookup. In the meantime, the VMA layout could have changed and we'd fail with different error codes than we'd want to. As __get_user_pages() will currently do a new VMA lookup either way, let it do the VMA handling in a different way, controlled by a new FOLL_MADV_POPULATE flag, effectively moving these checks from madvise_populate() + faultin_page_range() in there.

With this change, Darrick's reproducer properly fails with -EFAULT, as documented for MADV_POPULATE_READ / MADV_POPULATE_WRITE.

[1] https://lore.kernel.org/all/20240313171936.GN1927156@frogsfrogsfrogs/

Link: https://lkml.kernel.org/r/20240314161300.382526-1-david@redhat.com
Link: https://lkml.kernel.org/r/20240314161300.382526-2-david@redhat.com
Fixes: 4ca9b38 ("mm/madvise: introduce MADV_POPULATE_(READ|WRITE) to prefault page tables")
Signed-off-by: David Hildenbrand <david@redhat.com>
Reported-by: Darrick J. Wong <djwong@kernel.org>
Closes: https://lore.kernel.org/all/20240311223815.GW1927156@frogsfrogsfrogs/
Cc: Darrick J. Wong <djwong@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
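For illustration only (this is not Darrick's reproducer from [1]), a minimal userspace sketch of the call path being fixed; "file.img" and the 2 MiB length are placeholder assumptions, MADV_POPULATE_READ requires Linux 5.14+, and the interesting case only triggers against a mapping whose backing store fails reads, i.e. where pread() would return -EIO:

/* Sketch: prefault a file mapping via MADV_POPULATE_READ. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const size_t len = 2 * 1024 * 1024;	/* placeholder length */
	int fd = open("file.img", O_RDONLY);	/* placeholder path */
	char *p;

	if (fd < 0)
		return 1;
	p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	/*
	 * On a healthy file this prefaults the range and returns 0. If the
	 * backing store fails reads, the call used to retry forever; with
	 * this fix it fails with EFAULT, as documented in madvise(2).
	 */
	if (madvise(p, len, MADV_POPULATE_READ))
		fprintf(stderr, "MADV_POPULATE_READ: %s\n", strerror(errno));
	munmap(p, len);
	close(fd);
	return 0;
}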
1 parent 0bbac3f commit 631426b

3 files changed: 40 additions & 41 deletions


mm/gup.c

Lines changed: 32 additions & 22 deletions
@@ -1206,6 +1206,22 @@ static long __get_user_pages(struct mm_struct *mm,
 
 		/* first iteration or cross vma bound */
 		if (!vma || start >= vma->vm_end) {
+			/*
+			 * MADV_POPULATE_(READ|WRITE) wants to handle VMA
+			 * lookups+error reporting differently.
+			 */
+			if (gup_flags & FOLL_MADV_POPULATE) {
+				vma = vma_lookup(mm, start);
+				if (!vma) {
+					ret = -ENOMEM;
+					goto out;
+				}
+				if (check_vma_flags(vma, gup_flags)) {
+					ret = -EINVAL;
+					goto out;
+				}
+				goto retry;
+			}
 			vma = gup_vma_lookup(mm, start);
 			if (!vma && in_gate_area(mm, start)) {
 				ret = get_gate_page(mm, start & PAGE_MASK,
@@ -1685,35 +1701,35 @@ long populate_vma_page_range(struct vm_area_struct *vma,
 }
 
 /*
- * faultin_vma_page_range() - populate (prefault) page tables inside the
- *			      given VMA range readable/writable
+ * faultin_page_range() - populate (prefault) page tables inside the
+ *			  given range readable/writable
  *
  * This takes care of mlocking the pages, too, if VM_LOCKED is set.
  *
- * @vma: target vma
+ * @mm: the mm to populate page tables in
  * @start: start address
  * @end: end address
  * @write: whether to prefault readable or writable
  * @locked: whether the mmap_lock is still held
  *
- * Returns either number of processed pages in the vma, or a negative error
- * code on error (see __get_user_pages()).
+ * Returns either number of processed pages in the MM, or a negative error
+ * code on error (see __get_user_pages()). Note that this function reports
+ * errors related to VMAs, such as incompatible mappings, as expected by
+ * MADV_POPULATE_(READ|WRITE).
  *
- * vma->vm_mm->mmap_lock must be held. The range must be page-aligned and
- * covered by the VMA. If it's released, *@locked will be set to 0.
+ * The range must be page-aligned.
+ *
+ * mm->mmap_lock must be held. If it's released, *@locked will be set to 0.
 */
-long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
-			    unsigned long end, bool write, int *locked)
+long faultin_page_range(struct mm_struct *mm, unsigned long start,
+			unsigned long end, bool write, int *locked)
 {
-	struct mm_struct *mm = vma->vm_mm;
 	unsigned long nr_pages = (end - start) / PAGE_SIZE;
 	int gup_flags;
 	long ret;
 
 	VM_BUG_ON(!PAGE_ALIGNED(start));
 	VM_BUG_ON(!PAGE_ALIGNED(end));
-	VM_BUG_ON_VMA(start < vma->vm_start, vma);
-	VM_BUG_ON_VMA(end > vma->vm_end, vma);
 	mmap_assert_locked(mm);
 
 	/*
@@ -1725,19 +1741,13 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
 	 *		a poisoned page.
 	 * !FOLL_FORCE: Require proper access permissions.
 	 */
-	gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE;
+	gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE |
+		    FOLL_MADV_POPULATE;
 	if (write)
 		gup_flags |= FOLL_WRITE;
 
-	/*
-	 * We want to report -EINVAL instead of -EFAULT for any permission
-	 * problems or incompatible mappings.
-	 */
-	if (check_vma_flags(vma, gup_flags))
-		return -EINVAL;
-
-	ret = __get_user_pages(mm, start, nr_pages, gup_flags,
-			       NULL, locked);
+	ret = __get_user_pages_locked(mm, start, nr_pages, NULL, locked,
+				      gup_flags);
 	lru_add_drain();
 	return ret;
 }
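For context, the retry handling in __get_user_pages_locked() that faultin_page_range() now relies on, as a heavily simplified sketch (not the verbatim mm/gup.c code; pinning, signal handling, and partial-progress accounting are omitted):

/* Simplified sketch of __get_user_pages_locked()'s retry loop. */
for (;;) {
	ret = __get_user_pages(mm, start, nr_pages, flags, pages, locked);
	if (*locked || ret < 0)
		break;		/* done, or a hard error such as -EFAULT */
	/* VM_FAULT_RETRY: the fault handler dropped the mmap lock. */
	mmap_read_lock(mm);
	*locked = 1;
	/*
	 * FAULT_FLAG_TRIED (via FOLL_TRIED) tells the fault handler it may
	 * not return VM_FAULT_RETRY again; a still-failing fault becomes
	 * VM_FAULT_SIGBUS, which __get_user_pages() maps to -EFAULT.
	 */
	flags |= FOLL_TRIED;
}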

mm/internal.h

Lines changed: 6 additions & 4 deletions
@@ -686,9 +686,8 @@ struct anon_vma *folio_anon_vma(struct folio *folio);
 void unmap_mapping_folio(struct folio *folio);
 extern long populate_vma_page_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end, int *locked);
-extern long faultin_vma_page_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end,
-				   bool write, int *locked);
+extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
+		unsigned long end, bool write, int *locked);
 extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
 		unsigned long bytes);
 
@@ -1127,10 +1126,13 @@ enum {
 	FOLL_FAST_ONLY = 1 << 20,
 	/* allow unlocking the mmap lock */
 	FOLL_UNLOCKABLE = 1 << 21,
+	/* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
+	FOLL_MADV_POPULATE = 1 << 22,
 };
 
 #define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
-			    FOLL_FAST_ONLY | FOLL_UNLOCKABLE)
+			    FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
+			    FOLL_MADV_POPULATE)
 
 /*
  * Indicates for which pages that are write-protected in the page table,

mm/madvise.c

Lines changed: 2 additions & 15 deletions
@@ -908,27 +908,14 @@ static long madvise_populate(struct vm_area_struct *vma,
 {
 	const bool write = behavior == MADV_POPULATE_WRITE;
 	struct mm_struct *mm = vma->vm_mm;
-	unsigned long tmp_end;
 	int locked = 1;
 	long pages;
 
 	*prev = vma;
 
 	while (start < end) {
-		/*
-		 * We might have temporarily dropped the lock. For example,
-		 * our VMA might have been split.
-		 */
-		if (!vma || start >= vma->vm_end) {
-			vma = vma_lookup(mm, start);
-			if (!vma)
-				return -ENOMEM;
-		}
-
-		tmp_end = min_t(unsigned long, end, vma->vm_end);
 		/* Populate (prefault) page tables readable/writable. */
-		pages = faultin_vma_page_range(vma, start, tmp_end, write,
-					       &locked);
+		pages = faultin_page_range(mm, start, end, write, &locked);
 		if (!locked) {
 			mmap_read_lock(mm);
 			locked = 1;
@@ -949,7 +936,7 @@ static long madvise_populate(struct vm_area_struct *vma,
 		pr_warn_once("%s: unhandled return value: %ld\n",
 			     __func__, pages);
 		fallthrough;
-	case -ENOMEM:
+	case -ENOMEM: /* No VMA or out of memory. */
 		return -ENOMEM;
 	}
 }
