
Commit 0d25148

Author: Matthew Wilcox (Oracle) (authored and committed)

mm/rmap: Convert make_device_exclusive_range() to use folios

Move the PageTail check earlier so we can avoid even taking the folio
lock on tail pages.  Otherwise, this is a straightforward use of folios
throughout.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
1 parent 4b8554c commit 0d25148
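
The reordering described in the commit message works because page_folio() on a tail page still resolves to the (head) folio, so taking that folio's lock for a tail page buys nothing; checking PageTail() first skips such pages before any lock is touched. A minimal sketch of the pattern follows; the helper name try_exclusive_lock() is hypothetical, only page_folio(), PageTail(), folio_trylock() and folio_unlock() are real kernel APIs:

	/* Sketch only -- illustrates "check PageTail before locking the folio". */
	static bool try_exclusive_lock(struct page *page)
	{
		struct folio *folio = page_folio(page);	/* tail pages resolve to the head folio */

		if (PageTail(page))		/* cheap flag test, no lock taken */
			return false;

		if (!folio_trylock(folio))	/* lock only pages we will actually use */
			return false;

		/* ... operate on the folio under its lock ... */
		folio_unlock(folio);
		return true;
	}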

1 file changed

Lines changed: 31 additions & 27 deletions

File tree

mm/rmap.c

@@ -1990,8 +1990,9 @@ struct make_exclusive_args {
 static bool page_make_device_exclusive_one(struct page *page,
 		struct vm_area_struct *vma, unsigned long address, void *priv)
 {
+	struct folio *folio = page_folio(page);
 	struct mm_struct *mm = vma->vm_mm;
-	DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0);
+	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
 	struct make_exclusive_args *args = priv;
 	pte_t pteval;
 	struct page *subpage;
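
A note on this first hunk: page_folio() accepts any page, head or tail, so the callback can resolve its folio once at the top and use folio-wide helpers from then on. A small standalone sketch of that idea (the variable names here are illustrative, not from the patch):

	/* Sketch: resolve the page to its folio once, then use folio-wide state. */
	struct folio *folio = page_folio(page);

	unsigned long first_pfn = folio_pfn(folio);	/* PFN of the folio's first page */
	size_t span = folio_size(folio);		/* bytes covered by the whole folio */

	/* The rmap walk is then initialised against the folio: */
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);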
@@ -2002,29 +2003,31 @@ static bool page_make_device_exclusive_one(struct page *page,
 
 	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
 				      vma->vm_mm, address, min(vma->vm_end,
-				      address + page_size(page)), args->owner);
+				      address + folio_size(folio)),
+				      args->owner);
 	mmu_notifier_invalidate_range_start(&range);
 
 	while (page_vma_mapped_walk(&pvmw)) {
 		/* Unexpected PMD-mapped THP? */
-		VM_BUG_ON_PAGE(!pvmw.pte, page);
+		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
 
 		if (!pte_present(*pvmw.pte)) {
 			ret = false;
 			page_vma_mapped_walk_done(&pvmw);
 			break;
 		}
 
-		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
+		subpage = folio_page(folio,
+				pte_pfn(*pvmw.pte) - folio_pfn(folio));
 		address = pvmw.address;
 
 		/* Nuke the page table entry. */
 		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
 		pteval = ptep_clear_flush(vma, address, pvmw.pte);
 
-		/* Move the dirty bit to the page. Now the pte is gone. */
+		/* Set the dirty flag on the folio now the pte is gone. */
 		if (pte_dirty(pteval))
-			set_page_dirty(page);
+			folio_mark_dirty(folio);
 
 		/*
 		 * Check that our target page is still mapped at the expected
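
The subpage line in this hunk swaps struct-page pointer arithmetic for an index into the folio. A worked example with invented PFNs may help:

	/*
	 * Sketch of the arithmetic (PFN values invented for illustration).
	 * Say the folio starts at PFN 0x1000, i.e. folio_pfn(folio) == 0x1000,
	 * and the PTE maps PFN 0x1003, i.e. pte_pfn(*pvmw.pte) == 0x1003.
	 *
	 *	index   = 0x1003 - 0x1000 = 3;
	 *	subpage = folio_page(folio, 3);		-- the folio's fourth page
	 *
	 * The old expression "page - page_to_pfn(page) + pte_pfn(*pvmw.pte)"
	 * computed the same struct page by raw pointer arithmetic; the folio
	 * form makes the "index into the folio" intent explicit.
	 */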
@@ -2066,21 +2069,22 @@ static bool page_make_device_exclusive_one(struct page *page,
 }
 
 /**
- * page_make_device_exclusive - mark the page exclusively owned by a device
- * @page: the page to replace page table entries for
- * @mm: the mm_struct where the page is expected to be mapped
- * @address: address where the page is expected to be mapped
+ * folio_make_device_exclusive - Mark the folio exclusively owned by a device.
+ * @folio: The folio to replace page table entries for.
+ * @mm: The mm_struct where the folio is expected to be mapped.
+ * @address: Address where the folio is expected to be mapped.
  * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks
  *
- * Tries to remove all the page table entries which are mapping this page and
- * replace them with special device exclusive swap entries to grant a device
- * exclusive access to the page. Caller must hold the page lock.
+ * Tries to remove all the page table entries which are mapping this
+ * folio and replace them with special device exclusive swap entries to
+ * grant a device exclusive access to the folio.
  *
- * Returns false if the page is still mapped, or if it could not be unmapped
+ * Context: Caller must hold the folio lock.
+ * Return: false if the page is still mapped, or if it could not be unmapped
  * from the expected address. Otherwise returns true (success).
  */
-static bool page_make_device_exclusive(struct page *page, struct mm_struct *mm,
-		unsigned long address, void *owner)
+static bool folio_make_device_exclusive(struct folio *folio,
+		struct mm_struct *mm, unsigned long address, void *owner)
 {
 	struct make_exclusive_args args = {
 		.mm = mm,
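
The rmap_walk_control that drives this function is declared a few lines below the context shown and is not part of the hunk. Roughly, the wiring looks like the sketch below; the field values are an assumption based on the callback converted above, not taken from this diff:

	/* Sketch (assumed): how the walk is typically wired to the callback. */
	struct rmap_walk_control rwc = {
		.rmap_one = page_make_device_exclusive_one,	/* callback shown earlier */
		.arg = &args,					/* struct make_exclusive_args */
		/* .done / .anon_lock hooks exist too; see the full mm/rmap.c source */
	};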
@@ -2096,16 +2100,15 @@ static bool page_make_device_exclusive(struct page *page, struct mm_struct *mm,
 	};
 
 	/*
-	 * Restrict to anonymous pages for now to avoid potential writeback
-	 * issues. Also tail pages shouldn't be passed to rmap_walk so skip
-	 * those.
+	 * Restrict to anonymous folios for now to avoid potential writeback
+	 * issues.
 	 */
-	if (!PageAnon(page) || PageTail(page))
+	if (!folio_test_anon(folio))
 		return false;
 
-	rmap_walk(page, &rwc);
+	rmap_walk(&folio->page, &rwc);
 
-	return args.valid && !page_mapcount(page);
+	return args.valid && !folio_mapcount(folio);
 }
 
 /**
@@ -2143,15 +2146,16 @@ int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
 		return npages;
 
 	for (i = 0; i < npages; i++, start += PAGE_SIZE) {
-		if (!trylock_page(pages[i])) {
-			put_page(pages[i]);
+		struct folio *folio = page_folio(pages[i]);
+		if (PageTail(pages[i]) || !folio_trylock(folio)) {
+			folio_put(folio);
 			pages[i] = NULL;
 			continue;
 		}
 
-		if (!page_make_device_exclusive(pages[i], mm, start, owner)) {
-			unlock_page(pages[i]);
-			put_page(pages[i]);
+		if (!folio_make_device_exclusive(folio, mm, start, owner)) {
+			folio_unlock(folio);
+			folio_put(folio);
 			pages[i] = NULL;
 		}
 	}
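
For context on this last hunk: entries that cannot be locked or made exclusive are cleared to NULL after being unlocked and dropped, while the ones that succeed stay referenced and locked, so the caller releases them itself. A rough usage sketch; the surrounding driver code, variable names and the owner cookie are assumptions, only make_device_exclusive_range() and the generic mm helpers are real:

	/* Sketch: ask for device-exclusive access to one page of a process. */
	struct page *pages[1] = { NULL };
	int npages;

	mmap_read_lock(mm);			/* the GUP-based lookup expects mmap_lock held */
	npages = make_device_exclusive_range(mm, addr, addr + PAGE_SIZE,
					     pages, my_owner_cookie);
	mmap_read_unlock(mm);

	if (npages == 1 && pages[0]) {
		/*
		 * pages[0] is now covered by device-exclusive entries; program
		 * the device, then release what the helper took for us.
		 */
		unlock_page(pages[0]);
		put_page(pages[0]);
	}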
