Skip to content

Commit af28a98

Browse files
Author: Matthew Wilcox (Oracle) — committed
mm/huge_memory: Convert __split_huge_pmd() to take a folio
Convert split_huge_pmd_address() at the same time since it only passes the folio through, and its two callers already have a folio on hand. Removes numerous calls to compound_head() and removes an assumption that a page cannot be larger than a PMD. Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
1 parent b3ac041 commit af28a98

3 files changed

Lines changed: 31 additions & 29 deletions

File tree

include/linux/huge_mm.h

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -194,7 +194,7 @@ static inline int split_huge_page(struct page *page)
194194
void deferred_split_huge_page(struct page *page);
195195

196196
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
197-
unsigned long address, bool freeze, struct page *page);
197+
unsigned long address, bool freeze, struct folio *folio);
198198

199199
#define split_huge_pmd(__vma, __pmd, __address) \
200200
do { \
@@ -207,7 +207,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
207207

208208

209209
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
210-
bool freeze, struct page *page);
210+
bool freeze, struct folio *folio);
211211

212212
void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
213213
unsigned long address);
@@ -406,9 +406,9 @@ static inline void deferred_split_huge_page(struct page *page) {}
406406
do { } while (0)
407407

408408
static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
409-
unsigned long address, bool freeze, struct page *page) {}
409+
unsigned long address, bool freeze, struct folio *folio) {}
410410
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
411-
unsigned long address, bool freeze, struct page *page) {}
411+
unsigned long address, bool freeze, struct folio *folio) {}
412412

413413
#define split_huge_pud(__vma, __pmd, __address) \
414414
do { } while (0)

mm/huge_memory.c

Lines changed: 23 additions & 23 deletions
Original file line number | Diff line number | Diff line change
@@ -2113,11 +2113,11 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
21132113
}
21142114

21152115
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
2116-
unsigned long address, bool freeze, struct page *page)
2116+
unsigned long address, bool freeze, struct folio *folio)
21172117
{
21182118
spinlock_t *ptl;
21192119
struct mmu_notifier_range range;
2120-
bool do_unlock_page = false;
2120+
bool do_unlock_folio = false;
21212121
pmd_t _pmd;
21222122

21232123
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
@@ -2127,52 +2127,52 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
21272127
ptl = pmd_lock(vma->vm_mm, pmd);
21282128

21292129
/*
2130-
* If caller asks to setup a migration entries, we need a page to check
2131-
* pmd against. Otherwise we can end up replacing wrong page.
2130+
* If caller asks to setup a migration entry, we need a folio to check
2131+
* pmd against. Otherwise we can end up replacing wrong folio.
21322132
*/
2133-
VM_BUG_ON(freeze && !page);
2134-
if (page) {
2135-
VM_WARN_ON_ONCE(!PageLocked(page));
2136-
if (page != pmd_page(*pmd))
2133+
VM_BUG_ON(freeze && !folio);
2134+
if (folio) {
2135+
VM_WARN_ON_ONCE(!folio_test_locked(folio));
2136+
if (folio != page_folio(pmd_page(*pmd)))
21372137
goto out;
21382138
}
21392139

21402140
repeat:
21412141
if (pmd_trans_huge(*pmd)) {
2142-
if (!page) {
2143-
page = pmd_page(*pmd);
2142+
if (!folio) {
2143+
folio = page_folio(pmd_page(*pmd));
21442144
/*
21452145
* An anonymous page must be locked, to ensure that a
21462146
* concurrent reuse_swap_page() sees stable mapcount;
21472147
* but reuse_swap_page() is not used on shmem or file,
21482148
* and page lock must not be taken when zap_pmd_range()
21492149
* calls __split_huge_pmd() while i_mmap_lock is held.
21502150
*/
2151-
if (PageAnon(page)) {
2152-
if (unlikely(!trylock_page(page))) {
2153-
get_page(page);
2151+
if (folio_test_anon(folio)) {
2152+
if (unlikely(!folio_trylock(folio))) {
2153+
folio_get(folio);
21542154
_pmd = *pmd;
21552155
spin_unlock(ptl);
2156-
lock_page(page);
2156+
folio_lock(folio);
21572157
spin_lock(ptl);
21582158
if (unlikely(!pmd_same(*pmd, _pmd))) {
2159-
unlock_page(page);
2160-
put_page(page);
2161-
page = NULL;
2159+
folio_unlock(folio);
2160+
folio_put(folio);
2161+
folio = NULL;
21622162
goto repeat;
21632163
}
2164-
put_page(page);
2164+
folio_put(folio);
21652165
}
2166-
do_unlock_page = true;
2166+
do_unlock_folio = true;
21672167
}
21682168
}
21692169
} else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
21702170
goto out;
21712171
__split_huge_pmd_locked(vma, pmd, range.start, freeze);
21722172
out:
21732173
spin_unlock(ptl);
2174-
if (do_unlock_page)
2175-
unlock_page(page);
2174+
if (do_unlock_folio)
2175+
folio_unlock(folio);
21762176
/*
21772177
* No need to double call mmu_notifier->invalidate_range() callback.
21782178
* They are 3 cases to consider inside __split_huge_pmd_locked():
@@ -2190,7 +2190,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
21902190
}
21912191

21922192
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
2193-
bool freeze, struct page *page)
2193+
bool freeze, struct folio *folio)
21942194
{
21952195
pgd_t *pgd;
21962196
p4d_t *p4d;
@@ -2211,7 +2211,7 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
22112211

22122212
pmd = pmd_offset(pud, address);
22132213

2214-
__split_huge_pmd(vma, pmd, address, freeze, page);
2214+
__split_huge_pmd(vma, pmd, address, freeze, folio);
22152215
}
22162216

22172217
static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)

mm/rmap.c

Lines changed: 4 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -1410,6 +1410,7 @@ void page_remove_rmap(struct page *page,
14101410
static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
14111411
unsigned long address, void *arg)
14121412
{
1413+
struct folio *folio = page_folio(page);
14131414
struct mm_struct *mm = vma->vm_mm;
14141415
DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0);
14151416
pte_t pteval;
@@ -1428,7 +1429,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
14281429
pvmw.flags = PVMW_SYNC;
14291430

14301431
if (flags & TTU_SPLIT_HUGE_PMD)
1431-
split_huge_pmd_address(vma, address, false, page);
1432+
split_huge_pmd_address(vma, address, false, folio);
14321433

14331434
/*
14341435
* For THP, we have to assume the worse case ie pmd for invalidation.
@@ -1700,6 +1701,7 @@ void try_to_unmap(struct page *page, enum ttu_flags flags)
17001701
static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
17011702
unsigned long address, void *arg)
17021703
{
1704+
struct folio *folio = page_folio(page);
17031705
struct mm_struct *mm = vma->vm_mm;
17041706
DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0);
17051707
pte_t pteval;
@@ -1722,7 +1724,7 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
17221724
* TTU_SPLIT_HUGE_PMD and it wants to freeze.
17231725
*/
17241726
if (flags & TTU_SPLIT_HUGE_PMD)
1725-
split_huge_pmd_address(vma, address, true, page);
1727+
split_huge_pmd_address(vma, address, true, folio);
17261728

17271729
/*
17281730
* For THP, we have to assume the worse case ie pmd for invalidation.

0 commit comments

Comments (0)