Skip to content

Commit e05b345

Browse files
author
Matthew Wilcox (Oracle)
committed
mm: Turn page_anon_vma() into folio_anon_vma()
Move the prototype from mm.h to mm/internal.h and convert all callers to pass a folio.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
1 parent 9595d76 commit e05b345

5 files changed

Lines changed: 16 additions & 11 deletions

File tree

include/linux/mm.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1730,7 +1730,6 @@ static inline void *folio_address(const struct folio *folio)
17301730
}
17311731

17321732
extern void *page_rmapping(struct page *page);
1733-
extern struct anon_vma *page_anon_vma(struct page *page);
17341733
extern pgoff_t __page_file_index(struct page *page);
17351734

17361735
/*

mm/internal.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -392,6 +392,7 @@ static inline bool is_data_mapping(vm_flags_t flags)
392392
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
393393
struct vm_area_struct *prev);
394394
void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);
395+
struct anon_vma *folio_anon_vma(struct folio *folio);
395396

396397
#ifdef CONFIG_MMU
397398
void unmap_mapping_folio(struct folio *folio);

mm/ksm.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2554,7 +2554,8 @@ void __ksm_exit(struct mm_struct *mm)
25542554
struct page *ksm_might_need_to_copy(struct page *page,
25552555
struct vm_area_struct *vma, unsigned long address)
25562556
{
2557-
struct anon_vma *anon_vma = page_anon_vma(page);
2557+
struct folio *folio = page_folio(page);
2558+
struct anon_vma *anon_vma = folio_anon_vma(folio);
25582559
struct page *new_page;
25592560

25602561
if (PageKsm(page)) {

mm/rmap.c

Lines changed: 12 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -737,8 +737,9 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
737737
*/
738738
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
739739
{
740-
if (PageAnon(page)) {
741-
struct anon_vma *page__anon_vma = page_anon_vma(page);
740+
struct folio *folio = page_folio(page);
741+
if (folio_test_anon(folio)) {
742+
struct anon_vma *page__anon_vma = folio_anon_vma(folio);
742743
/*
743744
* Note: swapoff's unuse_vma() is more efficient with this
744745
* check, and needs it to match anon_vma when KSM is active.
@@ -748,7 +749,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
748749
return -EFAULT;
749750
} else if (!vma->vm_file) {
750751
return -EFAULT;
751-
} else if (vma->vm_file->f_mapping != compound_head(page)->mapping) {
752+
} else if (vma->vm_file->f_mapping != folio->mapping) {
752753
return -EFAULT;
753754
}
754755

@@ -1103,6 +1104,7 @@ static void __page_set_anon_rmap(struct page *page,
11031104
static void __page_check_anon_rmap(struct page *page,
11041105
struct vm_area_struct *vma, unsigned long address)
11051106
{
1107+
struct folio *folio = page_folio(page);
11061108
/*
11071109
* The page's anon-rmap details (mapping and index) are guaranteed to
11081110
* be set up correctly at this point.
@@ -1114,7 +1116,8 @@ static void __page_check_anon_rmap(struct page *page,
11141116
* are initially only visible via the pagetables, and the pte is locked
11151117
* over the call to page_add_new_anon_rmap.
11161118
*/
1117-
VM_BUG_ON_PAGE(page_anon_vma(page)->root != vma->anon_vma->root, page);
1119+
VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
1120+
folio);
11181121
VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address),
11191122
page);
11201123
}
@@ -2177,6 +2180,7 @@ void __put_anon_vma(struct anon_vma *anon_vma)
21772180
static struct anon_vma *rmap_walk_anon_lock(struct page *page,
21782181
struct rmap_walk_control *rwc)
21792182
{
2183+
struct folio *folio = page_folio(page);
21802184
struct anon_vma *anon_vma;
21812185

21822186
if (rwc->anon_lock)
@@ -2188,7 +2192,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
21882192
* are holding mmap_lock. Users without mmap_lock are required to
21892193
* take a reference count to prevent the anon_vma disappearing
21902194
*/
2191-
anon_vma = page_anon_vma(page);
2195+
anon_vma = folio_anon_vma(folio);
21922196
if (!anon_vma)
21932197
return NULL;
21942198

@@ -2208,14 +2212,15 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
22082212
static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
22092213
bool locked)
22102214
{
2215+
struct folio *folio = page_folio(page);
22112216
struct anon_vma *anon_vma;
22122217
pgoff_t pgoff_start, pgoff_end;
22132218
struct anon_vma_chain *avc;
22142219

22152220
if (locked) {
2216-
anon_vma = page_anon_vma(page);
2221+
anon_vma = folio_anon_vma(folio);
22172222
/* anon_vma disappear under us? */
2218-
VM_BUG_ON_PAGE(!anon_vma, page);
2223+
VM_BUG_ON_FOLIO(!anon_vma, folio);
22192224
} else {
22202225
anon_vma = rmap_walk_anon_lock(page, rwc);
22212226
}

mm/util.c

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -679,9 +679,8 @@ bool folio_mapped(struct folio *folio)
679679
}
680680
EXPORT_SYMBOL(folio_mapped);
681681

682-
struct anon_vma *page_anon_vma(struct page *page)
682+
struct anon_vma *folio_anon_vma(struct folio *folio)
683683
{
684-
struct folio *folio = page_folio(page);
685684
unsigned long mapping = (unsigned long)folio->mapping;
686685

687686
if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)

0 commit comments

Comments
 (0)