Skip to content

Commit 9595d76

Browse files
Author: Matthew Wilcox (Oracle) (committed)
mm/rmap: Turn page_lock_anon_vma_read() into folio_lock_anon_vma_read()
Add back page_lock_anon_vma_read() as a wrapper. This saves a few calls to compound_head(). If any callers were passing a tail page before, this would have failed to lock the anon VMA, as page->mapping is not valid for tail pages.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
1 parent c842318 commit 9595d76

4 files changed

Lines changed: 16 additions & 7 deletions

File tree

include/linux/rmap.h

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -267,6 +267,7 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked);
267267
* Called by memory-failure.c to kill processes.
268268
*/
269269
struct anon_vma *page_lock_anon_vma_read(struct page *page);
270+
struct anon_vma *folio_lock_anon_vma_read(struct folio *folio);
270271
void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
271272
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
272273

mm/folio-compat.c

Lines changed: 7 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -164,3 +164,10 @@ void putback_lru_page(struct page *page)
164164
{
165165
folio_putback_lru(page_folio(page));
166166
}
167+
168+
#ifdef CONFIG_MMU
169+
struct anon_vma *page_lock_anon_vma_read(struct page *page)
170+
{
171+
return folio_lock_anon_vma_read(page_folio(page));
172+
}
173+
#endif

mm/memory-failure.c

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -487,12 +487,13 @@ static struct task_struct *task_early_kill(struct task_struct *tsk,
487487
static void collect_procs_anon(struct page *page, struct list_head *to_kill,
488488
int force_early)
489489
{
490+
struct folio *folio = page_folio(page);
490491
struct vm_area_struct *vma;
491492
struct task_struct *tsk;
492493
struct anon_vma *av;
493494
pgoff_t pgoff;
494495

495-
av = page_lock_anon_vma_read(page);
496+
av = folio_lock_anon_vma_read(folio);
496497
if (av == NULL) /* Not actually mapped anymore */
497498
return;
498499

mm/rmap.c

Lines changed: 6 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -526,28 +526,28 @@ struct anon_vma *page_get_anon_vma(struct page *page)
526526
* atomic op -- the trylock. If we fail the trylock, we fall back to getting a
527527
* reference like with page_get_anon_vma() and then block on the mutex.
528528
*/
529-
struct anon_vma *page_lock_anon_vma_read(struct page *page)
529+
struct anon_vma *folio_lock_anon_vma_read(struct folio *folio)
530530
{
531531
struct anon_vma *anon_vma = NULL;
532532
struct anon_vma *root_anon_vma;
533533
unsigned long anon_mapping;
534534

535535
rcu_read_lock();
536-
anon_mapping = (unsigned long)READ_ONCE(page->mapping);
536+
anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
537537
if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
538538
goto out;
539-
if (!page_mapped(page))
539+
if (!folio_mapped(folio))
540540
goto out;
541541

542542
anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
543543
root_anon_vma = READ_ONCE(anon_vma->root);
544544
if (down_read_trylock(&root_anon_vma->rwsem)) {
545545
/*
546-
* If the page is still mapped, then this anon_vma is still
546+
* If the folio is still mapped, then this anon_vma is still
547547
* its anon_vma, and holding the mutex ensures that it will
548548
* not go away, see anon_vma_free().
549549
*/
550-
if (!page_mapped(page)) {
550+
if (!folio_mapped(folio)) {
551551
up_read(&root_anon_vma->rwsem);
552552
anon_vma = NULL;
553553
}
@@ -560,7 +560,7 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page)
560560
goto out;
561561
}
562562

563-
if (!page_mapped(page)) {
563+
if (!folio_mapped(folio)) {
564564
rcu_read_unlock();
565565
put_anon_vma(anon_vma);
566566
return NULL;

0 commit comments

Comments (0)