Skip to content

Commit 8dd1e89

Browse files
VMoola authored and akpm00 committed
mm/khugepaged: convert __collapse_huge_page_isolate() to use folios
Patch series "Some khugepaged folio conversions", v3. This patchset converts a number of functions to use folios. This cleans up some khugepaged code and removes a large number of hidden compound_head() calls. This patch (of 5): Replaces 11 calls to compound_head() with 1, and removes 1348 bytes of kernel text. Link: https://lkml.kernel.org/r/20231020183331.10770-1-vishal.moola@gmail.com Link: https://lkml.kernel.org/r/20231020183331.10770-2-vishal.moola@gmail.com Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com> Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: David Hildenbrand <david@redhat.com> Reviewed-by: Yang Shi <shy828301@gmail.com> Cc: Kefeng Wang <wangkefeng.wang@huawei.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent b7812c8 commit 8dd1e89

1 file changed

Lines changed: 23 additions & 22 deletions

File tree

mm/khugepaged.c

Lines changed: 23 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -542,6 +542,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
542542
struct list_head *compound_pagelist)
543543
{
544544
struct page *page = NULL;
545+
struct folio *folio = NULL;
545546
pte_t *_pte;
546547
int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
547548
bool writable = false;
@@ -576,7 +577,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
576577
goto out;
577578
}
578579

579-
VM_BUG_ON_PAGE(!PageAnon(page), page);
580+
folio = page_folio(page);
581+
VM_BUG_ON_FOLIO(!folio_test_anon(folio), folio);
580582

581583
if (page_mapcount(page) > 1) {
582584
++shared;
@@ -588,16 +590,15 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
588590
}
589591
}
590592

591-
if (PageCompound(page)) {
592-
struct page *p;
593-
page = compound_head(page);
593+
if (folio_test_large(folio)) {
594+
struct folio *f;
594595

595596
/*
596597
* Check if we have dealt with the compound page
597598
* already
598599
*/
599-
list_for_each_entry(p, compound_pagelist, lru) {
600-
if (page == p)
600+
list_for_each_entry(f, compound_pagelist, lru) {
601+
if (folio == f)
601602
goto next;
602603
}
603604
}
@@ -608,7 +609,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
608609
* is needed to serialize against split_huge_page
609610
* when invoked from the VM.
610611
*/
611-
if (!trylock_page(page)) {
612+
if (!folio_trylock(folio)) {
612613
result = SCAN_PAGE_LOCK;
613614
goto out;
614615
}
@@ -624,8 +625,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
624625
* but not from this process. The other process cannot write to
625626
* the page, only trigger CoW.
626627
*/
627-
if (!is_refcount_suitable(page)) {
628-
unlock_page(page);
628+
if (!is_refcount_suitable(&folio->page)) {
629+
folio_unlock(folio);
629630
result = SCAN_PAGE_COUNT;
630631
goto out;
631632
}
@@ -634,27 +635,27 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
634635
* Isolate the page to avoid collapsing an hugepage
635636
* currently in use by the VM.
636637
*/
637-
if (!isolate_lru_page(page)) {
638-
unlock_page(page);
638+
if (!folio_isolate_lru(folio)) {
639+
folio_unlock(folio);
639640
result = SCAN_DEL_PAGE_LRU;
640641
goto out;
641642
}
642-
mod_node_page_state(page_pgdat(page),
643-
NR_ISOLATED_ANON + page_is_file_lru(page),
644-
compound_nr(page));
645-
VM_BUG_ON_PAGE(!PageLocked(page), page);
646-
VM_BUG_ON_PAGE(PageLRU(page), page);
643+
node_stat_mod_folio(folio,
644+
NR_ISOLATED_ANON + folio_is_file_lru(folio),
645+
folio_nr_pages(folio));
646+
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
647+
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
647648

648-
if (PageCompound(page))
649-
list_add_tail(&page->lru, compound_pagelist);
649+
if (folio_test_large(folio))
650+
list_add_tail(&folio->lru, compound_pagelist);
650651
next:
651652
/*
652653
* If collapse was initiated by khugepaged, check that there is
653654
* enough young pte to justify collapsing the page
654655
*/
655656
if (cc->is_khugepaged &&
656-
(pte_young(pteval) || page_is_young(page) ||
657-
PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
657+
(pte_young(pteval) || folio_test_young(folio) ||
658+
folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
658659
address)))
659660
referenced++;
660661

@@ -668,13 +669,13 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
668669
result = SCAN_LACK_REFERENCED_PAGE;
669670
} else {
670671
result = SCAN_SUCCEED;
671-
trace_mm_collapse_huge_page_isolate(page, none_or_zero,
672+
trace_mm_collapse_huge_page_isolate(&folio->page, none_or_zero,
672673
referenced, writable, result);
673674
return result;
674675
}
675676
out:
676677
release_pte_pages(pte, _pte, compound_pagelist);
677-
trace_mm_collapse_huge_page_isolate(page, none_or_zero,
678+
trace_mm_collapse_huge_page_isolate(&folio->page, none_or_zero,
678679
referenced, writable, result);
679680
return result;
680681
}

0 commit comments

Comments
 (0)