Skip to content

Commit 1fec689

Browse files
Matthew Wilcox (Oracle) authored and akpm00 committed
mm: remove references to pagevec
Most of these should just refer to the LRU cache rather than the data structure used to implement the LRU cache. Link: https://lkml.kernel.org/r/20230621164557.3510324-13-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent 1a0fc81 commit 1fec689

7 files changed

Lines changed: 13 additions & 13 deletions

File tree

mm/huge_memory.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1344,7 +1344,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
13441344
/*
13451345
* See do_wp_page(): we can only reuse the folio exclusively if
13461346
* there are no additional references. Note that we always drain
1347-
* the LRU pagevecs immediately after adding a THP.
1347+
* the LRU cache immediately after adding a THP.
13481348
*/
13491349
if (folio_ref_count(folio) >
13501350
1 + folio_test_swapcache(folio) * folio_nr_pages(folio))

mm/khugepaged.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1051,7 +1051,7 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
10511051
if (pte)
10521052
pte_unmap(pte);
10531053

1054-
/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
1054+
/* Drain LRU cache to remove extra pin on the swapped in pages */
10551055
if (swapped_in)
10561056
lru_add_drain();
10571057

@@ -1972,7 +1972,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
19721972
result = SCAN_FAIL;
19731973
goto xa_unlocked;
19741974
}
1975-
/* drain pagevecs to help isolate_lru_page() */
1975+
/* drain lru cache to help isolate_lru_page() */
19761976
lru_add_drain();
19771977
page = folio_file_page(folio, index);
19781978
} else if (trylock_page(page)) {
@@ -1988,7 +1988,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
19881988
page_cache_sync_readahead(mapping, &file->f_ra,
19891989
file, index,
19901990
end - index);
1991-
/* drain pagevecs to help isolate_lru_page() */
1991+
/* drain lru cache to help isolate_lru_page() */
19921992
lru_add_drain();
19931993
page = find_lock_page(mapping, index);
19941994
if (unlikely(page == NULL)) {

mm/ksm.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -932,7 +932,7 @@ static int remove_stable_node(struct ksm_stable_node *stable_node)
932932
* The stable node did not yet appear stale to get_ksm_page(),
933933
* since that allows for an unmapped ksm page to be recognized
934934
* right up until it is freed; but the node is safe to remove.
935-
* This page might be in a pagevec waiting to be freed,
935+
* This page might be in an LRU cache waiting to be freed,
936936
* or it might be PageSwapCache (perhaps under writeback),
937937
* or it might have been removed from swapcache a moment ago.
938938
*/
@@ -2303,8 +2303,8 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
23032303
trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items);
23042304

23052305
/*
2306-
* A number of pages can hang around indefinitely on per-cpu
2307-
* pagevecs, raised page count preventing write_protect_page
2306+
* A number of pages can hang around indefinitely in per-cpu
2307+
* LRU cache, raised page count preventing write_protect_page
23082308
* from merging them. Though it doesn't really matter much,
23092309
* it is puzzling to see some stuck in pages_volatile until
23102310
* other activity jostles them out, and they also prevented

mm/memory.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3401,8 +3401,8 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
34013401
goto copy;
34023402
if (!folio_test_lru(folio))
34033403
/*
3404-
* Note: We cannot easily detect+handle references from
3405-
* remote LRU pagevecs or references to LRU folios.
3404+
* We cannot easily detect+handle references from
3405+
* remote LRU caches or references to LRU folios.
34063406
*/
34073407
lru_add_drain();
34083408
if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio))
@@ -3880,7 +3880,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
38803880
* If we want to map a page that's in the swapcache writable, we
38813881
* have to detect via the refcount if we're really the exclusive
38823882
* owner. Try removing the extra reference from the local LRU
3883-
* pagevecs if required.
3883+
* caches if required.
38843884
*/
38853885
if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache &&
38863886
!folio_test_ksm(folio) && !folio_test_lru(folio))

mm/migrate_device.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -376,7 +376,7 @@ static unsigned long migrate_device_unmap(unsigned long *src_pfns,
376376
/* ZONE_DEVICE pages are not on LRU */
377377
if (!is_zone_device_page(page)) {
378378
if (!PageLRU(page) && allow_drain) {
379-
/* Drain CPU's pagevec */
379+
/* Drain CPU's lru cache */
380380
lru_add_drain_all();
381381
allow_drain = false;
382382
}

mm/swap.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -76,7 +76,7 @@ static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {
7676

7777
/*
7878
* This path almost never happens for VM activity - pages are normally freed
79-
* via pagevecs. But it gets used by networking - and for compound pages.
79+
* in batches. But it gets used by networking - and for compound pages.
8080
*/
8181
static void __page_cache_release(struct folio *folio)
8282
{

mm/truncate.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -565,7 +565,7 @@ EXPORT_SYMBOL(invalidate_mapping_pages);
565565
* refcount. We do this because invalidate_inode_pages2() needs stronger
566566
* invalidation guarantees, and cannot afford to leave pages behind because
567567
* shrink_page_list() has a temp ref on them, or because they're transiently
568-
* sitting in the folio_add_lru() pagevecs.
568+
* sitting in the folio_add_lru() caches.
569569
*/
570570
static int invalidate_complete_folio2(struct address_space *mapping,
571571
struct folio *folio)

0 commit comments

Comments (0)