Skip to content

Commit 1e0877d

Browse files
Matthew Wilcox (Oracle) authored and akpm00 committed
mm: remove struct pagevec
All users are now converted to use the folio_batch so we can get rid of this data structure.

Link: https://lkml.kernel.org/r/20230621164557.3510324-11-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent 76fa884 commit 1e0877d

2 files changed

Lines changed: 13 additions & 68 deletions

File tree

include/linux/pagevec.h

Lines changed: 4 additions & 59 deletions
Original file line numberDiff line numberDiff line change
@@ -3,65 +3,18 @@
33
* include/linux/pagevec.h
44
*
55
* In many places it is efficient to batch an operation up against multiple
6-
* pages. A pagevec is a multipage container which is used for that.
6+
* folios. A folio_batch is a container which is used for that.
77
*/
88

99
#ifndef _LINUX_PAGEVEC_H
1010
#define _LINUX_PAGEVEC_H
1111

12-
#include <linux/xarray.h>
12+
#include <linux/types.h>
1313

14-
/* 15 pointers + header align the pagevec structure to a power of two */
14+
/* 15 pointers + header align the folio_batch structure to a power of two */
1515
#define PAGEVEC_SIZE 15
1616

17-
struct page;
1817
struct folio;
19-
struct address_space;
20-
21-
/* Layout must match folio_batch */
22-
struct pagevec {
23-
unsigned char nr;
24-
bool percpu_pvec_drained;
25-
struct page *pages[PAGEVEC_SIZE];
26-
};
27-
28-
void __pagevec_release(struct pagevec *pvec);
29-
30-
static inline void pagevec_init(struct pagevec *pvec)
31-
{
32-
pvec->nr = 0;
33-
pvec->percpu_pvec_drained = false;
34-
}
35-
36-
static inline void pagevec_reinit(struct pagevec *pvec)
37-
{
38-
pvec->nr = 0;
39-
}
40-
41-
static inline unsigned pagevec_count(struct pagevec *pvec)
42-
{
43-
return pvec->nr;
44-
}
45-
46-
static inline unsigned pagevec_space(struct pagevec *pvec)
47-
{
48-
return PAGEVEC_SIZE - pvec->nr;
49-
}
50-
51-
/*
52-
* Add a page to a pagevec. Returns the number of slots still available.
53-
*/
54-
static inline unsigned pagevec_add(struct pagevec *pvec, struct page *page)
55-
{
56-
pvec->pages[pvec->nr++] = page;
57-
return pagevec_space(pvec);
58-
}
59-
60-
static inline void pagevec_release(struct pagevec *pvec)
61-
{
62-
if (pagevec_count(pvec))
63-
__pagevec_release(pvec);
64-
}
6518

6619
/**
6720
* struct folio_batch - A collection of folios.
@@ -78,11 +31,6 @@ struct folio_batch {
7831
struct folio *folios[PAGEVEC_SIZE];
7932
};
8033

81-
/* Layout must match pagevec */
82-
static_assert(sizeof(struct pagevec) == sizeof(struct folio_batch));
83-
static_assert(offsetof(struct pagevec, pages) ==
84-
offsetof(struct folio_batch, folios));
85-
8634
/**
8735
* folio_batch_init() - Initialise a batch of folios
8836
* @fbatch: The folio batch.
@@ -127,10 +75,7 @@ static inline unsigned folio_batch_add(struct folio_batch *fbatch,
12775
return folio_batch_space(fbatch);
12876
}
12977

130-
static inline void __folio_batch_release(struct folio_batch *fbatch)
131-
{
132-
__pagevec_release((struct pagevec *)fbatch);
133-
}
78+
void __folio_batch_release(struct folio_batch *pvec);
13479

13580
static inline void folio_batch_release(struct folio_batch *fbatch)
13681
{

mm/swap.c

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1044,25 +1044,25 @@ void release_pages(release_pages_arg arg, int nr)
10441044
EXPORT_SYMBOL(release_pages);
10451045

10461046
/*
1047-
* The pages which we're about to release may be in the deferred lru-addition
1047+
* The folios which we're about to release may be in the deferred lru-addition
10481048
* queues. That would prevent them from really being freed right now. That's
1049-
* OK from a correctness point of view but is inefficient - those pages may be
1049+
* OK from a correctness point of view but is inefficient - those folios may be
10501050
* cache-warm and we want to give them back to the page allocator ASAP.
10511051
*
1052-
* So __pagevec_release() will drain those queues here.
1052+
* So __folio_batch_release() will drain those queues here.
10531053
* folio_batch_move_lru() calls folios_put() directly to avoid
10541054
* mutual recursion.
10551055
*/
1056-
void __pagevec_release(struct pagevec *pvec)
1056+
void __folio_batch_release(struct folio_batch *fbatch)
10571057
{
1058-
if (!pvec->percpu_pvec_drained) {
1058+
if (!fbatch->percpu_pvec_drained) {
10591059
lru_add_drain();
1060-
pvec->percpu_pvec_drained = true;
1060+
fbatch->percpu_pvec_drained = true;
10611061
}
1062-
release_pages(pvec->pages, pagevec_count(pvec));
1063-
pagevec_reinit(pvec);
1062+
release_pages(fbatch->folios, folio_batch_count(fbatch));
1063+
folio_batch_reinit(fbatch);
10641064
}
1065-
EXPORT_SYMBOL(__pagevec_release);
1065+
EXPORT_SYMBOL(__folio_batch_release);
10661066

10671067
/**
10681068
* folio_batch_remove_exceptionals() - Prune non-folios from a batch.

0 commit comments

Comments (0)