Skip to content

Commit 86f35f6

Browse files
fyin1akpm00
authored and committed
rmap: add folio_add_file_rmap_range()
folio_add_file_rmap_range() allows adding a pte mapping to a specific range of a file folio. Compared to page_add_file_rmap(), it batches the __lruvec_stat updates for large folios. Link: https://lkml.kernel.org/r/20230802151406.3735276-36-willy@infradead.org Signed-off-by: Yin Fengwei <fengwei.yin@intel.com> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent de74976 commit 86f35f6

2 files changed

Lines changed: 48 additions & 14 deletions

File tree

include/linux/rmap.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -198,6 +198,8 @@ void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
198198
unsigned long address);
199199
void page_add_file_rmap(struct page *, struct vm_area_struct *,
200200
bool compound);
201+
void folio_add_file_rmap_range(struct folio *, struct page *, unsigned int nr,
202+
struct vm_area_struct *, bool compound);
201203
void page_remove_rmap(struct page *, struct vm_area_struct *,
202204
bool compound);
203205

mm/rmap.c

Lines changed: 46 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1294,31 +1294,39 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
12941294
}
12951295

12961296
/**
1297-
* page_add_file_rmap - add pte mapping to a file page
1298-
* @page: the page to add the mapping to
1297+
* folio_add_file_rmap_range - add pte mapping to page range of a folio
1298+
* @folio: The folio to add the mapping to
1299+
* @page: The first page to add
1300+
* @nr_pages: The number of pages which will be mapped
12991301
* @vma: the vm area in which the mapping is added
13001302
* @compound: charge the page as compound or small page
13011303
*
1304+
* The page range of folio is defined by [first_page, first_page + nr_pages)
1305+
*
13021306
* The caller needs to hold the pte lock.
13031307
*/
1304-
void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
1305-
bool compound)
1308+
void folio_add_file_rmap_range(struct folio *folio, struct page *page,
1309+
unsigned int nr_pages, struct vm_area_struct *vma,
1310+
bool compound)
13061311
{
1307-
struct folio *folio = page_folio(page);
13081312
atomic_t *mapped = &folio->_nr_pages_mapped;
1309-
int nr = 0, nr_pmdmapped = 0;
1310-
bool first;
1313+
unsigned int nr_pmdmapped = 0, first;
1314+
int nr = 0;
13111315

1312-
VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
1316+
VM_WARN_ON_FOLIO(compound && !folio_test_pmd_mappable(folio), folio);
13131317

13141318
/* Is page being mapped by PTE? Is this its first map to be added? */
13151319
if (likely(!compound)) {
1316-
first = atomic_inc_and_test(&page->_mapcount);
1317-
nr = first;
1318-
if (first && folio_test_large(folio)) {
1319-
nr = atomic_inc_return_relaxed(mapped);
1320-
nr = (nr < COMPOUND_MAPPED);
1321-
}
1320+
do {
1321+
first = atomic_inc_and_test(&page->_mapcount);
1322+
if (first && folio_test_large(folio)) {
1323+
first = atomic_inc_return_relaxed(mapped);
1324+
first = (first < COMPOUND_MAPPED);
1325+
}
1326+
1327+
if (first)
1328+
nr++;
1329+
} while (page++, --nr_pages > 0);
13221330
} else if (folio_test_pmd_mappable(folio)) {
13231331
/* That test is redundant: it's for safety or to optimize out */
13241332

@@ -1347,6 +1355,30 @@ void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
13471355
mlock_vma_folio(folio, vma, compound);
13481356
}
13491357

1358+
/**
1359+
* page_add_file_rmap - add pte mapping to a file page
1360+
* @page: the page to add the mapping to
1361+
* @vma: the vm area in which the mapping is added
1362+
* @compound: charge the page as compound or small page
1363+
*
1364+
* The caller needs to hold the pte lock.
1365+
*/
1366+
void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
1367+
bool compound)
1368+
{
1369+
struct folio *folio = page_folio(page);
1370+
unsigned int nr_pages;
1371+
1372+
VM_WARN_ON_ONCE_PAGE(compound && !PageTransHuge(page), page);
1373+
1374+
if (likely(!compound))
1375+
nr_pages = 1;
1376+
else
1377+
nr_pages = folio_nr_pages(folio);
1378+
1379+
folio_add_file_rmap_range(folio, page, nr_pages, vma, compound);
1380+
}
1381+
13501382
/**
13511383
* page_remove_rmap - take down pte mapping from a page
13521384
* @page: page to remove mapping from

0 commit comments

Comments
 (0)