
Commit 5c447d2

shakeelb authored and torvalds committed
mm: fix numa stats for thp migration
Currently the kernel is not correctly updating the numa stats for NR_FILE_PAGES and NR_SHMEM on THP migration. Fix that.

For NR_FILE_DIRTY and NR_ZONE_WRITE_PENDING, there is no need to handle THP migration at the moment, as the kernel does not yet have write support for file THP; but to be more future proof, this patch adds THP support for those stats as well.

Link: https://lkml.kernel.org/r/20210108155813.2914586-2-shakeelb@google.com
Fixes: e71769a ("mm: enable thp migration for shmem thp")
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Yang Shi <shy828301@gmail.com>
Reviewed-by: Roman Gushchin <guro@fb.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 8a8792f commit 5c447d2
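
Why the stats were off: each __dec_lruvec_state()/__inc_lruvec_state() pair moves exactly one page's worth of accounting, but a migrated THP is backed by thp_nr_pages(page) base pages (HPAGE_PMD_NR, i.e. 512 with 4 KiB base pages on x86-64). The following is a minimal userspace model of that skew, not kernel code; the counter and function names are illustrative only:

/*
 * Userspace model of the accounting bug fixed by this patch.
 * Migrating a THP moves thp_nr_pages(page) base pages between
 * nodes, so node stats must be adjusted by that count, not by 1.
 */
#include <stdio.h>

#define HPAGE_PMD_NR 512	/* 2 MiB THP / 4 KiB base pages on x86-64 */

static long nr_file_pages[2];	/* stand-in for per-node NR_FILE_PAGES */

/* Old behaviour: the dec/inc pair always moves exactly one page. */
static void migrate_stats_old(int src, int dst)
{
	nr_file_pages[src] -= 1;
	nr_file_pages[dst] += 1;
}

/* Fixed behaviour: scale the update by the number of base pages. */
static void migrate_stats_new(int src, int dst, int nr)
{
	nr_file_pages[src] -= nr;
	nr_file_pages[dst] += nr;
}

int main(void)
{
	nr_file_pages[0] = HPAGE_PMD_NR;	/* one file THP on node 0 */
	migrate_stats_old(0, 1);
	printf("old: node0=%ld node1=%ld (each off by %d pages)\n",
	       nr_file_pages[0], nr_file_pages[1], HPAGE_PMD_NR - 1);

	nr_file_pages[0] = HPAGE_PMD_NR;	/* reset and migrate again, fixed */
	nr_file_pages[1] = 0;
	migrate_stats_new(0, 1, HPAGE_PMD_NR);
	printf("new: node0=%ld node1=%ld\n",
	       nr_file_pages[0], nr_file_pages[1]);
	return 0;
}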

1 file changed: mm/migrate.c (12 additions & 11 deletions)
@@ -402,6 +402,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	struct zone *oldzone, *newzone;
 	int dirty;
 	int expected_count = expected_page_refs(mapping, page) + extra_count;
+	int nr = thp_nr_pages(page);
 
 	if (!mapping) {
 		/* Anonymous page without mapping */
@@ -437,7 +438,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 */
 	newpage->index = page->index;
 	newpage->mapping = page->mapping;
-	page_ref_add(newpage, thp_nr_pages(page)); /* add cache reference */
+	page_ref_add(newpage, nr); /* add cache reference */
 	if (PageSwapBacked(page)) {
 		__SetPageSwapBacked(newpage);
 		if (PageSwapCache(page)) {
@@ -459,7 +460,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	if (PageTransHuge(page)) {
 		int i;
 
-		for (i = 1; i < HPAGE_PMD_NR; i++) {
+		for (i = 1; i < nr; i++) {
 			xas_next(&xas);
 			xas_store(&xas, newpage);
 		}
@@ -470,7 +471,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 * to one less reference.
 	 * We know this isn't the last reference.
 	 */
-	page_ref_unfreeze(page, expected_count - thp_nr_pages(page));
+	page_ref_unfreeze(page, expected_count - nr);
 
 	xas_unlock(&xas);
 	/* Leave irq disabled to prevent preemption while updating stats */
@@ -493,17 +494,17 @@ int migrate_page_move_mapping(struct address_space *mapping,
 		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
 		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
 
-		__dec_lruvec_state(old_lruvec, NR_FILE_PAGES);
-		__inc_lruvec_state(new_lruvec, NR_FILE_PAGES);
+		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
+		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
 		if (PageSwapBacked(page) && !PageSwapCache(page)) {
-			__dec_lruvec_state(old_lruvec, NR_SHMEM);
-			__inc_lruvec_state(new_lruvec, NR_SHMEM);
+			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
+			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
 		}
 		if (dirty && mapping_can_writeback(mapping)) {
-			__dec_lruvec_state(old_lruvec, NR_FILE_DIRTY);
-			__dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
-			__inc_lruvec_state(new_lruvec, NR_FILE_DIRTY);
-			__inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
+			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
+			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
+			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
+			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
 		}
 	}
 	local_irq_enable();
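
A note on the loop-bound change: inside the PageTransHuge(page) branch, thp_nr_pages(page) equals HPAGE_PMD_NR, so switching the bound to the cached nr is behaviour-neutral there; it mainly keeps every per-page quantity in the function derived from one value. A hedged userspace sketch of that slot-filling step, with a plain array standing in for the xarray and illustrative names throughout:

#include <stdio.h>

#define HPAGE_PMD_NR 512	/* base pages per PMD-sized THP (x86-64, 4K) */

int main(void)
{
	static const void *slots[HPAGE_PMD_NR];	/* model of xarray slots */
	const void *newpage = "newpage";
	int nr = HPAGE_PMD_NR;	/* what thp_nr_pages() returns for a THP */
	int i;

	slots[0] = newpage;		/* the xas_store() done before the loop */
	for (i = 1; i < nr; i++)	/* was: i < HPAGE_PMD_NR */
		slots[i] = newpage;	/* models xas_next() + xas_store() */

	printf("pointed %d xarray slots at the new page\n", nr);
	return 0;
}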
