Skip to content

Commit 5842bcb

Browse files
x-y-zakpm00
authored and committed
mm/huge_memory: replace can_split_folio() with direct refcount calculation
can_split_folio() is just a refcount comparison, making sure only the split caller holds an extra pin. Open code it with folio_expected_ref_count() != folio_ref_count() - 1. For the extra_pins used by folio_ref_freeze(), add folio_cache_ref_count() to calculate it. Also replace folio_expected_ref_count() with folio_cache_ref_count() used by folio_ref_unfreeze(), since they are returning the same values when a folio is frozen and folio_cache_ref_count() does not have unnecessary folio_mapcount() in its implementation. Link: https://lkml.kernel.org/r/20251126210618.1971206-3-ziy@nvidia.com Signed-off-by: Zi Yan <ziy@nvidia.com> Suggested-by: David Hildenbrand (Red Hat) <david@kernel.org> Reviewed-by: Wei Yang <richard.weiyang@gmail.com> Acked-by: David Hildenbrand (Red Hat) <david@kernel.org> Cc: Balbir Singh <balbirs@nvidia.com> Cc: Baolin Wang <baolin.wang@linux.alibaba.com> Cc: Barry Song <baohua@kernel.org> Cc: Dev Jain <dev.jain@arm.com> Cc: Lance Yang <lance.yang@linux.dev> Cc: Liam Howlett <liam.howlett@oracle.com> Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> Cc: Miaohe Lin <linmiaohe@huawei.com> Cc: Naoya Horiguchi <nao.horiguchi@gmail.com> Cc: Nico Pache <npache@redhat.com> Cc: Ryan Roberts <ryan.roberts@arm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent bdd0d69 commit 5842bcb

3 files changed

Lines changed: 22 additions & 34 deletions

File tree

include/linux/huge_mm.h

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -369,7 +369,6 @@ enum split_type {
369369
SPLIT_TYPE_NON_UNIFORM,
370370
};
371371

372-
bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
373372
int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
374373
unsigned int new_order);
375374
int folio_split_unmapped(struct folio *folio, unsigned int new_order);

mm/huge_memory.c

Lines changed: 20 additions & 32 deletions
Original file line number | Diff line number | Diff line change
@@ -3455,23 +3455,6 @@ static void lru_add_split_folio(struct folio *folio, struct folio *new_folio,
34553455
}
34563456
}
34573457

3458-
/* Racy check whether the huge page can be split */
3459-
bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
3460-
{
3461-
int extra_pins;
3462-
3463-
/* Additional pins from page cache */
3464-
if (folio_test_anon(folio))
3465-
extra_pins = folio_test_swapcache(folio) ?
3466-
folio_nr_pages(folio) : 0;
3467-
else
3468-
extra_pins = folio_nr_pages(folio);
3469-
if (pextra_pins)
3470-
*pextra_pins = extra_pins;
3471-
return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins -
3472-
caller_pins;
3473-
}
3474-
34753458
static bool page_range_has_hwpoisoned(struct page *page, long nr_pages)
34763459
{
34773460
for (; nr_pages; page++, nr_pages--)
@@ -3767,11 +3750,19 @@ int folio_check_splittable(struct folio *folio, unsigned int new_order,
37673750
return 0;
37683751
}
37693752

3753+
/* Number of folio references from the pagecache or the swapcache. */
3754+
static unsigned int folio_cache_ref_count(const struct folio *folio)
3755+
{
3756+
if (folio_test_anon(folio) && !folio_test_swapcache(folio))
3757+
return 0;
3758+
return folio_nr_pages(folio);
3759+
}
3760+
37703761
static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int new_order,
37713762
struct page *split_at, struct xa_state *xas,
37723763
struct address_space *mapping, bool do_lru,
37733764
struct list_head *list, enum split_type split_type,
3774-
pgoff_t end, int *nr_shmem_dropped, int extra_pins)
3765+
pgoff_t end, int *nr_shmem_dropped)
37753766
{
37763767
struct folio *end_folio = folio_next(folio);
37773768
struct folio *new_folio, *next;
@@ -3782,10 +3773,9 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int n
37823773
VM_WARN_ON_ONCE(!mapping && end);
37833774
/* Prevent deferred_split_scan() touching ->_refcount */
37843775
ds_queue = folio_split_queue_lock(folio);
3785-
if (folio_ref_freeze(folio, 1 + extra_pins)) {
3776+
if (folio_ref_freeze(folio, folio_cache_ref_count(folio) + 1)) {
37863777
struct swap_cluster_info *ci = NULL;
37873778
struct lruvec *lruvec;
3788-
int expected_refs;
37893779

37903780
if (old_order > 1) {
37913781
if (!list_empty(&folio->_deferred_list)) {
@@ -3853,8 +3843,8 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int n
38533843

38543844
zone_device_private_split_cb(folio, new_folio);
38553845

3856-
expected_refs = folio_expected_ref_count(new_folio) + 1;
3857-
folio_ref_unfreeze(new_folio, expected_refs);
3846+
folio_ref_unfreeze(new_folio,
3847+
folio_cache_ref_count(new_folio) + 1);
38583848

38593849
if (do_lru)
38603850
lru_add_split_folio(folio, new_folio, lruvec, list);
@@ -3897,8 +3887,7 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int n
38973887
* Otherwise, a parallel folio_try_get() can grab @folio
38983888
* and its caller can see stale page cache entries.
38993889
*/
3900-
expected_refs = folio_expected_ref_count(folio) + 1;
3901-
folio_ref_unfreeze(folio, expected_refs);
3890+
folio_ref_unfreeze(folio, folio_cache_ref_count(folio) + 1);
39023891

39033892
if (do_lru)
39043893
unlock_page_lruvec(lruvec);
@@ -3947,7 +3936,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
39473936
struct folio *new_folio, *next;
39483937
int nr_shmem_dropped = 0;
39493938
int remap_flags = 0;
3950-
int extra_pins, ret;
3939+
int ret;
39513940
pgoff_t end = 0;
39523941

39533942
VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
@@ -4028,7 +4017,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
40284017
* Racy check if we can split the page, before unmap_folio() will
40294018
* split PMDs
40304019
*/
4031-
if (!can_split_folio(folio, 1, &extra_pins)) {
4020+
if (folio_expected_ref_count(folio) != folio_ref_count(folio) - 1) {
40324021
ret = -EAGAIN;
40334022
goto out_unlock;
40344023
}
@@ -4051,8 +4040,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
40514040
}
40524041

40534042
ret = __folio_freeze_and_split_unmapped(folio, new_order, split_at, &xas, mapping,
4054-
true, list, split_type, end, &nr_shmem_dropped,
4055-
extra_pins);
4043+
true, list, split_type, end, &nr_shmem_dropped);
40564044
fail:
40574045
if (mapping)
40584046
xas_unlock(&xas);
@@ -4126,20 +4114,20 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
41264114
*/
41274115
int folio_split_unmapped(struct folio *folio, unsigned int new_order)
41284116
{
4129-
int extra_pins, ret = 0;
4117+
int ret = 0;
41304118

41314119
VM_WARN_ON_ONCE_FOLIO(folio_mapped(folio), folio);
41324120
VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
41334121
VM_WARN_ON_ONCE_FOLIO(!folio_test_large(folio), folio);
41344122
VM_WARN_ON_ONCE_FOLIO(!folio_test_anon(folio), folio);
41354123

4136-
if (!can_split_folio(folio, 1, &extra_pins))
4124+
if (folio_expected_ref_count(folio) != folio_ref_count(folio) - 1)
41374125
return -EAGAIN;
41384126

41394127
local_irq_disable();
41404128
ret = __folio_freeze_and_split_unmapped(folio, new_order, &folio->page, NULL,
41414129
NULL, false, NULL, SPLIT_TYPE_UNIFORM,
4142-
0, NULL, extra_pins);
4130+
0, NULL);
41434131
local_irq_enable();
41444132
return ret;
41454133
}
@@ -4632,7 +4620,7 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
46324620
* can be split or not. So skip the check here.
46334621
*/
46344622
if (!folio_test_private(folio) &&
4635-
!can_split_folio(folio, 0, NULL))
4623+
folio_expected_ref_count(folio) != folio_ref_count(folio))
46364624
goto next;
46374625

46384626
if (!folio_trylock(folio))

mm/vmscan.c

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1284,7 +1284,8 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
12841284
goto keep_locked;
12851285
if (folio_test_large(folio)) {
12861286
/* cannot split folio, skip it */
1287-
if (!can_split_folio(folio, 1, NULL))
1287+
if (folio_expected_ref_count(folio) !=
1288+
folio_ref_count(folio) - 1)
12881289
goto activate_locked;
12891290
/*
12901291
* Split partially mapped folios right away.

0 commit comments

Comments
 (0)