
Commit 8a399e2

Chengming Zhou authored and tehcaster committed
slub: Keep track of whether slub is on the per-node partial list
Now we rely on the "frozen" bit to see if we should manipulate the slab->slab_list, which will be changed in the following patch.

Instead we introduce another way to keep track of whether slub is on the per-node partial list, here we reuse the PG_workingset bit.

We have to use the atomic set_bit() and clear_bit() variants and change slab_unlock() to bit_spin_unlock() because when cmpxchg is not available and PG_lock is used, there may be concurrent operations on the two bits. Thanks to Mark Brown for reporting a hang and testing of a previous version where the non-atomic operations were used.

Suggested-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
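The lost-update hazard the commit message describes can be sketched in userspace (this is an illustration, not kernel code): two bits share one flags word, and a plain read-modify-write of one bit can race with atomic updates of the other. The bit numbers, iteration counts, NONATOMIC switch and GCC __atomic builtins below are stand-ins chosen for the demo; only the hazard itself mirrors the commit's reasoning.

/*
 * Illustrative userspace demo, not kernel code: two flag bits share one
 * word.  BIT_LOCKED stands in for PG_locked (toggled by slab_lock()/
 * slab_unlock()), BIT_PARTIAL for PG_workingset.  A non-atomic
 * read-modify-write of one bit can race with the other bit's updates
 * and lose one of them; atomic fetch-or/fetch-and (like the kernel's
 * set_bit()/clear_bit()) cannot.
 */
#include <pthread.h>
#include <stdio.h>

#define BIT_LOCKED	0UL	/* stand-in for PG_locked */
#define BIT_PARTIAL	1UL	/* stand-in for PG_workingset */

static unsigned long flags;	/* stand-in for page->flags */

/* Simulates the lock bit being taken and released in another context. */
static void *toggle_lock_bit(void *arg)
{
	for (long i = 0; i < 10 * 1000 * 1000; i++) {
		__atomic_fetch_or(&flags, 1UL << BIT_LOCKED, __ATOMIC_ACQUIRE);
		__atomic_fetch_and(&flags, ~(1UL << BIT_LOCKED), __ATOMIC_RELEASE);
	}
	return arg;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, toggle_lock_bit, NULL);

	for (long i = 0; i < 10 * 1000 * 1000; i++) {
#ifdef NONATOMIC
		/* Like __set_bit()/__clear_bit(): plain RMW, racy here. */
		flags |= 1UL << BIT_PARTIAL;
		flags &= ~(1UL << BIT_PARTIAL);
#else
		/* Like set_bit()/clear_bit(): atomic, safe on a shared word. */
		__atomic_fetch_or(&flags, 1UL << BIT_PARTIAL, __ATOMIC_RELAXED);
		__atomic_fetch_and(&flags, ~(1UL << BIT_PARTIAL), __ATOMIC_RELAXED);
#endif
	}

	pthread_join(&t, NULL);
	printf("final flags: %#lx (both bits should be clear)\n", flags);
	return 0;
}

Built as-is (gcc -pthread demo.c), both bits come out clear; built with -DNONATOMIC, the plain read-modify-write may overwrite a concurrent update of the lock bit and leave a stale bit set. That is the situation the patch avoids by keeping set_bit()/clear_bit() atomic and switching slab_unlock() to bit_spin_unlock() when cmpxchg is not available.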
1 parent 43c4c34 commit 8a399e2

1 file changed

Lines changed: 23 additions & 1 deletion

File tree

mm/slub.c

@@ -522,7 +522,7 @@ static __always_inline void slab_unlock(struct slab *slab)
 	struct page *page = slab_page(slab);
 
 	VM_BUG_ON_PAGE(PageTail(page), page);
-	__bit_spin_unlock(PG_locked, &page->flags);
+	bit_spin_unlock(PG_locked, &page->flags);
 }
 
 static inline bool
@@ -2116,6 +2116,25 @@ static void discard_slab(struct kmem_cache *s, struct slab *slab)
 	free_slab(s, slab);
 }
 
+/*
+ * SLUB reuses PG_workingset bit to keep track of whether it's on
+ * the per-node partial list.
+ */
+static inline bool slab_test_node_partial(const struct slab *slab)
+{
+	return folio_test_workingset((struct folio *)slab_folio(slab));
+}
+
+static inline void slab_set_node_partial(struct slab *slab)
+{
+	set_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
+}
+
+static inline void slab_clear_node_partial(struct slab *slab)
+{
+	clear_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
+}
+
 /*
  * Management of partially allocated slabs.
  */
@@ -2127,6 +2146,7 @@ __add_partial(struct kmem_cache_node *n, struct slab *slab, int tail)
 		list_add_tail(&slab->slab_list, &n->partial);
 	else
 		list_add(&slab->slab_list, &n->partial);
+	slab_set_node_partial(slab);
 }
 
 static inline void add_partial(struct kmem_cache_node *n,
@@ -2141,6 +2161,7 @@ static inline void remove_partial(struct kmem_cache_node *n,
 {
 	lockdep_assert_held(&n->list_lock);
 	list_del(&slab->slab_list);
+	slab_clear_node_partial(slab);
 	n->nr_partial--;
 }
 
@@ -4833,6 +4854,7 @@ static int __kmem_cache_do_shrink(struct kmem_cache *s)
 
 	if (free == slab->objects) {
 		list_move(&slab->slab_list, &discard);
+		slab_clear_node_partial(slab);
 		n->nr_partial--;
 		dec_slabs_node(s, node, slab->objects);
 	} else if (free <= SHRINK_PROMOTE_MAX)
