
Commit f6087b9

slab: make __slab_free() more clear
The function is tricky and many of its tests are hard to understand. Try to
improve that by using more descriptively named variables and adding comments:

- rename 'prior' to 'old_head' to match the head and tail parameters
- introduce a 'bool was_full' to make it more obvious what we are testing,
  instead of the !prior and prior tests
- add or improve comments in various places to explain what we're doing

Also replace the kmem_cache_has_cpu_partial() tests with
IS_ENABLED(CONFIG_SLUB_CPU_PARTIAL), which is a compile-time constant. We can
do that because the kmem_cache_debug(s) case is handled upfront via
free_to_partial_list().

Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Link: https://patch.msgid.link/20251105-sheaves-cleanups-v1-1-b8218e1ac7ef@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
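For background on the IS_ENABLED() substitution: kmem_cache_has_cpu_partial()
is, as I read mm/slub.c around this commit, a thin wrapper along these lines
(a paraphrased sketch, not a verbatim quote; check the tree for the exact
form):

        static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
        {
        #ifdef CONFIG_SLUB_CPU_PARTIAL
                /* false only for debug caches, which never reach this path */
                return !kmem_cache_debug(s);
        #else
                return false;
        #endif
        }

Because IS_ENABLED(CONFIG_SLUB_CPU_PARTIAL) expands to a literal 0 or 1 at
compile time, the compiler can discard the dead branch entirely instead of
evaluating kmem_cache_debug(s) at runtime.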
1 parent: c379b74

1 file changed: mm/slub.c (45 additions, 17 deletions)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5859,8 +5859,8 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
                         unsigned long addr)
 
 {
-        void *prior;
-        int was_frozen;
+        void *old_head;
+        bool was_frozen, was_full;
         struct slab new;
         unsigned long counters;
         struct kmem_cache_node *n = NULL;
@@ -5874,20 +5874,37 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
                 return;
         }
 
+        /*
+         * It is enough to test IS_ENABLED(CONFIG_SLUB_CPU_PARTIAL) below
+         * instead of kmem_cache_has_cpu_partial(s), because kmem_cache_debug(s)
+         * is the only other reason it can be false, and it is already handled
+         * above.
+         */
+
         do {
                 if (unlikely(n)) {
                         spin_unlock_irqrestore(&n->list_lock, flags);
                         n = NULL;
                 }
-                prior = slab->freelist;
+                old_head = slab->freelist;
                 counters = slab->counters;
-                set_freepointer(s, tail, prior);
+                set_freepointer(s, tail, old_head);
                 new.counters = counters;
-                was_frozen = new.frozen;
+                was_frozen = !!new.frozen;
+                was_full = (old_head == NULL);
                 new.inuse -= cnt;
-                if ((!new.inuse || !prior) && !was_frozen) {
-                        /* Needs to be taken off a list */
-                        if (!kmem_cache_has_cpu_partial(s) || prior) {
+                /*
+                 * Might need to be taken off (due to becoming empty) or added
+                 * to (due to not being full anymore) the partial list.
+                 * Unless it's frozen.
+                 */
+                if ((!new.inuse || was_full) && !was_frozen) {
+                        /*
+                         * If slab becomes non-full and we have cpu partial
+                         * lists, we put it there unconditionally to avoid
+                         * taking the list_lock. Otherwise we need it.
+                         */
+                        if (!(IS_ENABLED(CONFIG_SLUB_CPU_PARTIAL) && was_full)) {
 
                                 n = get_node(s, slab_nid(slab));
                                 /*
@@ -5905,7 +5922,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
                 }
 
         } while (!slab_update_freelist(s, slab,
-                old_head, counters,
+                old_head, counters,
                 head, new.counters,
                 "__slab_free"));
 
@@ -5917,7 +5934,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
                          * activity can be necessary.
                          */
                         stat(s, FREE_FROZEN);
-                } else if (kmem_cache_has_cpu_partial(s) && !prior) {
+                } else if (IS_ENABLED(CONFIG_SLUB_CPU_PARTIAL) && was_full) {
                         /*
                          * If we started with a full slab then put it onto the
                          * per cpu partial list.
@@ -5926,37 +5943,48 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
                         stat(s, CPU_PARTIAL_FREE);
                 }
 
+                /*
+                 * In other cases we didn't take the list_lock because the slab
+                 * was already on the partial list and will remain there.
+                 */
+
                 return;
         }
 
         /*
          * This slab was partially empty but not on the per-node partial list,
          * in which case we shouldn't manipulate its list, just return.
          */
-        if (prior && !on_node_partial) {
+        if (!was_full && !on_node_partial) {
                 spin_unlock_irqrestore(&n->list_lock, flags);
                 return;
         }
 
+        /*
+         * If slab became empty, should we add/keep it on the partial list or we
+         * have enough?
+         */
         if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
                 goto slab_empty;
 
         /*
          * Objects left in the slab. If it was not on the partial list before
-         * then add it.
+         * then add it. This can only happen when cache has no per cpu partial
+         * list otherwise we would have put it there.
          */
-        if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
+        if (!IS_ENABLED(CONFIG_SLUB_CPU_PARTIAL) && unlikely(was_full)) {
                 add_partial(n, slab, DEACTIVATE_TO_TAIL);
                 stat(s, FREE_ADD_PARTIAL);
         }
         spin_unlock_irqrestore(&n->list_lock, flags);
         return;
 
 slab_empty:
-        if (prior) {
-                /*
-                 * Slab on the partial list.
-                 */
+        /*
+         * The slab could have a single object and thus go from full to empty in
+         * a single free, but more likely it was on the partial list. Remove it.
+         */
+        if (likely(!was_full)) {
                 remove_partial(n, slab);
                 stat(s, FREE_REMOVE_PARTIAL);
         }
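For context on "handled upfront": just above the first hunk, __slab_free()
starts by diverting debug (and SLUB_TINY) caches before any of this logic
runs. A sketch of that guard, reconstructed from recent mm/slub.c (the exact
condition may differ in this tree):

        stat(s, FREE_SLOWPATH);

        if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
                /* debug caches take the simple, fully locked path */
                free_to_partial_list(s, slab, head, tail, cnt, addr);
                return;
        }

That early return is why the later IS_ENABLED(CONFIG_SLUB_CPU_PARTIAL) tests
are equivalent to kmem_cache_has_cpu_partial(s): any cache for which the
helper would return false has already left the function.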
