Skip to content

Commit b16af1c

Browse files
committed
slab: remove frozen slab checks from __slab_free()
Currently slabs are only frozen after consistency checks have failed. This can happen only in caches with debugging enabled, and those use free_to_partial_list() for freeing. The non-debug operation of __slab_free() can thus stop considering the frozen field, and we can remove the FREE_FROZEN stat.

Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Hao Li <hao.li@linux.dev>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
1 parent 0f7075b commit b16af1c

1 file changed

Lines changed: 4 additions & 18 deletions

File tree

mm/slub.c

Lines changed: 4 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -338,7 +338,6 @@ enum stat_item {
338338
FREE_RCU_SHEAF_FAIL, /* Failed to free to a rcu_free sheaf */
339339
FREE_FASTPATH, /* Free to cpu slab */
340340
FREE_SLOWPATH, /* Freeing not to cpu slab */
341-
FREE_FROZEN, /* Freeing to frozen slab */
342341
FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */
343342
FREE_REMOVE_PARTIAL, /* Freeing removes last object */
344343
ALLOC_FROM_PARTIAL, /* Cpu slab acquired from node partial list */
@@ -5109,7 +5108,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
51095108
unsigned long addr)
51105109

51115110
{
5112-
bool was_frozen, was_full;
5111+
bool was_full;
51135112
struct freelist_counters old, new;
51145113
struct kmem_cache_node *n = NULL;
51155114
unsigned long flags;
@@ -5132,7 +5131,6 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
51325131
old.counters = slab->counters;
51335132

51345133
was_full = (old.freelist == NULL);
5135-
was_frozen = old.frozen;
51365134

51375135
set_freepointer(s, tail, old.freelist);
51385136

@@ -5145,7 +5143,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
51455143
* to (due to not being full anymore) the partial list.
51465144
* Unless it's frozen.
51475145
*/
5148-
if ((!new.inuse || was_full) && !was_frozen) {
5146+
if (!new.inuse || was_full) {
51495147

51505148
n = get_node(s, slab_nid(slab));
51515149
/*
@@ -5164,20 +5162,10 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
51645162
} while (!slab_update_freelist(s, slab, &old, &new, "__slab_free"));
51655163

51665164
if (likely(!n)) {
5167-
5168-
if (likely(was_frozen)) {
5169-
/*
5170-
* The list lock was not taken therefore no list
5171-
* activity can be necessary.
5172-
*/
5173-
stat(s, FREE_FROZEN);
5174-
}
5175-
51765165
/*
5177-
* In other cases we didn't take the list_lock because the slab
5178-
* was already on the partial list and will remain there.
5166+
* We didn't take the list_lock because the slab was already on
5167+
* the partial list and will remain there.
51795168
*/
5180-
51815169
return;
51825170
}
51835171

@@ -8766,7 +8754,6 @@ STAT_ATTR(FREE_RCU_SHEAF, free_rcu_sheaf);
87668754
STAT_ATTR(FREE_RCU_SHEAF_FAIL, free_rcu_sheaf_fail);
87678755
STAT_ATTR(FREE_FASTPATH, free_fastpath);
87688756
STAT_ATTR(FREE_SLOWPATH, free_slowpath);
8769-
STAT_ATTR(FREE_FROZEN, free_frozen);
87708757
STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
87718758
STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
87728759
STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
@@ -8871,7 +8858,6 @@ static struct attribute *slab_attrs[] = {
88718858
&free_rcu_sheaf_fail_attr.attr,
88728859
&free_fastpath_attr.attr,
88738860
&free_slowpath_attr.attr,
8874-
&free_frozen_attr.attr,
88758861
&free_add_partial_attr.attr,
88768862
&free_remove_partial_attr.attr,
88778863
&alloc_from_partial_attr.attr,

0 commit comments

Comments (0)