
Commit b687034

Merge tag 'slab-for-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab updates from Vlastimil Babka:

 - mempool_alloc_bulk() support for upcoming users in the block layer
   that need to allocate multiple objects at once with the mempool's
   guaranteed progress semantics, which is not achievable by allocating
   single objects in a loop; along with refactoring and various
   improvements (Christoph Hellwig)

 - Preparations for the upcoming separation of struct slab from struct
   page, mostly by removing the struct folio layer, as the purpose of
   struct folio has shifted since it became used in slab code (Matthew
   Wilcox)

 - Modernisation of slab's boot param API usage, which removes some
   unexpected parsing corner cases (Petr Tesarik)

 - Refactoring of freelist_aba_t (now struct freelist_counters) and the
   associated functions for double cmpxchg, enabled by -fms-extensions
   (Vlastimil Babka)

 - Cleanups and improvements related to the sheaves caching layer that
   were part of the full conversion to sheaves, which is planned for
   the next release (Vlastimil Babka)

* tag 'slab-for-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab: (42 commits)
  slab: Remove unnecessary call to compound_head() in alloc_from_pcs()
  mempool: clarify behavior of mempool_alloc_preallocated()
  mempool: drop the file name in the top of file comment
  mempool: de-typedef
  mempool: remove mempool_{init,create}_kvmalloc_pool
  mempool: legitimize the io_schedule_timeout in mempool_alloc_from_pool
  mempool: add mempool_{alloc,free}_bulk
  mempool: factor out a mempool_alloc_from_pool helper
  slab: Remove references to folios from virt_to_slab()
  kasan: Remove references to folio in __kasan_mempool_poison_object()
  memcg: Convert mem_cgroup_from_obj_folio() to mem_cgroup_from_obj_slab()
  mempool: factor out a mempool_adjust_gfp helper
  mempool: add error injection support
  mempool: improve kerneldoc comments
  mm: improve kerneldoc comments for __alloc_pages_bulk
  fault-inject: make enum fault_flags available unconditionally
  usercopy: Remove folio references from check_heap_object()
  slab: Remove folio references from kfree_nolock()
  slab: Remove folio references from kfree_rcu_sheaf()
  slab: Remove folio references from build_detached_freelist()
  ...
2 parents f961638 + a8ec08b
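A quick illustration of the headline mempool change: a minimal, hypothetical caller of the new bulk API, based only on the declarations visible in the include/linux/mempool.h diff below. It assumes mempool_alloc_bulk() returns 0 on success and that the final argument tells a retry how many entries of elem[] are already filled; the demo_* names are invented, not from the tree.

static int demo_alloc_pair(struct mempool *pool, void *elem[2])
{
	/*
	 * One call gets both objects under the mempool's guaranteed
	 * progress semantics, which a loop of single mempool_alloc()
	 * calls cannot provide.
	 */
	if (mempool_alloc_bulk(pool, elem, 2, 0))
		return -ENOMEM;
	return 0;
}

static void demo_free_pair(struct mempool *pool, void *elem[2])
{
	/* Behavior assumed analogous to mempool_free(): elements refill
	 * the pool up to min_nr before any excess is truly freed. */
	mempool_free_bulk(pool, elem, 2);
}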

13 files changed: 758 additions & 679 deletions


include/linux/fault-inject.h

Lines changed: 4 additions & 4 deletions
@@ -8,6 +8,10 @@
 struct dentry;
 struct kmem_cache;
 
+enum fault_flags {
+	FAULT_NOWARN = 1 << 0,
+};
+
 #ifdef CONFIG_FAULT_INJECTION
 
 #include <linux/atomic.h>
@@ -36,10 +40,6 @@ struct fault_attr {
 	struct dentry *dname;
 };
 
-enum fault_flags {
-	FAULT_NOWARN = 1 << 0,
-};
-
 #define FAULT_ATTR_INITIALIZER {	\
 	.interval = 1,			\
 	.times = ATOMIC_INIT(1),	\
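The relocation above exists so that common code can reference FAULT_NOWARN without wrapping it in #ifdef CONFIG_FAULT_INJECTION. A hedged sketch of such a caller (hypothetical; assumes should_fail_ex(), the flags-taking variant of should_fail(), is visible in the configuration at hand):

#include <linux/fault-inject.h>

static bool demo_should_fail_quietly(struct fault_attr *attr, ssize_t size)
{
	/* FAULT_NOWARN now compiles regardless of CONFIG_FAULT_INJECTION,
	 * since the enum lives outside the guard. */
	return should_fail_ex(attr, size, FAULT_NOWARN);
}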

include/linux/gfp_types.h

Lines changed: 0 additions & 6 deletions
@@ -55,9 +55,7 @@ enum {
 #ifdef CONFIG_LOCKDEP
 	___GFP_NOLOCKDEP_BIT,
 #endif
-#ifdef CONFIG_SLAB_OBJ_EXT
 	___GFP_NO_OBJ_EXT_BIT,
-#endif
 	___GFP_LAST_BIT
 };
 
@@ -98,11 +96,7 @@ enum {
 #else
 #define ___GFP_NOLOCKDEP	0
 #endif
-#ifdef CONFIG_SLAB_OBJ_EXT
 #define ___GFP_NO_OBJ_EXT	BIT(___GFP_NO_OBJ_EXT_BIT)
-#else
-#define ___GFP_NO_OBJ_EXT	0
-#endif
 
 /*
  * Physical address zone modifiers (see linux/mmzone.h - low four bits)
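With ___GFP_NO_OBJ_EXT_BIT now allocated unconditionally, code building gfp masks with the flag needs no config guard either. Illustration only (demo_gfp is a made-up name; the flag still only has an effect where CONFIG_SLAB_OBJ_EXT code consults it):

/* builds in all configurations after this change */
static const gfp_t demo_gfp = GFP_KERNEL | __GFP_NO_OBJ_EXT;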

include/linux/mempool.h

Lines changed: 25 additions & 33 deletions
@@ -27,48 +27,53 @@ typedef struct mempool {
 	wait_queue_head_t wait;
 } mempool_t;
 
-static inline bool mempool_initialized(mempool_t *pool)
+static inline bool mempool_initialized(struct mempool *pool)
 {
 	return pool->elements != NULL;
 }
 
-static inline bool mempool_is_saturated(mempool_t *pool)
+static inline bool mempool_is_saturated(struct mempool *pool)
 {
 	return READ_ONCE(pool->curr_nr) >= pool->min_nr;
 }
 
-void mempool_exit(mempool_t *pool);
-int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
-		      mempool_free_t *free_fn, void *pool_data,
-		      gfp_t gfp_mask, int node_id);
-
-int mempool_init_noprof(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
-			mempool_free_t *free_fn, void *pool_data);
+void mempool_exit(struct mempool *pool);
+int mempool_init_node(struct mempool *pool, int min_nr,
+		mempool_alloc_t *alloc_fn, mempool_free_t *free_fn,
+		void *pool_data, gfp_t gfp_mask, int node_id);
+int mempool_init_noprof(struct mempool *pool, int min_nr,
+		mempool_alloc_t *alloc_fn, mempool_free_t *free_fn,
+		void *pool_data);
 #define mempool_init(...)						\
 	alloc_hooks(mempool_init_noprof(__VA_ARGS__))
 
-extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
-			mempool_free_t *free_fn, void *pool_data);
-
-extern mempool_t *mempool_create_node_noprof(int min_nr, mempool_alloc_t *alloc_fn,
-			mempool_free_t *free_fn, void *pool_data,
-			gfp_t gfp_mask, int nid);
+struct mempool *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
+		mempool_free_t *free_fn, void *pool_data);
+struct mempool *mempool_create_node_noprof(int min_nr,
+		mempool_alloc_t *alloc_fn, mempool_free_t *free_fn,
+		void *pool_data, gfp_t gfp_mask, int nid);
 #define mempool_create_node(...)					\
 	alloc_hooks(mempool_create_node_noprof(__VA_ARGS__))
 
 #define mempool_create(_min_nr, _alloc_fn, _free_fn, _pool_data)	\
 	mempool_create_node(_min_nr, _alloc_fn, _free_fn, _pool_data,	\
 			    GFP_KERNEL, NUMA_NO_NODE)
 
-extern int mempool_resize(mempool_t *pool, int new_min_nr);
-extern void mempool_destroy(mempool_t *pool);
+int mempool_resize(struct mempool *pool, int new_min_nr);
+void mempool_destroy(struct mempool *pool);
 
-extern void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask) __malloc;
+void *mempool_alloc_noprof(struct mempool *pool, gfp_t gfp_mask) __malloc;
 #define mempool_alloc(...)						\
 	alloc_hooks(mempool_alloc_noprof(__VA_ARGS__))
+int mempool_alloc_bulk_noprof(struct mempool *pool, void **elem,
+		unsigned int count, unsigned int allocated);
+#define mempool_alloc_bulk(...)						\
+	alloc_hooks(mempool_alloc_bulk_noprof(__VA_ARGS__))
 
-extern void *mempool_alloc_preallocated(mempool_t *pool) __malloc;
-extern void mempool_free(void *element, mempool_t *pool);
+void *mempool_alloc_preallocated(struct mempool *pool) __malloc;
+void mempool_free(void *element, struct mempool *pool);
+unsigned int mempool_free_bulk(struct mempool *pool, void **elem,
+		unsigned int count);
 
 /*
  * A mempool_alloc_t and mempool_free_t that get the memory from
@@ -97,19 +102,6 @@ void mempool_kfree(void *element, void *pool_data);
 	mempool_create((_min_nr), mempool_kmalloc, mempool_kfree,	\
 			(void *)(unsigned long)(_size))
 
-void *mempool_kvmalloc(gfp_t gfp_mask, void *pool_data);
-void mempool_kvfree(void *element, void *pool_data);
-
-static inline int mempool_init_kvmalloc_pool(mempool_t *pool, int min_nr, size_t size)
-{
-	return mempool_init(pool, min_nr, mempool_kvmalloc, mempool_kvfree, (void *) size);
-}
-
-static inline mempool_t *mempool_create_kvmalloc_pool(int min_nr, size_t size)
-{
-	return mempool_create(min_nr, mempool_kvmalloc, mempool_kvfree, (void *) size);
-}
-
 /*
  * A mempool_alloc_t and mempool_free_t for a simple page allocator that
  * allocates pages of the order specified by pool_data
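Since mempool_{init,create}_kvmalloc_pool() is removed above without a direct replacement, a remaining user would supply its own callbacks. A sketch under that assumption; the demo_* helpers are invented, and kvmalloc()/kvfree() come from <linux/slab.h>:

static void *demo_mempool_kvmalloc(gfp_t gfp_mask, void *pool_data)
{
	/* pool_data carries the element size, as the old helpers did */
	return kvmalloc((size_t)pool_data, gfp_mask);
}

static void demo_mempool_kvfree(void *element, void *pool_data)
{
	kvfree(element);
}

/* roughly what the removed mempool_init_kvmalloc_pool() expanded to */
static int demo_init_kvmalloc_pool(struct mempool *pool, int min_nr, size_t size)
{
	return mempool_init(pool, min_nr, demo_mempool_kvmalloc,
			    demo_mempool_kvfree, (void *)size);
}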

include/linux/page-flags.h

Lines changed: 2 additions & 14 deletions
@@ -1048,19 +1048,7 @@ PAGE_TYPE_OPS(Table, table, pgtable)
  */
 PAGE_TYPE_OPS(Guard, guard, guard)
 
-FOLIO_TYPE_OPS(slab, slab)
-
-/**
- * PageSlab - Determine if the page belongs to the slab allocator
- * @page: The page to test.
- *
- * Context: Any context.
- * Return: True for slab pages, false for any other kind of page.
- */
-static inline bool PageSlab(const struct page *page)
-{
-	return folio_test_slab(page_folio(page));
-}
+PAGE_TYPE_OPS(Slab, slab, slab)
 
 #ifdef CONFIG_HUGETLB_PAGE
 FOLIO_TYPE_OPS(hugetlb, hugetlb)
@@ -1076,7 +1064,7 @@ PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)
  * Serialized with zone lock.
  */
 PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)
-FOLIO_TYPE_OPS(large_kmalloc, large_kmalloc)
+PAGE_TYPE_OPS(LargeKmalloc, large_kmalloc, large_kmalloc)
 
 /**
  * PageHuge - Determine if the page belongs to hugetlbfs
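Net effect of the two hunks above: PageSlab(), __SetPageSlab()/__ClearPageSlab() and PageLargeKmalloc() are now ordinary generated page-type ops, so callers test the struct page directly instead of round-tripping through page_folio(). A minimal sketch (demo_is_slab_or_large_kmalloc is invented; it mirrors the mm/kasan and mm/kfence hunks below):

static bool demo_is_slab_or_large_kmalloc(struct page *page)
{
	if (PageSlab(page))		/* page belongs to the slab allocator */
		return true;
	return PageLargeKmalloc(page);	/* large kmalloc served by the buddy */
}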

mm/kasan/common.c

Lines changed: 4 additions & 8 deletions
@@ -520,24 +520,20 @@ void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
 
 bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
 {
-	struct folio *folio = virt_to_folio(ptr);
+	struct page *page = virt_to_page(ptr);
 	struct slab *slab;
 
-	/*
-	 * This function can be called for large kmalloc allocation that get
-	 * their memory from page_alloc. Thus, the folio might not be a slab.
-	 */
-	if (unlikely(!folio_test_slab(folio))) {
+	if (unlikely(PageLargeKmalloc(page))) {
 		if (check_page_allocation(ptr, ip))
 			return false;
-		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
+		kasan_poison(ptr, page_size(page), KASAN_PAGE_FREE, false);
 		return true;
 	}
 
 	if (is_kfence_address(ptr))
 		return true;
 
-	slab = folio_slab(folio);
+	slab = page_slab(page);
 
 	if (check_slab_allocation(slab->slab_cache, ptr, ip))
 		return false;

mm/kfence/core.c

Lines changed: 8 additions & 6 deletions
@@ -612,14 +612,15 @@ static unsigned long kfence_init_pool(void)
 	 * enters __slab_free() slow-path.
 	 */
 	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
-		struct slab *slab;
+		struct page *page;
 
 		if (!i || (i % 2))
 			continue;
 
-		slab = page_slab(pfn_to_page(start_pfn + i));
-		__folio_set_slab(slab_folio(slab));
+		page = pfn_to_page(start_pfn + i);
+		__SetPageSlab(page);
 #ifdef CONFIG_MEMCG
+		struct slab *slab = page_slab(page);
 		slab->obj_exts = (unsigned long)&kfence_metadata_init[i / 2 - 1].obj_exts |
 				 MEMCG_DATA_OBJEXTS;
 #endif
@@ -665,16 +666,17 @@
 
 reset_slab:
 	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
-		struct slab *slab;
+		struct page *page;
 
 		if (!i || (i % 2))
 			continue;
 
-		slab = page_slab(pfn_to_page(start_pfn + i));
+		page = pfn_to_page(start_pfn + i);
 #ifdef CONFIG_MEMCG
+		struct slab *slab = page_slab(page);
 		slab->obj_exts = 0;
 #endif
-		__folio_clear_slab(slab_folio(slab));
+		__ClearPageSlab(page);
 	}
 
 	return addr;

mm/memcontrol.c

Lines changed: 16 additions & 24 deletions
@@ -2557,38 +2557,25 @@ static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
 }
 
 static __always_inline
-struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
+struct mem_cgroup *mem_cgroup_from_obj_slab(struct slab *slab, void *p)
 {
 	/*
 	 * Slab objects are accounted individually, not per-page.
 	 * Memcg membership data for each individual object is saved in
 	 * slab->obj_exts.
 	 */
-	if (folio_test_slab(folio)) {
-		struct slabobj_ext *obj_exts;
-		struct slab *slab;
-		unsigned int off;
-
-		slab = folio_slab(folio);
-		obj_exts = slab_obj_exts(slab);
-		if (!obj_exts)
-			return NULL;
-
-		off = obj_to_index(slab->slab_cache, slab, p);
-		if (obj_exts[off].objcg)
-			return obj_cgroup_memcg(obj_exts[off].objcg);
+	struct slabobj_ext *obj_exts;
+	unsigned int off;
 
+	obj_exts = slab_obj_exts(slab);
+	if (!obj_exts)
 		return NULL;
-	}
 
-	/*
-	 * folio_memcg_check() is used here, because in theory we can encounter
-	 * a folio where the slab flag has been cleared already, but
-	 * slab->obj_exts has not been freed yet
-	 * folio_memcg_check() will guarantee that a proper memory
-	 * cgroup pointer or NULL will be returned.
-	 */
-	return folio_memcg_check(folio);
+	off = obj_to_index(slab->slab_cache, slab, p);
+	if (obj_exts[off].objcg)
+		return obj_cgroup_memcg(obj_exts[off].objcg);
+
+	return NULL;
 }
 
 /*
@@ -2602,10 +2589,15 @@ struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
  */
 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
 {
+	struct slab *slab;
+
 	if (mem_cgroup_disabled())
 		return NULL;
 
-	return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
+	slab = virt_to_slab(p);
+	if (slab)
+		return mem_cgroup_from_obj_slab(slab, p);
+	return folio_memcg_check(virt_to_folio(p));
 }
 
 static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
