Skip to content

Commit a8ec08b

Browse files
committed
Merge branch 'slab/for-6.19/mempool_alloc_bulk' into slab/for-next
Merges series "mempool_alloc_bulk and various mempool improvements v3" from Christoph Hellwig. From the cover letter [1]: This series adds a bulk version of mempool_alloc that makes allocating multiple objects deadlock safe. The initial user is the blk-crypto-fallback code: https://lore.kernel.org/linux-block/20251031093517.1603379-1-hch@lst.de/ with which v1 was posted, but I also have a few other users in mind. Link: https://lore.kernel.org/all/20251113084022.1255121-1-hch@lst.de/ [1]
2 parents ed80cc7 + 4823329 commit a8ec08b

4 files changed

Lines changed: 295 additions & 195 deletions

File tree

include/linux/fault-inject.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,10 @@
 struct dentry;
 struct kmem_cache;
 
+enum fault_flags {
+	FAULT_NOWARN = 1 << 0,
+};
+
 #ifdef CONFIG_FAULT_INJECTION
 
 #include <linux/atomic.h>
@@ -36,10 +40,6 @@ struct fault_attr {
 	struct dentry *dname;
 };
 
-enum fault_flags {
-	FAULT_NOWARN = 1 << 0,
-};
-
 #define FAULT_ATTR_INITIALIZER { \
 	.interval = 1, \
 	.times = ATOMIC_INIT(1), \

include/linux/mempool.h

Lines changed: 25 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -27,48 +27,53 @@ typedef struct mempool {
 	wait_queue_head_t wait;
 } mempool_t;
 
-static inline bool mempool_initialized(mempool_t *pool)
+static inline bool mempool_initialized(struct mempool *pool)
 {
 	return pool->elements != NULL;
 }
 
-static inline bool mempool_is_saturated(mempool_t *pool)
+static inline bool mempool_is_saturated(struct mempool *pool)
 {
 	return READ_ONCE(pool->curr_nr) >= pool->min_nr;
 }
 
-void mempool_exit(mempool_t *pool);
-int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
-		mempool_free_t *free_fn, void *pool_data,
-		gfp_t gfp_mask, int node_id);
-
-int mempool_init_noprof(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
-		mempool_free_t *free_fn, void *pool_data);
+void mempool_exit(struct mempool *pool);
+int mempool_init_node(struct mempool *pool, int min_nr,
+		mempool_alloc_t *alloc_fn, mempool_free_t *free_fn,
+		void *pool_data, gfp_t gfp_mask, int node_id);
+int mempool_init_noprof(struct mempool *pool, int min_nr,
+		mempool_alloc_t *alloc_fn, mempool_free_t *free_fn,
+		void *pool_data);
 #define mempool_init(...) \
 	alloc_hooks(mempool_init_noprof(__VA_ARGS__))
 
-extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
-			mempool_free_t *free_fn, void *pool_data);
-
-extern mempool_t *mempool_create_node_noprof(int min_nr, mempool_alloc_t *alloc_fn,
-			mempool_free_t *free_fn, void *pool_data,
-			gfp_t gfp_mask, int nid);
+struct mempool *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
+		mempool_free_t *free_fn, void *pool_data);
+struct mempool *mempool_create_node_noprof(int min_nr,
+		mempool_alloc_t *alloc_fn, mempool_free_t *free_fn,
+		void *pool_data, gfp_t gfp_mask, int nid);
 #define mempool_create_node(...) \
 	alloc_hooks(mempool_create_node_noprof(__VA_ARGS__))
 
 #define mempool_create(_min_nr, _alloc_fn, _free_fn, _pool_data) \
 	mempool_create_node(_min_nr, _alloc_fn, _free_fn, _pool_data, \
 			GFP_KERNEL, NUMA_NO_NODE)
 
-extern int mempool_resize(mempool_t *pool, int new_min_nr);
-extern void mempool_destroy(mempool_t *pool);
+int mempool_resize(struct mempool *pool, int new_min_nr);
+void mempool_destroy(struct mempool *pool);
 
-extern void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask) __malloc;
+void *mempool_alloc_noprof(struct mempool *pool, gfp_t gfp_mask) __malloc;
 #define mempool_alloc(...) \
 	alloc_hooks(mempool_alloc_noprof(__VA_ARGS__))
+int mempool_alloc_bulk_noprof(struct mempool *pool, void **elem,
+		unsigned int count, unsigned int allocated);
+#define mempool_alloc_bulk(...) \
+	alloc_hooks(mempool_alloc_bulk_noprof(__VA_ARGS__))
 
-extern void *mempool_alloc_preallocated(mempool_t *pool) __malloc;
-extern void mempool_free(void *element, mempool_t *pool);
+void *mempool_alloc_preallocated(struct mempool *pool) __malloc;
+void mempool_free(void *element, struct mempool *pool);
+unsigned int mempool_free_bulk(struct mempool *pool, void **elem,
+		unsigned int count);
 
 /*
  * A mempool_alloc_t and mempool_free_t that get the memory from
@@ -97,19 +102,6 @@ void mempool_kfree(void *element, void *pool_data);
 	mempool_create((_min_nr), mempool_kmalloc, mempool_kfree, \
 		       (void *)(unsigned long)(_size))
 
-void *mempool_kvmalloc(gfp_t gfp_mask, void *pool_data);
-void mempool_kvfree(void *element, void *pool_data);
-
-static inline int mempool_init_kvmalloc_pool(mempool_t *pool, int min_nr, size_t size)
-{
-	return mempool_init(pool, min_nr, mempool_kvmalloc, mempool_kvfree, (void *) size);
-}
-
-static inline mempool_t *mempool_create_kvmalloc_pool(int min_nr, size_t size)
-{
-	return mempool_create(min_nr, mempool_kvmalloc, mempool_kvfree, (void *) size);
-}
-
 /*
  * A mempool_alloc_t and mempool_free_t for a simple page allocator that
  * allocates pages of the order specified by pool_data

0 commit comments

Comments
 (0)