Skip to content

Commit b3a4dbc

Browse files
krisman authored and axboe committed
io_uring/kbuf: Use slab for struct io_buffer objects
The allocation of struct io_buffer for metadata of provided buffers is done through a custom allocator that directly gets pages and fragments them. But, slab would do just fine, as this is not a hot path (in fact, it is a deprecated feature) and, by keeping a custom allocator implementation we lose benefits like tracking, poisoning, sanitizers. Finally, the custom code is more complex and requires keeping the list of pages in struct ctx for no good reason. This patch cleans this path up and just uses slab. I microbenchmarked it by forcing the allocation of a large number of objects with the least number of io_uring commands possible (keeping nbufs=USHRT_MAX), with and without the patch. There is a slight increase in time spent in the allocation with slab, of course, but even when allocating to system resources exhaustion, which is not very realistic and happened around 1/2 billion provided buffers for me, it wasn't a significant hit in system time. Especially if we think of a real-world scenario, an application doing register/unregister of provided buffers will hit ctx->io_buffers_cache more often than actually going to slab. Signed-off-by: Gabriel Krisman Bertazi <krisman@suse.de> Link: https://lore.kernel.org/r/20231005000531.30800-4-krisman@suse.de Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent f74c746 commit b3a4dbc

4 files changed

Lines changed: 30 additions & 24 deletions

File tree

include/linux/io_uring_types.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -350,8 +350,6 @@ struct io_ring_ctx {
350350
struct wait_queue_head rsrc_quiesce_wq;
351351
unsigned rsrc_quiesce;
352352

353-
struct list_head io_buffers_pages;
354-
355353
#if defined(CONFIG_UNIX)
356354
struct socket *ring_sock;
357355
#endif

io_uring/io_uring.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -339,7 +339,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
339339
spin_lock_init(&ctx->completion_lock);
340340
spin_lock_init(&ctx->timeout_lock);
341341
INIT_WQ_LIST(&ctx->iopoll_list);
342-
INIT_LIST_HEAD(&ctx->io_buffers_pages);
343342
INIT_LIST_HEAD(&ctx->io_buffers_comp);
344343
INIT_LIST_HEAD(&ctx->defer_list);
345344
INIT_LIST_HEAD(&ctx->timeout_list);
@@ -4720,6 +4719,9 @@ static int __init io_uring_init(void)
47204719
SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU,
47214720
offsetof(struct io_kiocb, cmd.data),
47224721
sizeof_field(struct io_kiocb, cmd.data), NULL);
4722+
io_buf_cachep = kmem_cache_create("io_buffer", sizeof(struct io_buffer), 0,
4723+
SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT,
4724+
NULL);
47234725

47244726
#ifdef CONFIG_SYSCTL
47254727
register_sysctl_init("kernel", kernel_io_uring_disabled_table);

io_uring/io_uring.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -330,6 +330,7 @@ static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
330330
}
331331

332332
extern struct kmem_cache *req_cachep;
333+
extern struct kmem_cache *io_buf_cachep;
333334

334335
static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
335336
{

io_uring/kbuf.c

Lines changed: 26 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,8 @@
2222
/* BIDs are addressed by a 16-bit field in a CQE */
2323
#define MAX_BIDS_PER_BGID (1 << 16)
2424

25+
struct kmem_cache *io_buf_cachep;
26+
2527
struct io_provide_buf {
2628
struct file *file;
2729
__u64 addr;
@@ -258,6 +260,8 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx,
258260
void io_destroy_buffers(struct io_ring_ctx *ctx)
259261
{
260262
struct io_buffer_list *bl;
263+
struct list_head *item, *tmp;
264+
struct io_buffer *buf;
261265
unsigned long index;
262266
int i;
263267

@@ -273,12 +277,9 @@ void io_destroy_buffers(struct io_ring_ctx *ctx)
273277
kfree(bl);
274278
}
275279

276-
while (!list_empty(&ctx->io_buffers_pages)) {
277-
struct page *page;
278-
279-
page = list_first_entry(&ctx->io_buffers_pages, struct page, lru);
280-
list_del_init(&page->lru);
281-
__free_page(page);
280+
list_for_each_safe(item, tmp, &ctx->io_buffers_cache) {
281+
buf = list_entry(item, struct io_buffer, list);
282+
kmem_cache_free(io_buf_cachep, buf);
282283
}
283284
}
284285

@@ -361,11 +362,12 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
361362
return 0;
362363
}
363364

365+
#define IO_BUFFER_ALLOC_BATCH 64
366+
364367
static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
365368
{
366-
struct io_buffer *buf;
367-
struct page *page;
368-
int bufs_in_page;
369+
struct io_buffer *bufs[IO_BUFFER_ALLOC_BATCH];
370+
int allocated;
369371

370372
/*
371373
* Completions that don't happen inline (eg not under uring_lock) will
@@ -385,22 +387,25 @@ static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
385387

386388
/*
387389
* No free buffers and no completion entries either. Allocate a new
388-
* page worth of buffer entries and add those to our freelist.
390+
* batch of buffer entries and add those to our freelist.
389391
*/
390-
page = alloc_page(GFP_KERNEL_ACCOUNT);
391-
if (!page)
392-
return -ENOMEM;
393392

394-
list_add(&page->lru, &ctx->io_buffers_pages);
395-
396-
buf = page_address(page);
397-
bufs_in_page = PAGE_SIZE / sizeof(*buf);
398-
while (bufs_in_page) {
399-
list_add_tail(&buf->list, &ctx->io_buffers_cache);
400-
buf++;
401-
bufs_in_page--;
393+
allocated = kmem_cache_alloc_bulk(io_buf_cachep, GFP_KERNEL_ACCOUNT,
394+
ARRAY_SIZE(bufs), (void **) bufs);
395+
if (unlikely(!allocated)) {
396+
/*
397+
* Bulk alloc is all-or-nothing. If we fail to get a batch,
398+
* retry single alloc to be on the safe side.
399+
*/
400+
bufs[0] = kmem_cache_alloc(io_buf_cachep, GFP_KERNEL);
401+
if (!bufs[0])
402+
return -ENOMEM;
403+
allocated = 1;
402404
}
403405

406+
while (allocated)
407+
list_add_tail(&bufs[--allocated]->list, &ctx->io_buffers_cache);
408+
404409
return 0;
405410
}
406411

0 commit comments

Comments (0)