Commit 1742d97

Christoph Hellwig authored and tehcaster committed
mempool: factor out a mempool_alloc_from_pool helper
Add a helper for the mempool_alloc slowpath to better separate it from
the fast path, and also use it to implement mempool_alloc_preallocated
which shares the same logic.

[hughd@google.com: fix lack of retrying with __GFP_DIRECT_RECLAIM]
[vbabka@suse.cz: really use limited flags for first mempool attempt]

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://patch.msgid.link/20251113084022.1255121-7-hch@lst.de
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
1 parent 3d24924 commit 1742d97
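For context, a caller-side sketch (not part of this commit; the my_*/MY_* names and sizes are hypothetical) of the mempool API whose slowpath the patch reorganizes. With __GFP_DIRECT_RECLAIM set in the mask, mempool_alloc() waits in the slowpath, now mempool_alloc_from_pool(), instead of failing:

#include <linux/mempool.h>
#include <linux/slab.h>

#define MY_RESERVE 4    /* illustrative: elements preallocated at create time */

static mempool_t *my_pool;      /* hypothetical pool, not from the patch */

static int my_init(void)
{
        /* Preallocate MY_RESERVE kmalloc'ed 256-byte elements. */
        my_pool = mempool_create_kmalloc_pool(MY_RESERVE, 256);
        return my_pool ? 0 : -ENOMEM;
}

static void my_io_path(void)
{
        /*
         * GFP_NOIO includes __GFP_DIRECT_RECLAIM, so if both the backing
         * allocator and the pool are empty, this call waits for a
         * mempool_free() rather than returning NULL.
         */
        void *buf = mempool_alloc(my_pool, GFP_NOIO);

        /* ... do I/O with buf ... */

        mempool_free(buf, my_pool);     /* may refill the pool and wake a slowpath waiter */
}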

1 file changed: mm/mempool.c (62 additions & 64 deletions)
@@ -380,6 +380,50 @@ int mempool_resize(mempool_t *pool, int new_min_nr)
 }
 EXPORT_SYMBOL(mempool_resize);

+static void *mempool_alloc_from_pool(struct mempool *pool, gfp_t gfp_mask)
+{
+        unsigned long flags;
+        void *element;
+
+        spin_lock_irqsave(&pool->lock, flags);
+        if (unlikely(!pool->curr_nr))
+                goto fail;
+        element = remove_element(pool);
+        spin_unlock_irqrestore(&pool->lock, flags);
+
+        /* Paired with rmb in mempool_free(), read comment there. */
+        smp_wmb();
+
+        /*
+         * Update the allocation stack trace as this is more useful for
+         * debugging.
+         */
+        kmemleak_update_trace(element);
+        return element;
+
+fail:
+        if (gfp_mask & __GFP_DIRECT_RECLAIM) {
+                DEFINE_WAIT(wait);
+
+                prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
+                spin_unlock_irqrestore(&pool->lock, flags);
+
+                /*
+                 * Wait for someone else to return an element to @pool.
+                 *
+                 * FIXME: this should be io_schedule(). The timeout is there as
+                 * a workaround for some DM problems in 2.6.18.
+                 */
+                io_schedule_timeout(5 * HZ);
+                finish_wait(&pool->wait, &wait);
+        } else {
+                /* We must not sleep if __GFP_DIRECT_RECLAIM is not set. */
+                spin_unlock_irqrestore(&pool->lock, flags);
+        }
+
+        return NULL;
+}
+
 /*
  * Adjust the gfp flags for mempool allocations, as we never want to dip into
  * the global emergency reserves or retry in the page allocator.
@@ -413,8 +457,6 @@ void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask)
 {
         gfp_t gfp_temp = mempool_adjust_gfp(&gfp_mask);
         void *element;
-        unsigned long flags;
-        wait_queue_entry_t wait;

         VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
         might_alloc(gfp_mask);
@@ -428,53 +470,27 @@ void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask)
                 element = pool->alloc(gfp_temp, pool->pool_data);
         }

-        if (likely(element))
-                return element;
-
-        spin_lock_irqsave(&pool->lock, flags);
-        if (likely(pool->curr_nr)) {
-                element = remove_element(pool);
-                spin_unlock_irqrestore(&pool->lock, flags);
-                /* paired with rmb in mempool_free(), read comment there */
-                smp_wmb();
+        if (unlikely(!element)) {
                 /*
-                 * Update the allocation stack trace as this is more useful
-                 * for debugging.
+                 * Try to allocate an element from the pool.
+                 *
+                 * The first pass won't have __GFP_DIRECT_RECLAIM and won't
+                 * sleep in mempool_alloc_from_pool. Retry the allocation
+                 * with all flags set in that case.
                  */
-                kmemleak_update_trace(element);
-                return element;
-        }
-
-        /*
-         * We use gfp mask w/o direct reclaim or IO for the first round. If
-         * alloc failed with that and @pool was empty, retry immediately.
-         */
-        if (gfp_temp != gfp_mask) {
-                spin_unlock_irqrestore(&pool->lock, flags);
-                gfp_temp = gfp_mask;
-                goto repeat_alloc;
-        }
-
-        /* We must not sleep if !__GFP_DIRECT_RECLAIM */
-        if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
-                spin_unlock_irqrestore(&pool->lock, flags);
-                return NULL;
+                element = mempool_alloc_from_pool(pool, gfp_temp);
+                if (!element) {
+                        if (gfp_temp != gfp_mask) {
+                                gfp_temp = gfp_mask;
+                                goto repeat_alloc;
+                        }
+                        if (gfp_mask & __GFP_DIRECT_RECLAIM) {
+                                goto repeat_alloc;
+                        }
+                }
         }

-        /* Let's wait for someone else to return an element to @pool */
-        init_wait(&wait);
-        prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
-
-        spin_unlock_irqrestore(&pool->lock, flags);
-
-        /*
-         * FIXME: this should be io_schedule(). The timeout is there as a
-         * workaround for some DM problems in 2.6.18.
-         */
-        io_schedule_timeout(5*HZ);
-
-        finish_wait(&pool->wait, &wait);
-        goto repeat_alloc;
+        return element;
 }
 EXPORT_SYMBOL(mempool_alloc_noprof);

@@ -492,25 +508,7 @@ EXPORT_SYMBOL(mempool_alloc_noprof);
  */
 void *mempool_alloc_preallocated(mempool_t *pool)
 {
-        void *element;
-        unsigned long flags;
-
-        spin_lock_irqsave(&pool->lock, flags);
-        if (likely(pool->curr_nr)) {
-                element = remove_element(pool);
-                spin_unlock_irqrestore(&pool->lock, flags);
-                /* paired with rmb in mempool_free(), read comment there */
-                smp_wmb();
-                /*
-                 * Update the allocation stack trace as this is more useful
-                 * for debugging.
-                 */
-                kmemleak_update_trace(element);
-                return element;
-        }
-        spin_unlock_irqrestore(&pool->lock, flags);
-
-        return NULL;
+        return mempool_alloc_from_pool(pool, GFP_NOWAIT);
 }
 EXPORT_SYMBOL(mempool_alloc_preallocated);

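A behavioral note, with a hedged sketch (my_try_reserved_alloc is hypothetical, not from the patch): mempool_alloc_preallocated() now boils down to mempool_alloc_from_pool(pool, GFP_NOWAIT). GFP_NOWAIT does not contain __GFP_DIRECT_RECLAIM, so the helper takes the non-sleeping branch and returns NULL on an empty pool, preserving the old behavior:

#include <linux/mempool.h>

/*
 * Take an element from the preallocated reserve only: pool->alloc is
 * never called and the caller is never put to sleep.
 */
static int my_try_reserved_alloc(mempool_t *pool, void **out)
{
        void *elem = mempool_alloc_preallocated(pool);

        if (!elem)
                return -ENOMEM; /* reserve empty; fall back without sleeping */

        *out = elem;
        return 0;
}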