Commit 26fed4a

block: flush plug based on hardware and software queue order

We used to sort the plug list if we had multiple queues before dispatching
requests to the IO scheduler. This usually isn't needed, but for certain
workloads that interleave requests to disks, it's less efficient to process
the plug list one-by-one if everything is interleaved.

Don't sort the list, but skip through it and flush out entries that have
the same target at the same time.

Fixes: df87eb0 ("block: get rid of plug list sorting")
Reported-and-tested-by: Song Liu <song@kernel.org>
Reviewed-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 5b20507 commit 26fed4a
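
The idea the diff below implements can be modeled outside the kernel in a few lines. Each pass over the plug list pops every request, batches the ones whose hardware and software queue match the first request's, and pushes everything else onto a requeue list for a later pass; blk_mq_flush_plug_list() then simply repeats passes until the list drains. Here is a minimal standalone sketch of that strategy, assuming a plain int target in place of the (hctx, ctx) pair and a hand-rolled singly linked list in place of the kernel's rq_list; node, push, pop, and dispatch_one_target are illustrative names, not kernel API.

/*
 * Minimal, self-contained model of the pass-based flush (illustrative
 * only, not kernel API): an int "target" stands in for the request's
 * (hctx, ctx) pair, and a hand-rolled singly linked list stands in for
 * the plug's rq_list.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	int target;		/* stand-in for (rq->mq_hctx, rq->mq_ctx) */
	struct node *next;
};

static struct node *pop(struct node **list)
{
	struct node *n = *list;	/* caller guarantees a non-empty list */

	*list = n->next;
	return n;
}

static void push(struct node **list, struct node *n)
{
	n->next = *list;
	*list = n;
}

/* One pass: batch every entry that matches the first entry's target. */
static void dispatch_one_target(struct node **plug_list)
{
	struct node *requeue = NULL, *batch = NULL;
	int this_target = -1;
	unsigned int depth = 0;

	do {
		struct node *n = pop(plug_list);

		if (this_target < 0) {
			this_target = n->target;
		} else if (n->target != this_target) {
			/* Different target: defer to a later pass. */
			push(&requeue, n);
			continue;
		}
		push(&batch, n);
		depth++;
	} while (*plug_list);

	*plug_list = requeue;
	printf("dispatched %u request(s) to target %d\n", depth, this_target);

	while (batch)			/* "dispatch" = consume the batch */
		free(pop(&batch));
}

int main(void)
{
	struct node *plug_list = NULL;

	/* An interleaved workload: targets alternate 0, 1, 0, 1, ... */
	for (int i = 0; i < 6; i++) {
		struct node *n = malloc(sizeof(*n));

		n->target = i & 1;
		push(&plug_list, n);
	}

	/* Mirrors the loop in blk_mq_flush_plug_list(): one pass per target. */
	do {
		dispatch_one_target(&plug_list);
	} while (plug_list);

	return 0;
}

For n plugged requests spread across k distinct (hctx, ctx) targets this costs k passes, i.e. O(n * k), and it degenerates to a single pass with no requeueing in the common single-target case, while still avoiding the O(n log n) sort that df87eb0 removed.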

1 file changed: block/blk-mq.c

Lines changed: 28 additions & 31 deletions
@@ -2573,13 +2573,36 @@ static void __blk_mq_flush_plug_list(struct request_queue *q,
 		q->mq_ops->queue_rqs(&plug->mq_list);
 }
 
+static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
+{
+	struct blk_mq_hw_ctx *this_hctx = NULL;
+	struct blk_mq_ctx *this_ctx = NULL;
+	struct request *requeue_list = NULL;
+	unsigned int depth = 0;
+	LIST_HEAD(list);
+
+	do {
+		struct request *rq = rq_list_pop(&plug->mq_list);
+
+		if (!this_hctx) {
+			this_hctx = rq->mq_hctx;
+			this_ctx = rq->mq_ctx;
+		} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
+			rq_list_add(&requeue_list, rq);
+			continue;
+		}
+		list_add_tail(&rq->queuelist, &list);
+		depth++;
+	} while (!rq_list_empty(plug->mq_list));
+
+	plug->mq_list = requeue_list;
+	trace_block_unplug(this_hctx->queue, depth, !from_sched);
+	blk_mq_sched_insert_requests(this_hctx, this_ctx, &list, from_sched);
+}
+
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
-	struct blk_mq_hw_ctx *this_hctx;
-	struct blk_mq_ctx *this_ctx;
 	struct request *rq;
-	unsigned int depth;
-	LIST_HEAD(list);
 
 	if (rq_list_empty(plug->mq_list))
 		return;
@@ -2615,35 +2638,9 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		return;
 	}
 
-	this_hctx = NULL;
-	this_ctx = NULL;
-	depth = 0;
 	do {
-		rq = rq_list_pop(&plug->mq_list);
-
-		if (!this_hctx) {
-			this_hctx = rq->mq_hctx;
-			this_ctx = rq->mq_ctx;
-		} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
-			trace_block_unplug(this_hctx->queue, depth,
-					   !from_schedule);
-			blk_mq_sched_insert_requests(this_hctx, this_ctx,
-						     &list, from_schedule);
-			depth = 0;
-			this_hctx = rq->mq_hctx;
-			this_ctx = rq->mq_ctx;
-
-		}
-
-		list_add(&rq->queuelist, &list);
-		depth++;
+		blk_mq_dispatch_plug_list(plug, from_schedule);
 	} while (!rq_list_empty(plug->mq_list));
-
-	if (!list_empty(&list)) {
-		trace_block_unplug(this_hctx->queue, depth, !from_schedule);
-		blk_mq_sched_insert_requests(this_hctx, this_ctx, &list,
-					     from_schedule);
-	}
 }
 
 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
