Skip to content

Commit cf02d7d

Browse files
hailan94 authored and axboe committed
blk-mq: factor out a helper blk_mq_limit_depth()
There are no functional changes; this just makes the code cleaner.

Signed-off-by: Yu Kuai <yukuai@fnnas.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 1db61b0 commit cf02d7d

1 file changed

Lines changed: 37 additions & 25 deletions

File tree

block/blk-mq.c

Lines changed: 37 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -498,6 +498,42 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
498498
return rq_list_pop(data->cached_rqs);
499499
}
500500

501+
static void blk_mq_limit_depth(struct blk_mq_alloc_data *data)
502+
{
503+
struct elevator_mq_ops *ops;
504+
505+
/* If no I/O scheduler has been configured, don't limit requests */
506+
if (!data->q->elevator) {
507+
blk_mq_tag_busy(data->hctx);
508+
return;
509+
}
510+
511+
/*
512+
* All requests use scheduler tags when an I/O scheduler is
513+
* enabled for the queue.
514+
*/
515+
data->rq_flags |= RQF_SCHED_TAGS;
516+
517+
/*
518+
* Flush/passthrough requests are special and go directly to the
519+
* dispatch list, they are not subject to the async_depth limit.
520+
*/
521+
if ((data->cmd_flags & REQ_OP_MASK) == REQ_OP_FLUSH ||
522+
blk_op_is_passthrough(data->cmd_flags))
523+
return;
524+
525+
WARN_ON_ONCE(data->flags & BLK_MQ_REQ_RESERVED);
526+
data->rq_flags |= RQF_USE_SCHED;
527+
528+
/*
529+
* By default, sync requests have no limit, and async requests are
530+
* limited to async_depth.
531+
*/
532+
ops = &data->q->elevator->type->ops;
533+
if (ops->limit_depth)
534+
ops->limit_depth(data->cmd_flags, data);
535+
}
536+
501537
static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
502538
{
503539
struct request_queue *q = data->q;
@@ -516,31 +552,7 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
516552
data->ctx = blk_mq_get_ctx(q);
517553
data->hctx = blk_mq_map_queue(data->cmd_flags, data->ctx);
518554

519-
if (q->elevator) {
520-
/*
521-
* All requests use scheduler tags when an I/O scheduler is
522-
* enabled for the queue.
523-
*/
524-
data->rq_flags |= RQF_SCHED_TAGS;
525-
526-
/*
527-
* Flush/passthrough requests are special and go directly to the
528-
* dispatch list.
529-
*/
530-
if ((data->cmd_flags & REQ_OP_MASK) != REQ_OP_FLUSH &&
531-
!blk_op_is_passthrough(data->cmd_flags)) {
532-
struct elevator_mq_ops *ops = &q->elevator->type->ops;
533-
534-
WARN_ON_ONCE(data->flags & BLK_MQ_REQ_RESERVED);
535-
536-
data->rq_flags |= RQF_USE_SCHED;
537-
if (ops->limit_depth)
538-
ops->limit_depth(data->cmd_flags, data);
539-
}
540-
} else {
541-
blk_mq_tag_busy(data->hctx);
542-
}
543-
555+
blk_mq_limit_depth(data);
544556
if (data->flags & BLK_MQ_REQ_RESERVED)
545557
data->rq_flags |= RQF_RESV;
546558

Comments (0)