
Commit 988bb1b

hailan94 authored and axboe committed
mq-deadline: convert to use request_queue->async_depth
In the downstream kernel we tested mq-deadline with many fio workloads, and we found a performance regression after commit 39823b4 ("block/mq-deadline: Fix the tag reservation code") with the following test:

[global]
rw=randread
direct=1
ramp_time=1
ioengine=libaio
iodepth=1024
numjobs=24
bs=1024k
group_reporting=1
runtime=60

[job1]
filename=/dev/sda

The root cause is that mq-deadline now supports configuring async_depth. Although the default value is nr_requests, the minimal value is 1, hence min_shallow_depth is set to 1, causing wake_batch to be 1. As a consequence, the sbitmap_queue is woken up after each IO instead of after 8 IOs. In this test case, sda is an HDD and max_sectors is 128k, hence each submitted 1M IO is split into 8 sequential 128k requests. However, because there are 24 jobs and the total tags are exhausted, the 8 requests are unlikely to be dispatched sequentially, and changing wake_batch to 1 makes this much worse: accounting the blktrace D stage, the percentage of sequential IO decreased from 8% to 0.8%.

Fix this problem by converting to request_queue->async_depth, where min_shallow_depth is set each time async_depth is updated. Note that the elevator attribute async_depth is now removed; the queue attribute with the same name is used instead.

Fixes: 39823b4 ("block/mq-deadline: Fix the tag reservation code")
Signed-off-by: Yu Kuai <yukuai@fnnas.com>
Reviewed-by: Nilay Shroff <nilay@linux.ibm.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
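For context on the root cause: in lib/sbitmap.c, the wake batch is computed from the depth that shallow allocations can actually use, i.e. the queue depth capped per bitmap word by min_shallow_depth, then divided across the wait queues and clamped to at most SBQ_WAKE_BATCH (both SBQ_WAIT_QUEUES and SBQ_WAKE_BATCH are 8 in mainline). The standalone model below is a sketch with a hypothetical helper name; the real sbq_calc_wake_batch() also handles a partial trailing bitmap word. It shows why min_shallow_depth == 1 collapses wake_batch to 1:

#include <stdio.h>

/* Mirrors the mainline constants (both 8). */
#define SBQ_WAIT_QUEUES 8
#define SBQ_WAKE_BATCH  8

/* Hypothetical standalone model of sbq_calc_wake_batch(). */
static unsigned int calc_wake_batch(unsigned int depth, unsigned int shift,
                                    unsigned int min_shallow_depth)
{
        /* Shallow allocations may only use min_shallow_depth bits per
         * bitmap word, so scale the usable depth down accordingly. */
        unsigned int bits_per_word = 1U << shift;
        unsigned int shallow = min_shallow_depth < bits_per_word ?
                               min_shallow_depth : bits_per_word;
        unsigned int usable = (depth >> shift) * shallow;
        unsigned int batch = usable / SBQ_WAIT_QUEUES;

        if (batch < 1)
                batch = 1;
        if (batch > SBQ_WAKE_BATCH)
                batch = SBQ_WAKE_BATCH;
        return batch;
}

int main(void)
{
        /* 256 scheduler tags, 64 bits per bitmap word (shift = 6) */
        printf("min_shallow_depth=1:   wake_batch=%u\n",
               calc_wake_batch(256, 6, 1));    /* -> 1 */
        printf("min_shallow_depth=256: wake_batch=%u\n",
               calc_wake_batch(256, 6, 256));  /* -> 8 */
        return 0;
}

With the old code, dd_depth_updated() passed 1 as the minimum shallow depth, so waiters were woken after every completion; with the fix, the minimum tracks async_depth (nr_requests by default), restoring the batch of 8.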
1 parent 8cbe62f commit 988bb1b

1 file changed

Lines changed: 5 additions & 34 deletions

File tree

block/mq-deadline.c

@@ -98,7 +98,6 @@ struct deadline_data {
 	int fifo_batch;
 	int writes_starved;
 	int front_merges;
-	u32 async_depth;
 	int prio_aging_expire;
 
 	spinlock_t lock;
@@ -486,32 +485,16 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
 	return rq;
 }
 
-/*
- * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
- * function is used by __blk_mq_get_tag().
- */
 static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
 {
-	struct deadline_data *dd = data->q->elevator->elevator_data;
-
-	/* Do not throttle synchronous reads. */
-	if (blk_mq_is_sync_read(opf))
-		return;
-
-	/*
-	 * Throttle asynchronous requests and writes such that these requests
-	 * do not block the allocation of synchronous requests.
-	 */
-	data->shallow_depth = dd->async_depth;
+	if (!blk_mq_is_sync_read(opf))
+		data->shallow_depth = data->q->async_depth;
 }
 
-/* Called by blk_mq_update_nr_requests(). */
+/* Called by blk_mq_init_sched() and blk_mq_update_nr_requests(). */
 static void dd_depth_updated(struct request_queue *q)
 {
-	struct deadline_data *dd = q->elevator->elevator_data;
-
-	dd->async_depth = q->nr_requests;
-	blk_mq_set_min_shallow_depth(q, 1);
+	blk_mq_set_min_shallow_depth(q, q->async_depth);
 }
 
 static void dd_exit_sched(struct elevator_queue *e)
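blk_mq_set_min_shallow_depth() itself is outside this diff; below is a hedged sketch of what such a helper plausibly does, assuming it simply propagates the depth to each hardware queue's scheduler tag bitmap via the existing sbitmap_queue_min_shallow_depth() (which also recomputes wake_batch):

/*
 * Sketch only: the real helper lives in block/blk-mq.c and may differ
 * in detail (e.g. the exact sched_tags layout).
 */
static void blk_mq_set_min_shallow_depth_sketch(struct request_queue *q,
                                                unsigned int depth)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;

        queue_for_each_hw_ctx(q, hctx, i)
                sbitmap_queue_min_shallow_depth(&hctx->sched_tags->bitmap_tags,
                                                depth);
}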
@@ -576,6 +559,7 @@ static int dd_init_sched(struct request_queue *q, struct elevator_queue *eq)
 	blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
 
 	q->elevator = eq;
+	q->async_depth = q->nr_requests;
 	dd_depth_updated(q);
 	return 0;
 }
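On the allocation side, the shallow_depth that dd_limit_depth() stores is honored when a scheduler tag is taken; a simplified sketch of the relevant branch (cf. __blk_mq_get_tag() in block/blk-mq-tag.c; hypothetical function name, heavily elided):

/* Sketch: if a shallow depth was requested, allocate with
 * sbitmap_queue_get_shallow() so async requests cannot occupy every tag. */
static int blk_mq_get_tag_sketch(struct blk_mq_alloc_data *data,
                                 struct sbitmap_queue *bt)
{
        if (data->shallow_depth)
                return sbitmap_queue_get_shallow(bt, data->shallow_depth);
        return __sbitmap_queue_get(bt);
}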
@@ -763,7 +747,6 @@ SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
 SHOW_JIFFIES(deadline_prio_aging_expire_show, dd->prio_aging_expire);
 SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
 SHOW_INT(deadline_front_merges_show, dd->front_merges);
-SHOW_INT(deadline_async_depth_show, dd->async_depth);
 SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
 #undef SHOW_INT
 #undef SHOW_JIFFIES
@@ -793,7 +776,6 @@ STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
 STORE_JIFFIES(deadline_prio_aging_expire_store, &dd->prio_aging_expire, 0, INT_MAX);
 STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
 STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
-STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
 STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
 #undef STORE_FUNCTION
 #undef STORE_INT
@@ -807,7 +789,6 @@ static const struct elv_fs_entry deadline_attrs[] = {
 	DD_ATTR(write_expire),
 	DD_ATTR(writes_starved),
 	DD_ATTR(front_merges),
-	DD_ATTR(async_depth),
 	DD_ATTR(fifo_batch),
 	DD_ATTR(prio_aging_expire),
 	__ATTR_NULL
@@ -894,15 +875,6 @@ static int deadline_starved_show(void *data, struct seq_file *m)
 	return 0;
 }
 
-static int dd_async_depth_show(void *data, struct seq_file *m)
-{
-	struct request_queue *q = data;
-	struct deadline_data *dd = q->elevator->elevator_data;
-
-	seq_printf(m, "%u\n", dd->async_depth);
-	return 0;
-}
-
 static int dd_queued_show(void *data, struct seq_file *m)
 {
 	struct request_queue *q = data;
@@ -1002,7 +974,6 @@ static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
 	DEADLINE_NEXT_RQ_ATTR(write2),
 	{"batching", 0400, deadline_batching_show},
 	{"starved", 0400, deadline_starved_show},
-	{"async_depth", 0400, dd_async_depth_show},
 	{"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops},
 	{"owned_by_driver", 0400, dd_owned_by_driver_show},
 	{"queued", 0400, dd_queued_show},
