@@ -7112,39 +7112,29 @@ void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
71127112static void bfq_depth_updated (struct request_queue * q )
71137113{
71147114 struct bfq_data * bfqd = q -> elevator -> elevator_data ;
7115- unsigned int nr_requests = q -> nr_requests ;
7115+ unsigned int async_depth = q -> async_depth ;
71167116
71177117 /*
7118- * In-word depths if no bfq_queue is being weight-raised:
7119- * leaving 25% of tags only for sync reads.
7118+ * By default:
7119+ * - sync reads are not limited
7120+ * If bfqq is not being weight-raised:
7121+ * - sync writes are limited to 75% (the async_depth default value)
7122+ * - async IO are limited to 50%
7123+ * If bfqq is being weight-raised:
7124+ * - sync writes are limited to ~37%
7125+ * - async IO are limited to ~18%
71207126 *
7121- * In next formulas, right-shift the value
7122- * (1U<<bt->sb.shift), instead of computing directly
7123- * (1U<<(bt->sb.shift - something)), to be robust against
7124- * any possible value of bt->sb.shift, without having to
7125- * limit 'something'.
7127+ * If request_queue->async_depth is updated by the user, all limits are
7128+ * updated proportionally.
71267129 */
7127- /* no more than 50% of tags for async I/O */
7128- bfqd -> async_depths [0 ][0 ] = max (nr_requests >> 1 , 1U );
7129- /*
7130- * no more than 75% of tags for sync writes (25% extra tags
7131- * w.r.t. async I/O, to prevent async I/O from starving sync
7132- * writes)
7133- */
7134- bfqd -> async_depths [0 ][1 ] = max ((nr_requests * 3 ) >> 2 , 1U );
7130+ bfqd -> async_depths [0 ][1 ] = async_depth ;
7131+ bfqd -> async_depths [0 ][0 ] = max (async_depth * 2 / 3 , 1U );
7132+ bfqd -> async_depths [1 ][1 ] = max (async_depth >> 1 , 1U );
7133+ bfqd -> async_depths [1 ][0 ] = max (async_depth >> 2 , 1U );
71357134
71367135 /*
7137- * In-word depths in case some bfq_queue is being weight-
7138- * raised: leaving ~63% of tags for sync reads. This is the
7139- * highest percentage for which, in our tests, application
7140- * start-up times didn't suffer from any regression due to tag
7141- * shortage.
7136+ * Due to cgroup QoS, the number of requests allowed for a bfqq might be as low as 1
71427137 */
7143- /* no more than ~18% of tags for async I/O */
7144- bfqd -> async_depths [1 ][0 ] = max ((nr_requests * 3 ) >> 4 , 1U );
7145- /* no more than ~37% of tags for sync writes (~20% extra tags) */
7146- bfqd -> async_depths [1 ][1 ] = max ((nr_requests * 6 ) >> 4 , 1U );
7147-
71487138 blk_mq_set_min_shallow_depth (q , 1 );
71497139}
71507140
@@ -7365,6 +7355,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_queue *eq)
73657355 blk_queue_flag_set (QUEUE_FLAG_DISABLE_WBT_DEF , q );
73667356 wbt_disable_default (q -> disk );
73677357 blk_stat_enable_accounting (q );
7358+ q -> async_depth = (q -> nr_requests * 3 ) >> 2 ;
73687359
73697360 return 0 ;
73707361
0 commit comments