 	 * asynchronous requests, we reserve 25% of requests for synchronous
 	 * operations.
 	 */
-	KYBER_ASYNC_PERCENT = 75,
+	KYBER_DEFAULT_ASYNC_PERCENT = 75,
 };
-
 /*
  * Maximum device-wide depth for each scheduling domain.
  *
@@ -157,9 +156,6 @@ struct kyber_queue_data {
 	 */
 	struct sbitmap_queue domain_tokens[KYBER_NUM_DOMAINS];
 
-	/* Number of allowed async requests. */
-	unsigned int async_depth;
-
 	struct kyber_cpu_latency __percpu *cpu_latency;
 
 	/* Timer for stats aggregation and adjusting domain tokens. */
@@ -401,10 +397,7 @@ static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
 
 static void kyber_depth_updated(struct request_queue *q)
 {
-	struct kyber_queue_data *kqd = q->elevator->elevator_data;
-
-	kqd->async_depth = q->nr_requests * KYBER_ASYNC_PERCENT / 100U;
-	blk_mq_set_min_shallow_depth(q, kqd->async_depth);
+	blk_mq_set_min_shallow_depth(q, q->async_depth);
 }
 
 static int kyber_init_sched(struct request_queue *q, struct elevator_queue *eq)
@@ -414,6 +407,7 @@ static int kyber_init_sched(struct request_queue *q, struct elevator_queue *eq)
 	blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
 
 	q->elevator = eq;
+	q->async_depth = q->nr_requests * KYBER_DEFAULT_ASYNC_PERCENT / 100;
 	kyber_depth_updated(q);
 
 	return 0;
@@ -552,15 +546,8 @@ static void rq_clear_domain_token(struct kyber_queue_data *kqd,
 
 static void kyber_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
 {
-	/*
-	 * We use the scheduler tags as per-hardware queue queueing tokens.
-	 * Async requests can be limited at this stage.
-	 */
-	if (!blk_mq_is_sync_read(opf)) {
-		struct kyber_queue_data *kqd = data->q->elevator->elevator_data;
-
-		data->shallow_depth = kqd->async_depth;
-	}
+	if (!blk_mq_is_sync_read(opf))
+		data->shallow_depth = data->q->async_depth;
 }
 
 static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
@@ -956,15 +943,6 @@ KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_DISCARD, discard)
 KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
 #undef KYBER_DEBUGFS_DOMAIN_ATTRS
 
-static int kyber_async_depth_show(void *data, struct seq_file *m)
-{
-	struct request_queue *q = data;
-	struct kyber_queue_data *kqd = q->elevator->elevator_data;
-
-	seq_printf(m, "%u\n", kqd->async_depth);
-	return 0;
-}
-
 static int kyber_cur_domain_show(void *data, struct seq_file *m)
 {
 	struct blk_mq_hw_ctx *hctx = data;
@@ -990,7 +968,6 @@ static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = {
 	KYBER_QUEUE_DOMAIN_ATTRS(write),
 	KYBER_QUEUE_DOMAIN_ATTRS(discard),
 	KYBER_QUEUE_DOMAIN_ATTRS(other),
-	{"async_depth", 0400, kyber_async_depth_show},
 	{},
 };
 #undef KYBER_QUEUE_DOMAIN_ATTRS
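
For reference, the net effect of the diff above: the async depth moves from kyber's private kyber_queue_data into struct request_queue, is computed once at scheduler init as 75% of nr_requests (KYBER_DEFAULT_ASYNC_PERCENT), and still caps the shallow depth used when allocating anything other than a synchronous read. The following is a minimal standalone sketch of that arithmetic under those assumptions, not kernel code; the struct and the helpers init_async_depth()/limit_depth() are simplified, hypothetical stand-ins for the real request_queue and blk-mq paths.

/*
 * Minimal standalone sketch (hypothetical, not kernel code) of the
 * async-depth logic after this change.
 */
#include <stdbool.h>
#include <stdio.h>

enum {
	KYBER_DEFAULT_ASYNC_PERCENT = 75,	/* matches the renamed constant */
};

/* Simplified stand-in for struct request_queue. */
struct request_queue {
	unsigned int nr_requests;
	unsigned int async_depth;	/* now lives on the queue, not in kqd */
};

/* Mirrors the new kyber_init_sched(): compute the depth once, up front. */
static void init_async_depth(struct request_queue *q)
{
	q->async_depth = q->nr_requests * KYBER_DEFAULT_ASYNC_PERCENT / 100;
}

/* Mirrors the new kyber_limit_depth(): only non-sync-reads are capped. */
static unsigned int limit_depth(const struct request_queue *q, bool sync_read)
{
	return sync_read ? q->nr_requests : q->async_depth;
}

int main(void)
{
	struct request_queue q = { .nr_requests = 256, .async_depth = 0 };

	init_async_depth(&q);
	printf("async_depth: %u of %u requests\n", q.async_depth, q.nr_requests);
	printf("sync read depth: %u\n", limit_depth(&q, true));
	printf("async op depth:  %u\n", limit_depth(&q, false));
	return 0;
}

With nr_requests = 256 this prints an async_depth of 192, i.e. asynchronous operations can consume at most 75% of the scheduler tags, leaving the remaining 25% reserved for synchronous reads, exactly the invariant the comment at the top of the diff describes.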