@@ -93,7 +93,7 @@ struct rq_wb {
 	struct rq_depth rq_depth;
 };
 
-static int wbt_init(struct gendisk *disk);
+static int wbt_init(struct gendisk *disk, struct rq_wb *rwb);
 
 static inline struct rq_wb *RQWB(struct rq_qos *rqos)
 {
@@ -698,6 +698,41 @@ static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
 	}
 }
 
+static int wbt_data_dir(const struct request *rq)
+{
+	const enum req_op op = req_op(rq);
+
+	if (op == REQ_OP_READ)
+		return READ;
+	else if (op_is_write(op))
+		return WRITE;
+
+	/* don't account */
+	return -1;
+}
+
+static struct rq_wb *wbt_alloc(void)
+{
+	struct rq_wb *rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
+
+	if (!rwb)
+		return NULL;
+
+	rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
+	if (!rwb->cb) {
+		kfree(rwb);
+		return NULL;
+	}
+
+	return rwb;
+}
+
+static void wbt_free(struct rq_wb *rwb)
+{
+	blk_stat_free_callback(rwb->cb);
+	kfree(rwb);
+}
+
 /*
  * Enable wbt if defaults are configured that way
 */
@@ -739,8 +774,17 @@ EXPORT_SYMBOL_GPL(wbt_enable_default);
 
 void wbt_init_enable_default(struct gendisk *disk)
 {
-	if (__wbt_enable_default(disk))
-		WARN_ON_ONCE(wbt_init(disk));
+	struct rq_wb *rwb;
+
+	if (!__wbt_enable_default(disk))
+		return;
+
+	rwb = wbt_alloc();
+	if (WARN_ON_ONCE(!rwb))
+		return;
+
+	if (WARN_ON_ONCE(wbt_init(disk, rwb)))
+		wbt_free(rwb);
 }
 
 static u64 wbt_default_latency_nsec(struct request_queue *q)
@@ -754,19 +798,6 @@ static u64 wbt_default_latency_nsec(struct request_queue *q)
 	return 2000000ULL;
 }
 
-static int wbt_data_dir(const struct request *rq)
-{
-	const enum req_op op = req_op(rq);
-
-	if (op == REQ_OP_READ)
-		return READ;
-	else if (op_is_write(op))
-		return WRITE;
-
-	/* don't account */
-	return -1;
-}
-
 static void wbt_queue_depth_changed(struct rq_qos *rqos)
 {
 	RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->disk->queue);
@@ -778,8 +809,7 @@ static void wbt_exit(struct rq_qos *rqos)
 	struct rq_wb *rwb = RQWB(rqos);
 
 	blk_stat_remove_callback(rqos->disk->queue, rwb->cb);
-	blk_stat_free_callback(rwb->cb);
-	kfree(rwb);
+	wbt_free(rwb);
 }
 
 /*
@@ -903,22 +933,11 @@ static const struct rq_qos_ops wbt_rqos_ops = {
 #endif
 };
 
-static int wbt_init(struct gendisk *disk)
+static int wbt_init(struct gendisk *disk, struct rq_wb *rwb)
 {
 	struct request_queue *q = disk->queue;
-	struct rq_wb *rwb;
-	int i;
 	int ret;
-
-	rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
-	if (!rwb)
-		return -ENOMEM;
-
-	rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
-	if (!rwb->cb) {
-		kfree(rwb);
-		return -ENOMEM;
-	}
+	int i;
 
 	for (i = 0; i < WBT_NUM_RWQ; i++)
 		rq_wait_init(&rwb->rq_wait[i]);
@@ -938,38 +957,38 @@ static int wbt_init(struct gendisk *disk)
 	ret = rq_qos_add(&rwb->rqos, disk, RQ_QOS_WBT, &wbt_rqos_ops);
 	mutex_unlock(&q->rq_qos_mutex);
 	if (ret)
-		goto err_free;
+		return ret;
 
 	blk_stat_add_callback(q, rwb->cb);
-
 	return 0;
-
-err_free:
-	blk_stat_free_callback(rwb->cb);
-	kfree(rwb);
-	return ret;
-
 }
 
 int wbt_set_lat(struct gendisk *disk, s64 val)
 {
 	struct request_queue *q = disk->queue;
+	struct rq_qos *rqos = wbt_rq_qos(q);
+	struct rq_wb *rwb = NULL;
 	unsigned int memflags;
-	struct rq_qos *rqos;
 	int ret = 0;
 
+	if (!rqos) {
+		rwb = wbt_alloc();
+		if (!rwb)
+			return -ENOMEM;
+	}
+
 	/*
 	 * Ensure that the queue is idled, in case the latency update
 	 * ends up either enabling or disabling wbt completely. We can't
 	 * have IO inflight if that happens.
 	 */
 	memflags = blk_mq_freeze_queue(q);
-
-	rqos = wbt_rq_qos(q);
 	if (!rqos) {
-		ret = wbt_init(disk);
-		if (ret)
+		ret = wbt_init(disk, rwb);
+		if (ret) {
+			wbt_free(rwb);
 			goto out;
+		}
 	}
 
 	if (val == -1)
@@ -989,6 +1008,5 @@ int wbt_set_lat(struct gendisk *disk, s64 val)
 	blk_mq_unquiesce_queue(q);
 out:
 	blk_mq_unfreeze_queue(q, memflags);
-
 	return ret;
 }
0 commit comments