@@ -47,19 +47,6 @@ queue_var_store(unsigned long *var, const char *page, size_t count)
4747 return count ;
4848}
4949
50- static ssize_t queue_var_store64 (s64 * var , const char * page )
51- {
52- int err ;
53- s64 v ;
54-
55- err = kstrtos64 (page , 10 , & v );
56- if (err < 0 )
57- return err ;
58-
59- * var = v ;
60- return 0 ;
61- }
62-
6350static ssize_t queue_requests_show (struct request_queue * q , char * page )
6451{
6552 return queue_var_show (q -> nr_requests , page );
@@ -451,61 +438,6 @@ static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
451438 return count ;
452439}
453440
454- static ssize_t queue_wb_lat_show (struct request_queue * q , char * page )
455- {
456- if (!wbt_rq_qos (q ))
457- return - EINVAL ;
458-
459- if (wbt_disabled (q ))
460- return sprintf (page , "0\n" );
461-
462- return sprintf (page , "%llu\n" , div_u64 (wbt_get_min_lat (q ), 1000 ));
463- }
464-
465- static ssize_t queue_wb_lat_store (struct request_queue * q , const char * page ,
466- size_t count )
467- {
468- struct rq_qos * rqos ;
469- ssize_t ret ;
470- s64 val ;
471-
472- ret = queue_var_store64 (& val , page );
473- if (ret < 0 )
474- return ret ;
475- if (val < -1 )
476- return - EINVAL ;
477-
478- rqos = wbt_rq_qos (q );
479- if (!rqos ) {
480- ret = wbt_init (q -> disk );
481- if (ret )
482- return ret ;
483- }
484-
485- if (val == -1 )
486- val = wbt_default_latency_nsec (q );
487- else if (val >= 0 )
488- val *= 1000ULL ;
489-
490- if (wbt_get_min_lat (q ) == val )
491- return count ;
492-
493- /*
494- * Ensure that the queue is idled, in case the latency update
495- * ends up either enabling or disabling wbt completely. We can't
496- * have IO inflight if that happens.
497- */
498- blk_mq_freeze_queue (q );
499- blk_mq_quiesce_queue (q );
500-
501- wbt_set_min_lat (q , val );
502-
503- blk_mq_unquiesce_queue (q );
504- blk_mq_unfreeze_queue (q );
505-
506- return count ;
507- }
508-
509441static ssize_t queue_wc_show (struct request_queue * q , char * page )
510442{
511443 if (test_bit (QUEUE_FLAG_WC , & q -> queue_flags ))
@@ -598,7 +530,6 @@ QUEUE_RW_ENTRY(queue_wc, "write_cache");
598530QUEUE_RO_ENTRY (queue_fua , "fua" );
599531QUEUE_RO_ENTRY (queue_dax , "dax" );
600532QUEUE_RW_ENTRY (queue_io_timeout , "io_timeout" );
601- QUEUE_RW_ENTRY (queue_wb_lat , "wbt_lat_usec" );
602533QUEUE_RO_ENTRY (queue_virt_boundary_mask , "virt_boundary_mask" );
603534QUEUE_RO_ENTRY (queue_dma_alignment , "dma_alignment" );
604535
@@ -617,16 +548,86 @@ QUEUE_RW_ENTRY(queue_iostats, "iostats");
617548QUEUE_RW_ENTRY (queue_random , "add_random" );
618549QUEUE_RW_ENTRY (queue_stable_writes , "stable_writes" );
619550
551+ #ifdef CONFIG_BLK_WBT
552+ static ssize_t queue_var_store64 (s64 * var , const char * page )
553+ {
554+ int err ;
555+ s64 v ;
556+
557+ err = kstrtos64 (page , 10 , & v );
558+ if (err < 0 )
559+ return err ;
560+
561+ * var = v ;
562+ return 0 ;
563+ }
564+
565+ static ssize_t queue_wb_lat_show (struct request_queue * q , char * page )
566+ {
567+ if (!wbt_rq_qos (q ))
568+ return - EINVAL ;
569+
570+ if (wbt_disabled (q ))
571+ return sprintf (page , "0\n" );
572+
573+ return sprintf (page , "%llu\n" , div_u64 (wbt_get_min_lat (q ), 1000 ));
574+ }
575+
576+ static ssize_t queue_wb_lat_store (struct request_queue * q , const char * page ,
577+ size_t count )
578+ {
579+ struct rq_qos * rqos ;
580+ ssize_t ret ;
581+ s64 val ;
582+
583+ ret = queue_var_store64 (& val , page );
584+ if (ret < 0 )
585+ return ret ;
586+ if (val < -1 )
587+ return - EINVAL ;
588+
589+ rqos = wbt_rq_qos (q );
590+ if (!rqos ) {
591+ ret = wbt_init (q -> disk );
592+ if (ret )
593+ return ret ;
594+ }
595+
596+ if (val == -1 )
597+ val = wbt_default_latency_nsec (q );
598+ else if (val >= 0 )
599+ val *= 1000ULL ;
600+
601+ if (wbt_get_min_lat (q ) == val )
602+ return count ;
603+
604+ /*
605+ * Ensure that the queue is idled, in case the latency update
606+ * ends up either enabling or disabling wbt completely. We can't
607+ * have IO inflight if that happens.
608+ */
609+ blk_mq_freeze_queue (q );
610+ blk_mq_quiesce_queue (q );
611+
612+ wbt_set_min_lat (q , val );
613+
614+ blk_mq_unquiesce_queue (q );
615+ blk_mq_unfreeze_queue (q );
616+
617+ return count ;
618+ }
619+
620+ QUEUE_RW_ENTRY (queue_wb_lat , "wbt_lat_usec" );
621+ #endif
622+
620623static struct attribute * queue_attrs [] = {
621- & queue_requests_entry .attr ,
622624 & queue_ra_entry .attr ,
623625 & queue_max_hw_sectors_entry .attr ,
624626 & queue_max_sectors_entry .attr ,
625627 & queue_max_segments_entry .attr ,
626628 & queue_max_discard_segments_entry .attr ,
627629 & queue_max_integrity_segments_entry .attr ,
628630 & queue_max_segment_size_entry .attr ,
629- & elv_iosched_entry .attr ,
630631 & queue_hw_sector_size_entry .attr ,
631632 & queue_logical_block_size_entry .attr ,
632633 & queue_physical_block_size_entry .attr ,
@@ -647,17 +648,14 @@ static struct attribute *queue_attrs[] = {
647648 & queue_max_open_zones_entry .attr ,
648649 & queue_max_active_zones_entry .attr ,
649650 & queue_nomerges_entry .attr ,
650- & queue_rq_affinity_entry .attr ,
651651 & queue_iostats_entry .attr ,
652652 & queue_stable_writes_entry .attr ,
653653 & queue_random_entry .attr ,
654654 & queue_poll_entry .attr ,
655655 & queue_wc_entry .attr ,
656656 & queue_fua_entry .attr ,
657657 & queue_dax_entry .attr ,
658- & queue_wb_lat_entry .attr ,
659658 & queue_poll_delay_entry .attr ,
660- & queue_io_timeout_entry .attr ,
661659#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
662660 & blk_throtl_sample_time_entry .attr ,
663661#endif
@@ -666,16 +664,23 @@ static struct attribute *queue_attrs[] = {
666664 NULL ,
667665};
668666
667+ static struct attribute * blk_mq_queue_attrs [] = {
668+ & queue_requests_entry .attr ,
669+ & elv_iosched_entry .attr ,
670+ & queue_rq_affinity_entry .attr ,
671+ & queue_io_timeout_entry .attr ,
672+ #ifdef CONFIG_BLK_WBT
673+ & queue_wb_lat_entry .attr ,
674+ #endif
675+ NULL ,
676+ };
677+
669678static umode_t queue_attr_visible (struct kobject * kobj , struct attribute * attr ,
670679 int n )
671680{
672681 struct gendisk * disk = container_of (kobj , struct gendisk , queue_kobj );
673682 struct request_queue * q = disk -> queue ;
674683
675- if (attr == & queue_io_timeout_entry .attr &&
676- (!q -> mq_ops || !q -> mq_ops -> timeout ))
677- return 0 ;
678-
679684 if ((attr == & queue_max_open_zones_entry .attr ||
680685 attr == & queue_max_active_zones_entry .attr ) &&
681686 !blk_queue_is_zoned (q ))
@@ -684,11 +689,30 @@ static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
684689 return attr -> mode ;
685690}
686691
692+ static umode_t blk_mq_queue_attr_visible (struct kobject * kobj ,
693+ struct attribute * attr , int n )
694+ {
695+ struct gendisk * disk = container_of (kobj , struct gendisk , queue_kobj );
696+ struct request_queue * q = disk -> queue ;
697+
698+ if (!queue_is_mq (q ))
699+ return 0 ;
700+
701+ if (attr == & queue_io_timeout_entry .attr && !q -> mq_ops -> timeout )
702+ return 0 ;
703+
704+ return attr -> mode ;
705+ }
706+
687707static struct attribute_group queue_attr_group = {
688708 .attrs = queue_attrs ,
689709 .is_visible = queue_attr_visible ,
690710};
691711
712+ static struct attribute_group blk_mq_queue_attr_group = {
713+ .attrs = blk_mq_queue_attrs ,
714+ .is_visible = blk_mq_queue_attr_visible ,
715+ };
692716
693717#define to_queue (atr ) container_of((atr), struct queue_sysfs_entry, attr)
694718
@@ -733,6 +757,7 @@ static const struct sysfs_ops queue_sysfs_ops = {
733757
734758static const struct attribute_group * blk_queue_attr_groups [] = {
735759 & queue_attr_group ,
760+ & blk_mq_queue_attr_group ,
736761 NULL
737762};
738763