 #include "blk-mq.h"
 #include "blk-mq-sched.h"
 #include "blk-pm.h"
-#include "blk-rq-qos.h"
 
 struct dentry *blk_debugfs_root;
 
@@ -337,23 +336,25 @@ void blk_put_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_put_queue);
 
-void blk_set_queue_dying(struct request_queue *q)
+void blk_queue_start_drain(struct request_queue *q)
 {
-	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
-
 	/*
 	 * When queue DYING flag is set, we need to block new req
 	 * entering queue, so we call blk_freeze_queue_start() to
 	 * prevent I/O from crossing blk_queue_enter().
 	 */
 	blk_freeze_queue_start(q);
-
 	if (queue_is_mq(q))
 		blk_mq_wake_waiters(q);
-
 	/* Make blk_queue_enter() reexamine the DYING flag. */
 	wake_up_all(&q->mq_freeze_wq);
 }
+
+void blk_set_queue_dying(struct request_queue *q)
+{
+	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
+	blk_queue_start_drain(q);
+}
 EXPORT_SYMBOL_GPL(blk_set_queue_dying);
 
 /**
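
[Annotation, not part of the patch: a condensed view of the drain handshake the split sets up. The right-hand column is blk_queue_enter() as reworked further down in this diff.]

/*
 * blk_queue_start_drain(q)             blk_queue_enter(q, flags)
 * ------------------------             -------------------------
 * blk_freeze_queue_start(q);           blk_try_enter_queue() fails
 * blk_mq_wake_waiters(q);              smp_rmb();
 * wake_up_all(&q->mq_freeze_wq);       wait_event(q->mq_freeze_wq, ...
 *                                                 || blk_queue_dying(q));
 *
 * When QUEUE_FLAG_DYING is set first (as blk_set_queue_dying() does),
 * the woken waiter sees the flag and fails with -ENODEV rather than
 * sleeping until the queue is unfrozen.
 */
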
@@ -385,13 +386,8 @@ void blk_cleanup_queue(struct request_queue *q)
 	 */
 	blk_freeze_queue(q);
 
-	rq_qos_exit(q);
-
 	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
 
-	/* for synchronous bio-based driver finish in-flight integrity i/o */
-	blk_flush_integrity();
-
 	blk_sync_queue(q);
 	if (queue_is_mq(q))
 		blk_mq_exit_queue(q);
@@ -416,6 +412,30 @@ void blk_cleanup_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
 
+static bool blk_try_enter_queue(struct request_queue *q, bool pm)
+{
+	rcu_read_lock();
+	if (!percpu_ref_tryget_live(&q->q_usage_counter))
+		goto fail;
+
+	/*
+	 * The code that increments the pm_only counter must ensure that the
+	 * counter is globally visible before the queue is unfrozen.
+	 */
+	if (blk_queue_pm_only(q) &&
+	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
+		goto fail_put;
+
+	rcu_read_unlock();
+	return true;
+
+fail_put:
+	percpu_ref_put(&q->q_usage_counter);
+fail:
+	rcu_read_unlock();
+	return false;
+}
+
 /**
  * blk_queue_enter() - try to increase q->q_usage_counter
  * @q: request queue pointer
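
[Annotation, not part of the patch: a minimal caller-side sketch of the new helper's contract; example_enter_and_work() and do_work() are made-up names.]

/*
 * blk_try_enter_queue() either returns true with a q_usage_counter
 * reference held, or false with nothing held, so only the success
 * path pairs with blk_queue_exit().
 */
static int example_enter_and_work(struct request_queue *q)
{
	if (!blk_try_enter_queue(q, false))
		return -EAGAIN;		/* caller decides: fail fast or sleep and retry */

	do_work(q);			/* hypothetical payload */

	blk_queue_exit(q);		/* drops the reference taken by the tryget */
	return 0;
}
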
@@ -425,64 +445,62 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 {
 	const bool pm = flags & BLK_MQ_REQ_PM;
 
-	while (true) {
-		bool success = false;
-
-		rcu_read_lock();
-		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
-			/*
-			 * The code that increments the pm_only counter is
-			 * responsible for ensuring that that counter is
-			 * globally visible before the queue is unfrozen.
-			 */
-			if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
-			    !blk_queue_pm_only(q)) {
-				success = true;
-			} else {
-				percpu_ref_put(&q->q_usage_counter);
-			}
-		}
-		rcu_read_unlock();
-
-		if (success)
-			return 0;
-
+	while (!blk_try_enter_queue(q, pm)) {
 		if (flags & BLK_MQ_REQ_NOWAIT)
 			return -EBUSY;
 
 		/*
-		 * read pair of barrier in blk_freeze_queue_start(),
-		 * we need to order reading __PERCPU_REF_DEAD flag of
-		 * .q_usage_counter and reading .mq_freeze_depth or
-		 * queue dying flag, otherwise the following wait may
-		 * never return if the two reads are reordered.
+		 * read pair of barrier in blk_freeze_queue_start(), we need to
+		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
+		 * reading .mq_freeze_depth or queue dying flag, otherwise the
+		 * following wait may never return if the two reads are
+		 * reordered.
 		 */
 		smp_rmb();
-
 		wait_event(q->mq_freeze_wq,
 			   (!q->mq_freeze_depth &&
 			    blk_pm_resume_queue(pm, q)) ||
 			   blk_queue_dying(q));
 		if (blk_queue_dying(q))
 			return -ENODEV;
 	}
+
+	return 0;
 }
 
 static inline int bio_queue_enter(struct bio *bio)
 {
-	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
-	bool nowait = bio->bi_opf & REQ_NOWAIT;
-	int ret;
+	struct gendisk *disk = bio->bi_bdev->bd_disk;
+	struct request_queue *q = disk->queue;
 
-	ret = blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0);
-	if (unlikely(ret)) {
-		if (nowait && !blk_queue_dying(q))
+	while (!blk_try_enter_queue(q, false)) {
+		if (bio->bi_opf & REQ_NOWAIT) {
+			if (test_bit(GD_DEAD, &disk->state))
+				goto dead;
 			bio_wouldblock_error(bio);
-		else
-			bio_io_error(bio);
+			return -EBUSY;
+		}
+
+		/*
+		 * read pair of barrier in blk_freeze_queue_start(), we need to
+		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
+		 * reading .mq_freeze_depth or queue dying flag, otherwise the
+		 * following wait may never return if the two reads are
+		 * reordered.
+		 */
+		smp_rmb();
+		wait_event(q->mq_freeze_wq,
+			   (!q->mq_freeze_depth &&
+			    blk_pm_resume_queue(false, q)) ||
+			   test_bit(GD_DEAD, &disk->state));
+		if (test_bit(GD_DEAD, &disk->state))
+			goto dead;
 	}
 
-	return ret;
+	return 0;
+dead:
+	bio_io_error(bio);
+	return -ENODEV;
 }
 
 void blk_queue_exit(struct request_queue *q)
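
[Annotation, not part of the patch: a summary of the error contract the reworked bio_queue_enter() gives its callers; the BLK_STS_* values are what bio_wouldblock_error() and bio_io_error() set on the bio.]

/*
 * bio_queue_enter() outcomes:
 *
 *   queue enterable              -> 0, q_usage_counter reference held
 *   frozen + REQ_NOWAIT          -> -EBUSY, bio ended with BLK_STS_AGAIN
 *   GD_DEAD set on the disk      -> -ENODEV, bio ended with BLK_STS_IOERR
 *   frozen, without REQ_NOWAIT   -> sleeps on mq_freeze_wq and retries
 *
 * On any nonzero return the bio has already been completed, so callers
 * such as __submit_bio() below only unwind their own state.
 */
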
@@ -899,11 +917,18 @@ static blk_qc_t __submit_bio(struct bio *bio)
 	struct gendisk *disk = bio->bi_bdev->bd_disk;
 	blk_qc_t ret = BLK_QC_T_NONE;
 
-	if (blk_crypto_bio_prep(&bio)) {
-		if (!disk->fops->submit_bio)
-			return blk_mq_submit_bio(bio);
+	if (unlikely(bio_queue_enter(bio) != 0))
+		return BLK_QC_T_NONE;
+
+	if (!submit_bio_checks(bio) || !blk_crypto_bio_prep(&bio))
+		goto queue_exit;
+	if (disk->fops->submit_bio) {
 		ret = disk->fops->submit_bio(bio);
+		goto queue_exit;
 	}
+	return blk_mq_submit_bio(bio);
+
+queue_exit:
 	blk_queue_exit(disk->queue);
 	return ret;
 }
@@ -941,9 +966,6 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio)
 		struct request_queue *q = bio->bi_bdev->bd_disk->queue;
 		struct bio_list lower, same;
 
-		if (unlikely(bio_queue_enter(bio) != 0))
-			continue;
-
 		/*
 		 * Create a fresh bio_list for all subordinate requests.
 		 */
@@ -979,23 +1001,12 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio)
 static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
 {
 	struct bio_list bio_list[2] = { };
-	blk_qc_t ret = BLK_QC_T_NONE;
+	blk_qc_t ret;
 
 	current->bio_list = bio_list;
 
 	do {
-		struct gendisk *disk = bio->bi_bdev->bd_disk;
-
-		if (unlikely(bio_queue_enter(bio) != 0))
-			continue;
-
-		if (!blk_crypto_bio_prep(&bio)) {
-			blk_queue_exit(disk->queue);
-			ret = BLK_QC_T_NONE;
-			continue;
-		}
-
-		ret = blk_mq_submit_bio(bio);
+		ret = __submit_bio(bio);
 	} while ((bio = bio_list_pop(&bio_list[0])));
 
 	current->bio_list = NULL;
@@ -1013,9 +1024,6 @@ static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
 	 */
 blk_qc_t submit_bio_noacct(struct bio *bio)
 {
-	if (!submit_bio_checks(bio))
-		return BLK_QC_T_NONE;
-
 	/*
 	 * We only want one ->submit_bio to be active at a time, else stack
 	 * usage with stacked devices could be a problem. Use current->bio_list