Skip to content

Commit bd23f6c

Browse files
Ming Lei authored and Jens Axboe committed
ublk: quiesce request queue when aborting queue
So far aborting queue ends request when the ubq daemon is exiting, and it can be run concurrently with ublk_queue_rq(), this way is fragile and we depend on the tricky usage of UBLK_IO_FLAG_ABORTED for avoiding such race. Quiesce queue when aborting queue, and the two code paths can be run completely exclusively, then it becomes easier to add new ublk feature, such as relaxing single same task limit for each queue. Signed-off-by: Ming Lei <ming.lei@redhat.com> Link: https://lore.kernel.org/r/20231009093324.957829-6-ming.lei@redhat.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 28dde8c commit bd23f6c

1 file changed

Lines changed: 50 additions & 9 deletions

File tree

drivers/block/ublk_drv.c

Lines changed: 50 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1441,25 +1441,59 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
14411441
}
14421442
}
14431443

1444-
static void ublk_daemon_monitor_work(struct work_struct *work)
1444+
/*
 * Abort in-flight requests on every queue whose ubq daemon is dying.
 *
 * Takes ub->lock to read ub->ub_disk and pin the disk's device with
 * get_device() before dropping the lock, so the disk cannot be freed
 * while this function uses it (the lock/NULL protocol here pairs with
 * the ub->lock section in ublk_stop_dev(), which clears ub->ub_disk).
 *
 * Returns false when ub->ub_disk is already NULL (disk is gone, nothing
 * to abort), true after aborting the dying queues.
 *
 * NOTE(review): the quiesce/unquiesce pair around the loop is what the
 * commit message relies on — while the queue is quiesced, ublk_queue_rq()
 * cannot run, so ublk_abort_queue() below is serialized against it.
 */
static bool ublk_abort_requests(struct ublk_device *ub)
{
	struct gendisk *disk;
	int i;

	/* snapshot ub->ub_disk under ub->lock and take a device reference */
	spin_lock(&ub->lock);
	disk = ub->ub_disk;
	if (disk)
		get_device(disk_to_dev(disk));
	spin_unlock(&ub->lock);

	/* Our disk has been dead */
	if (!disk)
		return false;

	/* Now we are serialized with ublk_queue_rq() */
	blk_mq_quiesce_queue(disk->queue);
	/* walk every hardware queue; only dying daemons need aborting */
	for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
		struct ublk_queue *ubq = ublk_get_queue(ub, i);

		if (ubq_daemon_is_dying(ubq)) {
			/* abort queue is for making forward progress */
			ublk_abort_queue(ub, ubq);
		}
	}
	blk_mq_unquiesce_queue(disk->queue);
	/* drop the reference taken above */
	put_device(disk_to_dev(disk));

	return true;
}
1474+
1475+
static void ublk_daemon_monitor_work(struct work_struct *work)
1476+
{
1477+
struct ublk_device *ub =
1478+
container_of(work, struct ublk_device, monitor_work.work);
1479+
int i;
1480+
1481+
for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
1482+
struct ublk_queue *ubq = ublk_get_queue(ub, i);
1483+
1484+
if (ubq_daemon_is_dying(ubq))
1485+
goto found;
1486+
}
1487+
return;
1488+
1489+
found:
1490+
if (!ublk_abort_requests(ub))
1491+
return;
1492+
1493+
if (ublk_can_use_recovery(ub))
1494+
schedule_work(&ub->quiesce_work);
1495+
else
1496+
schedule_work(&ub->stop_work);
14631497

14641498
/*
14651499
* We can't schedule monitor work after ub's state is not UBLK_S_DEV_LIVE.
@@ -1594,6 +1628,8 @@ static void ublk_unquiesce_dev(struct ublk_device *ub)
15941628

15951629
static void ublk_stop_dev(struct ublk_device *ub)
15961630
{
1631+
struct gendisk *disk;
1632+
15971633
mutex_lock(&ub->mutex);
15981634
if (ub->dev_info.state == UBLK_S_DEV_DEAD)
15991635
goto unlock;
@@ -1603,10 +1639,15 @@ static void ublk_stop_dev(struct ublk_device *ub)
16031639
ublk_unquiesce_dev(ub);
16041640
}
16051641
del_gendisk(ub->ub_disk);
1642+
1643+
/* Sync with ublk_abort_queue() by holding the lock */
1644+
spin_lock(&ub->lock);
1645+
disk = ub->ub_disk;
16061646
ub->dev_info.state = UBLK_S_DEV_DEAD;
16071647
ub->dev_info.ublksrv_pid = -1;
1608-
put_disk(ub->ub_disk);
16091648
ub->ub_disk = NULL;
1649+
spin_unlock(&ub->lock);
1650+
put_disk(disk);
16101651
unlock:
16111652
mutex_unlock(&ub->mutex);
16121653
ublk_cancel_dev(ub);

0 commit comments

Comments
 (0)