Skip to content

Commit c258f5c

Browse files
Ming Lei authored and Jens Axboe (axboe) committed
ublk: fix deadlock when reading partition table
When one process (such as udev) opens a ublk block device (e.g., to read the partition table via bdev_open()), a deadlock[1] can occur: 1. bdev_open() grabs disk->open_mutex 2. The process issues read I/O to ublk backend to read partition table 3. In __ublk_complete_rq(), blk_update_request() or blk_mq_end_request() runs bio->bi_end_io() callbacks 4. If this triggers fput() on file descriptor of ublk block device, the work may be deferred to current task's task work (see fput() implementation) 5. This eventually calls blkdev_release() from the same context 6. blkdev_release() tries to grab disk->open_mutex again 7. Deadlock: same task waiting for a mutex it already holds The fix is to run blk_update_request() and blk_mq_end_request() with bottom halves disabled. This forces blkdev_release() to run in kernel work-queue context instead of current task work context, and allows ublk server to make forward progress, and avoids the deadlock. Fixes: 71f28f3 ("ublk_drv: add io_uring based userspace block driver") Link: ublk-org/ublksrv#170 [1] Signed-off-by: Ming Lei <ming.lei@redhat.com> Reviewed-by: Caleb Sander Mateos <csander@purestorage.com> [axboe: rewrite comment in ublk] Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent a58383f commit c258f5c

1 file changed

Lines changed: 28 additions & 4 deletions

File tree

drivers/block/ublk_drv.c

Lines changed: 28 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1080,12 +1080,20 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
10801080
return io_uring_cmd_to_pdu(ioucmd, struct ublk_uring_cmd_pdu);
10811081
}
10821082

1083+
static void ublk_end_request(struct request *req, blk_status_t error)
1084+
{
1085+
local_bh_disable();
1086+
blk_mq_end_request(req, error);
1087+
local_bh_enable();
1088+
}
1089+
10831090
/* todo: handle partial completion */
10841091
static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
10851092
bool need_map)
10861093
{
10871094
unsigned int unmapped_bytes;
10881095
blk_status_t res = BLK_STS_OK;
1096+
bool requeue;
10891097

10901098
/* failed read IO if nothing is read */
10911099
if (!io->res && req_op(req) == REQ_OP_READ)
@@ -1117,14 +1125,30 @@ static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
11171125
if (unlikely(unmapped_bytes < io->res))
11181126
io->res = unmapped_bytes;
11191127

1120-
if (blk_update_request(req, BLK_STS_OK, io->res))
1128+
/*
1129+
* Run bio->bi_end_io() with softirqs disabled. If the final fput
1130+
* happens off this path, then that will prevent ublk's blkdev_release()
1131+
* from being called on current's task work, see fput() implementation.
1132+
*
1133+
* Otherwise, ublk server may not provide forward progress in case of
1134+
* reading the partition table from bdev_open() with disk->open_mutex
1135+
* held, and causes dead lock as we could already be holding
1136+
* disk->open_mutex here.
1137+
*
1138+
* Preferably we would not be doing IO with a mutex held that is also
1139+
* used for release, but this work-around will suffice for now.
1140+
*/
1141+
local_bh_disable();
1142+
requeue = blk_update_request(req, BLK_STS_OK, io->res);
1143+
local_bh_enable();
1144+
if (requeue)
11211145
blk_mq_requeue_request(req, true);
11221146
else if (likely(!blk_should_fake_timeout(req->q)))
11231147
__blk_mq_end_request(req, BLK_STS_OK);
11241148

11251149
return;
11261150
exit:
1127-
blk_mq_end_request(req, res);
1151+
ublk_end_request(req, res);
11281152
}
11291153

11301154
static struct io_uring_cmd *__ublk_prep_compl_io_cmd(struct ublk_io *io,
@@ -1164,7 +1188,7 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
11641188
if (ublk_nosrv_dev_should_queue_io(ubq->dev))
11651189
blk_mq_requeue_request(rq, false);
11661190
else
1167-
blk_mq_end_request(rq, BLK_STS_IOERR);
1191+
ublk_end_request(rq, BLK_STS_IOERR);
11681192
}
11691193

11701194
static void
@@ -1209,7 +1233,7 @@ __ublk_do_auto_buf_reg(const struct ublk_queue *ubq, struct request *req,
12091233
ublk_auto_buf_reg_fallback(ubq, req->tag);
12101234
return AUTO_BUF_REG_FALLBACK;
12111235
}
1212-
blk_mq_end_request(req, BLK_STS_IOERR);
1236+
ublk_end_request(req, BLK_STS_IOERR);
12131237
return AUTO_BUF_REG_FAIL;
12141238
}
12151239

0 commit comments

Comments
 (0)