Skip to content

Commit cb2076b

Browse files
committed
Merge tag 'block-6.19-20260109' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux
Pull block fixes from Jens Axboe:

 - Kill unlikely checks for blk-rq-qos. These checks are really
   all-or-nothing: either the branch is taken all the time, or it's not.
   Depending on the configuration, either one of those cases may be
   true. Just remove the annotation

 - Fix for merging bios with different app tags set

 - Fix for a recently introduced slowdown due to RCU synchronization

 - Fix for a status change on loop while it's in use, and then a later
   fix for that fix

 - Fix for the async partition scanning in ublk

* tag 'block-6.19-20260109' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
  ublk: fix use-after-free in ublk_partition_scan_work
  blk-mq: avoid stall during boot due to synchronize_rcu_expedited
  loop: add missing bd_abort_claiming in loop_set_status
  block: don't merge bios with different app_tags
  blk-rq-qos: Remove unlikely() hints from QoS checks
  loop: don't change loop device under exclusive opener in loop_set_status
2 parents 68ad209 + f0d385f commit cb2076b

5 files changed

Lines changed: 83 additions & 50 deletions

File tree

block/blk-integrity.c

Lines changed: 18 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -140,14 +140,21 @@ EXPORT_SYMBOL_GPL(blk_rq_integrity_map_user);
140140
bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
141141
struct request *next)
142142
{
143+
struct bio_integrity_payload *bip, *bip_next;
144+
143145
if (blk_integrity_rq(req) == 0 && blk_integrity_rq(next) == 0)
144146
return true;
145147

146148
if (blk_integrity_rq(req) == 0 || blk_integrity_rq(next) == 0)
147149
return false;
148150

149-
if (bio_integrity(req->bio)->bip_flags !=
150-
bio_integrity(next->bio)->bip_flags)
151+
bip = bio_integrity(req->bio);
152+
bip_next = bio_integrity(next->bio);
153+
if (bip->bip_flags != bip_next->bip_flags)
154+
return false;
155+
156+
if (bip->bip_flags & BIP_CHECK_APPTAG &&
157+
bip->app_tag != bip_next->app_tag)
151158
return false;
152159

153160
if (req->nr_integrity_segments + next->nr_integrity_segments >
@@ -163,15 +170,21 @@ bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
163170
bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
164171
struct bio *bio)
165172
{
173+
struct bio_integrity_payload *bip, *bip_bio = bio_integrity(bio);
166174
int nr_integrity_segs;
167175

168-
if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL)
176+
if (blk_integrity_rq(req) == 0 && bip_bio == NULL)
169177
return true;
170178

171-
if (blk_integrity_rq(req) == 0 || bio_integrity(bio) == NULL)
179+
if (blk_integrity_rq(req) == 0 || bip_bio == NULL)
180+
return false;
181+
182+
bip = bio_integrity(req->bio);
183+
if (bip->bip_flags != bip_bio->bip_flags)
172184
return false;
173185

174-
if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags)
186+
if (bip->bip_flags & BIP_CHECK_APPTAG &&
187+
bip->app_tag != bip_bio->app_tag)
175188
return false;
176189

177190
nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);

block/blk-mq.c

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4553,8 +4553,7 @@ static void __blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
45534553
* Make sure reading the old queue_hw_ctx from other
45544554
* context concurrently won't trigger uaf.
45554555
*/
4556-
synchronize_rcu_expedited();
4557-
kfree(hctxs);
4556+
kfree_rcu_mightsleep(hctxs);
45584557
hctxs = new_hctxs;
45594558
}
45604559

block/blk-rq-qos.h

Lines changed: 9 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -112,29 +112,26 @@ void __rq_qos_queue_depth_changed(struct rq_qos *rqos);
112112

113113
static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
114114
{
115-
if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
116-
q->rq_qos)
115+
if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
117116
__rq_qos_cleanup(q->rq_qos, bio);
118117
}
119118

120119
static inline void rq_qos_done(struct request_queue *q, struct request *rq)
121120
{
122-
if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
123-
q->rq_qos && !blk_rq_is_passthrough(rq))
121+
if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) &&
122+
q->rq_qos && !blk_rq_is_passthrough(rq))
124123
__rq_qos_done(q->rq_qos, rq);
125124
}
126125

127126
static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
128127
{
129-
if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
130-
q->rq_qos)
128+
if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
131129
__rq_qos_issue(q->rq_qos, rq);
132130
}
133131

134132
static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
135133
{
136-
if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
137-
q->rq_qos)
134+
if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
138135
__rq_qos_requeue(q->rq_qos, rq);
139136
}
140137

@@ -162,8 +159,7 @@ static inline void rq_qos_done_bio(struct bio *bio)
162159

163160
static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
164161
{
165-
if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
166-
q->rq_qos) {
162+
if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos) {
167163
bio_set_flag(bio, BIO_QOS_THROTTLED);
168164
__rq_qos_throttle(q->rq_qos, bio);
169165
}
@@ -172,25 +168,22 @@ static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
172168
static inline void rq_qos_track(struct request_queue *q, struct request *rq,
173169
struct bio *bio)
174170
{
175-
if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
176-
q->rq_qos)
171+
if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
177172
__rq_qos_track(q->rq_qos, rq, bio);
178173
}
179174

180175
static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
181176
struct bio *bio)
182177
{
183-
if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
184-
q->rq_qos) {
178+
if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos) {
185179
bio_set_flag(bio, BIO_QOS_MERGED);
186180
__rq_qos_merge(q->rq_qos, rq, bio);
187181
}
188182
}
189183

190184
static inline void rq_qos_queue_depth_changed(struct request_queue *q)
191185
{
192-
if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
193-
q->rq_qos)
186+
if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
194187
__rq_qos_queue_depth_changed(q->rq_qos);
195188
}
196189

drivers/block/loop.c

Lines changed: 33 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1225,16 +1225,28 @@ static int loop_clr_fd(struct loop_device *lo)
12251225
}
12261226

12271227
static int
1228-
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
1228+
loop_set_status(struct loop_device *lo, blk_mode_t mode,
1229+
struct block_device *bdev, const struct loop_info64 *info)
12291230
{
12301231
int err;
12311232
bool partscan = false;
12321233
bool size_changed = false;
12331234
unsigned int memflags;
12341235

1236+
/*
1237+
* If we don't hold exclusive handle for the device, upgrade to it
1238+
* here to avoid changing device under exclusive owner.
1239+
*/
1240+
if (!(mode & BLK_OPEN_EXCL)) {
1241+
err = bd_prepare_to_claim(bdev, loop_set_status, NULL);
1242+
if (err)
1243+
goto out_reread_partitions;
1244+
}
1245+
12351246
err = mutex_lock_killable(&lo->lo_mutex);
12361247
if (err)
1237-
return err;
1248+
goto out_abort_claiming;
1249+
12381250
if (lo->lo_state != Lo_bound) {
12391251
err = -ENXIO;
12401252
goto out_unlock;
@@ -1273,6 +1285,10 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
12731285
}
12741286
out_unlock:
12751287
mutex_unlock(&lo->lo_mutex);
1288+
out_abort_claiming:
1289+
if (!(mode & BLK_OPEN_EXCL))
1290+
bd_abort_claiming(bdev, loop_set_status);
1291+
out_reread_partitions:
12761292
if (partscan)
12771293
loop_reread_partitions(lo);
12781294

@@ -1352,25 +1368,29 @@ loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
13521368
}
13531369

13541370
static int
1355-
loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
1371+
loop_set_status_old(struct loop_device *lo, blk_mode_t mode,
1372+
struct block_device *bdev,
1373+
const struct loop_info __user *arg)
13561374
{
13571375
struct loop_info info;
13581376
struct loop_info64 info64;
13591377

13601378
if (copy_from_user(&info, arg, sizeof (struct loop_info)))
13611379
return -EFAULT;
13621380
loop_info64_from_old(&info, &info64);
1363-
return loop_set_status(lo, &info64);
1381+
return loop_set_status(lo, mode, bdev, &info64);
13641382
}
13651383

13661384
static int
1367-
loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
1385+
loop_set_status64(struct loop_device *lo, blk_mode_t mode,
1386+
struct block_device *bdev,
1387+
const struct loop_info64 __user *arg)
13681388
{
13691389
struct loop_info64 info64;
13701390

13711391
if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
13721392
return -EFAULT;
1373-
return loop_set_status(lo, &info64);
1393+
return loop_set_status(lo, mode, bdev, &info64);
13741394
}
13751395

13761396
static int
@@ -1549,14 +1569,14 @@ static int lo_ioctl(struct block_device *bdev, blk_mode_t mode,
15491569
case LOOP_SET_STATUS:
15501570
err = -EPERM;
15511571
if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
1552-
err = loop_set_status_old(lo, argp);
1572+
err = loop_set_status_old(lo, mode, bdev, argp);
15531573
break;
15541574
case LOOP_GET_STATUS:
15551575
return loop_get_status_old(lo, argp);
15561576
case LOOP_SET_STATUS64:
15571577
err = -EPERM;
15581578
if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
1559-
err = loop_set_status64(lo, argp);
1579+
err = loop_set_status64(lo, mode, bdev, argp);
15601580
break;
15611581
case LOOP_GET_STATUS64:
15621582
return loop_get_status64(lo, argp);
@@ -1650,16 +1670,17 @@ loop_info64_to_compat(const struct loop_info64 *info64,
16501670
}
16511671

16521672
static int
1653-
loop_set_status_compat(struct loop_device *lo,
1654-
const struct compat_loop_info __user *arg)
1673+
loop_set_status_compat(struct loop_device *lo, blk_mode_t mode,
1674+
struct block_device *bdev,
1675+
const struct compat_loop_info __user *arg)
16551676
{
16561677
struct loop_info64 info64;
16571678
int ret;
16581679

16591680
ret = loop_info64_from_compat(arg, &info64);
16601681
if (ret < 0)
16611682
return ret;
1662-
return loop_set_status(lo, &info64);
1683+
return loop_set_status(lo, mode, bdev, &info64);
16631684
}
16641685

16651686
static int
@@ -1685,7 +1706,7 @@ static int lo_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
16851706

16861707
switch(cmd) {
16871708
case LOOP_SET_STATUS:
1688-
err = loop_set_status_compat(lo,
1709+
err = loop_set_status_compat(lo, mode, bdev,
16891710
(const struct compat_loop_info __user *)arg);
16901711
break;
16911712
case LOOP_GET_STATUS:

drivers/block/ublk_drv.c

Lines changed: 22 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -255,20 +255,6 @@ static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
255255
u16 q_id, u16 tag, struct ublk_io *io, size_t offset);
256256
static inline unsigned int ublk_req_build_flags(struct request *req);
257257

258-
static void ublk_partition_scan_work(struct work_struct *work)
259-
{
260-
struct ublk_device *ub =
261-
container_of(work, struct ublk_device, partition_scan_work);
262-
263-
if (WARN_ON_ONCE(!test_and_clear_bit(GD_SUPPRESS_PART_SCAN,
264-
&ub->ub_disk->state)))
265-
return;
266-
267-
mutex_lock(&ub->ub_disk->open_mutex);
268-
bdev_disk_changed(ub->ub_disk, false);
269-
mutex_unlock(&ub->ub_disk->open_mutex);
270-
}
271-
272258
static inline struct ublksrv_io_desc *
273259
ublk_get_iod(const struct ublk_queue *ubq, unsigned tag)
274260
{
@@ -1597,6 +1583,27 @@ static void ublk_put_disk(struct gendisk *disk)
15971583
put_device(disk_to_dev(disk));
15981584
}
15991585

1586+
static void ublk_partition_scan_work(struct work_struct *work)
1587+
{
1588+
struct ublk_device *ub =
1589+
container_of(work, struct ublk_device, partition_scan_work);
1590+
/* Hold disk reference to prevent UAF during concurrent teardown */
1591+
struct gendisk *disk = ublk_get_disk(ub);
1592+
1593+
if (!disk)
1594+
return;
1595+
1596+
if (WARN_ON_ONCE(!test_and_clear_bit(GD_SUPPRESS_PART_SCAN,
1597+
&disk->state)))
1598+
goto out;
1599+
1600+
mutex_lock(&disk->open_mutex);
1601+
bdev_disk_changed(disk, false);
1602+
mutex_unlock(&disk->open_mutex);
1603+
out:
1604+
ublk_put_disk(disk);
1605+
}
1606+
16001607
/*
16011608
* Use this function to ensure that ->canceling is consistently set for
16021609
* the device and all queues. Do not set these flags directly.
@@ -2041,7 +2048,7 @@ static void ublk_stop_dev(struct ublk_device *ub)
20412048
mutex_lock(&ub->mutex);
20422049
ublk_stop_dev_unlocked(ub);
20432050
mutex_unlock(&ub->mutex);
2044-
flush_work(&ub->partition_scan_work);
2051+
cancel_work_sync(&ub->partition_scan_work);
20452052
ublk_cancel_dev(ub);
20462053
}
20472054

0 commit comments

Comments (0)