Skip to content

Commit d589ae0

Browse files
committed
Merge tag 'for-5.18/block-2022-04-01' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Either fixes or a few additions that got missed in the initial merge
  window pull. In detail:

   - List iterator fix to avoid leaking value post loop (Jakob)

   - One-off fix in minor count (Christophe)

   - Fix for a regression in how io priority setting works for an
     exiting task (Jiri)

   - Fix a regression in this merge window with blkg_free() being
     called in an inappropriate context (Ming)

   - Misc fixes (Ming, Tom)"

* tag 'for-5.18/block-2022-04-01' of git://git.kernel.dk/linux-block:
  blk-wbt: remove wbt_track stub
  block: use dedicated list iterator variable
  block: Fix the maximum minor value is blk_alloc_ext_minor()
  block: restore the old set_task_ioprio() behaviour wrt PF_EXITING
  block: avoid calling blkg_free() in atomic context
  lib/sbitmap: allocate sb->map via kvzalloc_node
2 parents 3b1509f + 8d7829e commit d589ae0

8 files changed

Lines changed: 46 additions & 28 deletions

File tree

block/blk-cgroup.c

Lines changed: 22 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -65,19 +65,12 @@ static bool blkcg_policy_enabled(struct request_queue *q,
6565
return pol && test_bit(pol->plid, q->blkcg_pols);
6666
}
6767

68-
/**
69-
* blkg_free - free a blkg
70-
* @blkg: blkg to free
71-
*
72-
* Free @blkg which may be partially allocated.
73-
*/
74-
static void blkg_free(struct blkcg_gq *blkg)
68+
static void blkg_free_workfn(struct work_struct *work)
7569
{
70+
struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
71+
free_work);
7672
int i;
7773

78-
if (!blkg)
79-
return;
80-
8174
for (i = 0; i < BLKCG_MAX_POLS; i++)
8275
if (blkg->pd[i])
8376
blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
@@ -89,6 +82,25 @@ static void blkg_free(struct blkcg_gq *blkg)
8982
kfree(blkg);
9083
}
9184

85+
/**
86+
* blkg_free - free a blkg
87+
* @blkg: blkg to free
88+
*
89+
* Free @blkg which may be partially allocated.
90+
*/
91+
static void blkg_free(struct blkcg_gq *blkg)
92+
{
93+
if (!blkg)
94+
return;
95+
96+
/*
97+
* Both ->pd_free_fn() and request queue's release handler may
98+
* sleep, so free us by scheduling one work func
99+
*/
100+
INIT_WORK(&blkg->free_work, blkg_free_workfn);
101+
schedule_work(&blkg->free_work);
102+
}
103+
92104
static void __blkg_release(struct rcu_head *rcu)
93105
{
94106
struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);

block/blk-ioc.c

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -280,7 +280,6 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
280280

281281
task_lock(task);
282282
if (task->flags & PF_EXITING) {
283-
err = -ESRCH;
284283
kmem_cache_free(iocontext_cachep, ioc);
285284
goto out;
286285
}
@@ -292,7 +291,7 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
292291
task->io_context->ioprio = ioprio;
293292
out:
294293
task_unlock(task);
295-
return err;
294+
return 0;
296295
}
297296
EXPORT_SYMBOL_GPL(set_task_ioprio);
298297

block/blk-mq.c

Lines changed: 16 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -4462,21 +4462,28 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
44624462
return true;
44634463
}
44644464

4465-
static void blk_mq_elv_switch_back(struct list_head *head,
4466-
struct request_queue *q)
4465+
static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head,
4466+
struct request_queue *q)
44674467
{
44684468
struct blk_mq_qe_pair *qe;
4469-
struct elevator_type *t = NULL;
44704469

44714470
list_for_each_entry(qe, head, node)
4472-
if (qe->q == q) {
4473-
t = qe->type;
4474-
break;
4475-
}
4471+
if (qe->q == q)
4472+
return qe;
44764473

4477-
if (!t)
4478-
return;
4474+
return NULL;
4475+
}
44794476

4477+
static void blk_mq_elv_switch_back(struct list_head *head,
4478+
struct request_queue *q)
4479+
{
4480+
struct blk_mq_qe_pair *qe;
4481+
struct elevator_type *t;
4482+
4483+
qe = blk_lookup_qe_pair(head, q);
4484+
if (!qe)
4485+
return;
4486+
t = qe->type;
44804487
list_del(&qe->node);
44814488
kfree(qe);
44824489

block/blk-wbt.h

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -101,9 +101,6 @@ u64 wbt_default_latency_nsec(struct request_queue *);
101101

102102
#else
103103

104-
static inline void wbt_track(struct request *rq, enum wbt_flags flags)
105-
{
106-
}
107104
static inline int wbt_init(struct request_queue *q)
108105
{
109106
return -EINVAL;

block/genhd.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -335,7 +335,7 @@ int blk_alloc_ext_minor(void)
335335
{
336336
int idx;
337337

338-
idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT, GFP_KERNEL);
338+
idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT - 1, GFP_KERNEL);
339339
if (idx == -ENOSPC)
340340
return -EBUSY;
341341
return idx;

include/linux/blk-cgroup.h

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -95,7 +95,10 @@ struct blkcg_gq {
9595

9696
spinlock_t async_bio_lock;
9797
struct bio_list async_bios;
98-
struct work_struct async_bio_work;
98+
union {
99+
struct work_struct async_bio_work;
100+
struct work_struct free_work;
101+
};
99102

100103
atomic_t use_delay;
101104
atomic64_t delay_nsec;

include/linux/sbitmap.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -174,7 +174,7 @@ static inline unsigned int __map_depth(const struct sbitmap *sb, int index)
174174
static inline void sbitmap_free(struct sbitmap *sb)
175175
{
176176
free_percpu(sb->alloc_hint);
177-
kfree(sb->map);
177+
kvfree(sb->map);
178178
sb->map = NULL;
179179
}
180180

lib/sbitmap.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -110,7 +110,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
110110
sb->alloc_hint = NULL;
111111
}
112112

113-
sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
113+
sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
114114
if (!sb->map) {
115115
free_percpu(sb->alloc_hint);
116116
return -ENOMEM;

0 commit comments

Comments
 (0)