Commit e50df24

Merge tag 'block-6.5-2023-07-03' of git://git.kernel.dk/linux
Pull more block updates from Jens Axboe:
 "Mostly items that came in a bit late for the initial pull request,
  wanted to make sure they had the appropriate amount of linux-next
  soak before going upstream.

  Outside of stragglers, just generic fixes for either merge window
  items, or longer standing bugs"

* tag 'block-6.5-2023-07-03' of git://git.kernel.dk/linux: (25 commits)
  md/raid0: add discard support for the 'original' layout
  nvme: disable controller on reset state failure
  nvme: sync timeout work on failed reset
  nvme: ensure unquiesce on teardown
  cdrom/gdrom: Fix build error
  nvme: improved uring polling
  block: add request polling helper
  nvme-mpath: fix I/O failure with EAGAIN when failing over I/O
  nvme: host: fix command name spelling
  blk-sysfs: add a new attr_group for blk_mq
  blk-iocost: move wbt_enable/disable_default() out of spinlock
  blk-wbt: cleanup rwb_enabled() and wbt_disabled()
  blk-wbt: remove dead code to handle wbt enable/disable with io inflight
  blk-wbt: don't create wbt sysfs entry if CONFIG_BLK_WBT is disabled
  blk-mq: fix two misuses on RQF_USE_SCHED
  blk-throttle: Fix io statistics for cgroup v1
  bcache: Fix bcache device claiming
  bcache: Alloc holder object before async registration
  raid10: avoid spin_lock from fastpath from raid10_unplug()
  md: fix 'delete_mutex' deadlock
  ...
2 parents 4f52875 + 3c2f765 commit e50df24

25 files changed

Lines changed: 341 additions & 304 deletions

block/blk-cgroup.c

Lines changed: 4 additions & 2 deletions
@@ -2086,6 +2086,9 @@ void blk_cgroup_bio_start(struct bio *bio)
 	struct blkg_iostat_set *bis;
 	unsigned long flags;
 
+	if (!cgroup_subsys_on_dfl(io_cgrp_subsys))
+		return;
+
 	/* Root-level stats are sourced from system-wide IO stats */
 	if (!cgroup_parent(blkcg->css.cgroup))
 		return;
@@ -2116,8 +2119,7 @@ void blk_cgroup_bio_start(struct bio *bio)
 	}
 
 	u64_stats_update_end_irqrestore(&bis->sync, flags);
-	if (cgroup_subsys_on_dfl(io_cgrp_subsys))
-		cgroup_rstat_updated(blkcg->css.cgroup, cpu);
+	cgroup_rstat_updated(blkcg->css.cgroup, cpu);
 	put_cpu();
 }
 

block/blk-iocost.c

Lines changed: 5 additions & 2 deletions
@@ -3301,11 +3301,9 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
 		blk_stat_enable_accounting(disk->queue);
 		blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
 		ioc->enabled = true;
-		wbt_disable_default(disk);
 	} else {
 		blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
 		ioc->enabled = false;
-		wbt_enable_default(disk);
 	}
 
 	if (user) {
@@ -3318,6 +3316,11 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
 	ioc_refresh_params(ioc, true);
 	spin_unlock_irq(&ioc->lock);
 
+	if (enable)
+		wbt_disable_default(disk);
+	else
+		wbt_enable_default(disk);
+
 	blk_mq_unquiesce_queue(disk->queue);
 	blk_mq_unfreeze_queue(disk->queue);
 

block/blk-mq.c

Lines changed: 37 additions & 17 deletions
@@ -49,17 +49,8 @@ static void blk_mq_request_bypass_insert(struct request *rq,
 		blk_insert_t flags);
 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 		struct list_head *list);
-
-static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
-		blk_qc_t qc)
-{
-	return xa_load(&q->hctx_table, qc);
-}
-
-static inline blk_qc_t blk_rq_to_qc(struct request *rq)
-{
-	return rq->mq_hctx->queue_num;
-}
+static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+		struct io_comp_batch *iob, unsigned int flags);
 
 /*
  * Check if any of the ctx, dispatch list or elevator
@@ -1248,7 +1239,7 @@ void blk_mq_start_request(struct request *rq)
 		q->integrity.profile->prepare_fn(rq);
 #endif
 	if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
-		WRITE_ONCE(rq->bio->bi_cookie, blk_rq_to_qc(rq));
+		WRITE_ONCE(rq->bio->bi_cookie, rq->mq_hctx->queue_num);
 }
 EXPORT_SYMBOL(blk_mq_start_request);
 
@@ -1280,7 +1271,11 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
 
 	if (!plug->multiple_queues && last && last->q != rq->q)
 		plug->multiple_queues = true;
-	if (!plug->has_elevator && (rq->rq_flags & RQF_USE_SCHED))
+	/*
+	 * Any request allocated from sched tags can't be issued to
+	 * ->queue_rqs() directly
+	 */
+	if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS))
 		plug->has_elevator = true;
 	rq->rq_next = NULL;
 	rq_list_add(&plug->mq_list, rq);
@@ -1350,7 +1345,7 @@ EXPORT_SYMBOL_GPL(blk_rq_is_poll);
 static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
 {
 	do {
-		blk_mq_poll(rq->q, blk_rq_to_qc(rq), NULL, 0);
+		blk_hctx_poll(rq->q, rq->mq_hctx, NULL, 0);
 		cond_resched();
 	} while (!completion_done(wait));
 }
@@ -4745,10 +4740,9 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
 }
 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
 
-int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
-		unsigned int flags)
+static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+		struct io_comp_batch *iob, unsigned int flags)
 {
-	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
 	long state = get_current_state();
 	int ret;
 
@@ -4773,6 +4767,32 @@ int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *
 	return 0;
 }
 
+int blk_mq_poll(struct request_queue *q, blk_qc_t cookie,
+		struct io_comp_batch *iob, unsigned int flags)
+{
+	struct blk_mq_hw_ctx *hctx = xa_load(&q->hctx_table, cookie);
+
+	return blk_hctx_poll(q, hctx, iob, flags);
+}
+
+int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
+		unsigned int poll_flags)
+{
+	struct request_queue *q = rq->q;
+	int ret;
+
+	if (!blk_rq_is_poll(rq))
+		return 0;
+	if (!percpu_ref_tryget(&q->q_usage_counter))
+		return 0;
+
+	ret = blk_hctx_poll(q, rq->mq_hctx, iob, poll_flags);
+	blk_queue_exit(q);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(blk_rq_poll);
+
 unsigned int blk_mq_rq_cpu(struct request *rq)
 {
 	return rq->mq_ctx->cpu;
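
For context on the "block: add request polling helper" change above: blk_rq_poll() lets a caller poll the hardware queue backing a specific polled request, instead of resolving the hctx from a bio cookie via blk_mq_poll(). The sketch below is illustrative only and is not code from this commit; it mirrors the in-tree blk_rq_poll_completion() pattern. The function name example_wait_polled_rq and the assumption that the request's end_io handler completes *wait* are hypothetical.

#include <linux/blk-mq.h>
#include <linux/completion.h>
#include <linux/sched.h>

/*
 * Illustrative sketch (not part of this commit): busy-poll a polled
 * request until its completion fires, using the newly exported
 * blk_rq_poll().  Assumes the request was set up so that
 * blk_rq_is_poll(rq) is true and that its end_io handler completes
 * @wait; for a non-pollable request blk_rq_poll() simply returns 0.
 */
static void example_wait_polled_rq(struct request *rq, struct completion *wait)
{
	while (!completion_done(wait)) {
		blk_rq_poll(rq, NULL, 0);	/* no completion batch, no poll flags */
		cond_resched();
	}
}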
