Commit 3a08284

Merge branch 'for-6.5/block-late' into block-6.5
* for-6.5/block-late:
  blk-sysfs: add a new attr_group for blk_mq
  blk-iocost: move wbt_enable/disable_default() out of spinlock
  blk-wbt: cleanup rwb_enabled() and wbt_disabled()
  blk-wbt: remove dead code to handle wbt enable/disable with io inflight
  blk-wbt: don't create wbt sysfs entry if CONFIG_BLK_WBT is disabled
  blk-mq: fix two misuses on RQF_USE_SCHED
  blk-throttle: Fix io statistics for cgroup v1
  bcache: Fix bcache device claiming
  bcache: Alloc holder object before async registration
  raid10: avoid spin_lock from fastpath from raid10_unplug()
  md: fix 'delete_mutex' deadlock
  md: use mddev->external to select holder in export_rdev()
  md/raid1-10: fix casting from randomized structure in raid1_submit_write()
  md/raid10: fix the condition to call bio_end_io_acct()
2 parents 89181f5 + 6d85ebf commit 3a08284

14 files changed: 208 additions & 220 deletions

block/blk-cgroup.c

Lines changed: 4 additions & 2 deletions
@@ -2086,6 +2086,9 @@ void blk_cgroup_bio_start(struct bio *bio)
 	struct blkg_iostat_set *bis;
 	unsigned long flags;
 
+	if (!cgroup_subsys_on_dfl(io_cgrp_subsys))
+		return;
+
 	/* Root-level stats are sourced from system-wide IO stats */
 	if (!cgroup_parent(blkcg->css.cgroup))
 		return;
@@ -2116,8 +2119,7 @@ void blk_cgroup_bio_start(struct bio *bio)
 	}
 
 	u64_stats_update_end_irqrestore(&bis->sync, flags);
-	if (cgroup_subsys_on_dfl(io_cgrp_subsys))
-		cgroup_rstat_updated(blkcg->css.cgroup, cpu);
+	cgroup_rstat_updated(blkcg->css.cgroup, cpu);
 	put_cpu();
 }
 
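
Note: the hunks above hoist the hierarchy check to the top of blk_cgroup_bio_start(), so on cgroup v1 the function returns before touching the per-CPU iostat machinery at all (v1 byte/io stats move into blk-throttle, see the blk-throttle.h hunk below), and the tail-end cgroup_rstat_updated() call becomes unconditional because only v2 callers reach it. A minimal userspace C sketch of the resulting control flow; on_default_hierarchy, update_stats and notify_rstat are illustrative stand-ins, not kernel APIs:

#include <stdbool.h>
#include <stdio.h>

static bool on_default_hierarchy;	/* cgroup v1 for this demo */

static void update_stats(size_t bytes)
{
	printf("accounted %zu bytes\n", bytes);
}

static void notify_rstat(void)
{
	printf("rstat flush scheduled\n");
}

/* After the patch: bail out before doing any work on cgroup v1. */
static void bio_start(size_t bytes)
{
	if (!on_default_hierarchy)
		return;		/* v1 stats are accounted in blk-throttle */

	update_stats(bytes);
	notify_rstat();		/* unconditional: only v2 reaches this */
}

int main(void)
{
	bio_start(4096);		/* v1: skipped entirely */
	on_default_hierarchy = true;
	bio_start(4096);		/* v2: accounts and notifies */
	return 0;
}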

block/blk-iocost.c

Lines changed: 5 additions & 2 deletions
@@ -3301,11 +3301,9 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
 		blk_stat_enable_accounting(disk->queue);
 		blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
 		ioc->enabled = true;
-		wbt_disable_default(disk);
 	} else {
 		blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
 		ioc->enabled = false;
-		wbt_enable_default(disk);
 	}
 
 	if (user) {
@@ -3318,6 +3316,11 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
 	ioc_refresh_params(ioc, true);
 	spin_unlock_irq(&ioc->lock);
 
+	if (enable)
+		wbt_disable_default(disk);
+	else
+		wbt_enable_default(disk);
+
 	blk_mq_unquiesce_queue(disk->queue);
 	blk_mq_unfreeze_queue(disk->queue);
 
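
This is the "move wbt_enable/disable_default() out of spinlock" fix: those calls can sleep, so ioc_qos_write() now records the decision while holding ioc->lock and acts on it only after spin_unlock_irq(). A hedged userspace pthread sketch of the pattern, with usleep() standing in for a call that may sleep; all names are illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t ioc_lock = PTHREAD_MUTEX_INITIALIZER;
static bool ioc_enabled;

/* Stand-in for wbt_enable/disable_default(): may block, so it must not
 * run while ioc_lock is held (a spinlock in the kernel). */
static void wbt_toggle_default(bool enable_wbt)
{
	usleep(1000);
	printf("wbt %s\n", enable_wbt ? "enabled" : "disabled");
}

static void qos_write(bool enable)
{
	pthread_mutex_lock(&ioc_lock);
	ioc_enabled = enable;		/* state change under the lock */
	pthread_mutex_unlock(&ioc_lock);

	/* Act on the recorded decision after dropping the lock, mirroring
	 * the relocated wbt_disable_default()/wbt_enable_default(). */
	wbt_toggle_default(!enable);
}

int main(void)
{
	qos_write(true);	/* iocost on  -> wbt off */
	qos_write(false);	/* iocost off -> wbt back on */
	return 0;
}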

block/blk-mq.c

Lines changed: 5 additions & 1 deletion
@@ -1280,7 +1280,11 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
 
 	if (!plug->multiple_queues && last && last->q != rq->q)
 		plug->multiple_queues = true;
-	if (!plug->has_elevator && (rq->rq_flags & RQF_USE_SCHED))
+	/*
+	 * Any request allocated from sched tags can't be issued to
+	 * ->queue_rqs() directly
+	 */
+	if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS))
 		plug->has_elevator = true;
 	rq->rq_next = NULL;
 	rq_list_add(&plug->mq_list, rq);
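
The plug bookkeeping now keys off RQF_SCHED_TAGS rather than RQF_USE_SCHED: per the new comment, any request whose tag came from scheduler tags can't be issued to ->queue_rqs() directly, even if it doesn't take the elevator insert path. A standalone C sketch of that distinction; the flag values and struct layouts are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

#define RQF_SCHED_TAGS	(1u << 0)	/* tag allocated from sched tags */
#define RQF_USE_SCHED	(1u << 1)	/* request uses the elevator insert path */

struct request { unsigned int rq_flags; };
struct blk_plug { bool has_elevator; };

static void add_rq_to_plug(struct blk_plug *plug, struct request *rq)
{
	/* The bypass decision must track tag origin: a sched-tags request
	 * with RQF_USE_SCHED clear still can't go through ->queue_rqs(). */
	if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS))
		plug->has_elevator = true;
}

int main(void)
{
	struct blk_plug plug = { .has_elevator = false };
	struct request rq = { .rq_flags = RQF_SCHED_TAGS };	/* no RQF_USE_SCHED */

	add_rq_to_plug(&plug, &rq);
	printf("has_elevator = %d\n", plug.has_elevator);	/* prints 1 */
	return 0;
}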

block/blk-sysfs.c

Lines changed: 103 additions & 78 deletions
@@ -47,19 +47,6 @@ queue_var_store(unsigned long *var, const char *page, size_t count)
 	return count;
 }
 
-static ssize_t queue_var_store64(s64 *var, const char *page)
-{
-	int err;
-	s64 v;
-
-	err = kstrtos64(page, 10, &v);
-	if (err < 0)
-		return err;
-
-	*var = v;
-	return 0;
-}
-
 static ssize_t queue_requests_show(struct request_queue *q, char *page)
 {
 	return queue_var_show(q->nr_requests, page);
@@ -451,61 +438,6 @@ static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
 	return count;
 }
 
-static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
-{
-	if (!wbt_rq_qos(q))
-		return -EINVAL;
-
-	if (wbt_disabled(q))
-		return sprintf(page, "0\n");
-
-	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
-}
-
-static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
-				  size_t count)
-{
-	struct rq_qos *rqos;
-	ssize_t ret;
-	s64 val;
-
-	ret = queue_var_store64(&val, page);
-	if (ret < 0)
-		return ret;
-	if (val < -1)
-		return -EINVAL;
-
-	rqos = wbt_rq_qos(q);
-	if (!rqos) {
-		ret = wbt_init(q->disk);
-		if (ret)
-			return ret;
-	}
-
-	if (val == -1)
-		val = wbt_default_latency_nsec(q);
-	else if (val >= 0)
-		val *= 1000ULL;
-
-	if (wbt_get_min_lat(q) == val)
-		return count;
-
-	/*
-	 * Ensure that the queue is idled, in case the latency update
-	 * ends up either enabling or disabling wbt completely. We can't
-	 * have IO inflight if that happens.
-	 */
-	blk_mq_freeze_queue(q);
-	blk_mq_quiesce_queue(q);
-
-	wbt_set_min_lat(q, val);
-
-	blk_mq_unquiesce_queue(q);
-	blk_mq_unfreeze_queue(q);
-
-	return count;
-}
-
 static ssize_t queue_wc_show(struct request_queue *q, char *page)
 {
 	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
@@ -598,7 +530,6 @@ QUEUE_RW_ENTRY(queue_wc, "write_cache");
 QUEUE_RO_ENTRY(queue_fua, "fua");
 QUEUE_RO_ENTRY(queue_dax, "dax");
 QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
-QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
 QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
 QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");
 
@@ -617,16 +548,86 @@ QUEUE_RW_ENTRY(queue_iostats, "iostats");
 QUEUE_RW_ENTRY(queue_random, "add_random");
 QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");
 
+#ifdef CONFIG_BLK_WBT
+static ssize_t queue_var_store64(s64 *var, const char *page)
+{
+	int err;
+	s64 v;
+
+	err = kstrtos64(page, 10, &v);
+	if (err < 0)
+		return err;
+
+	*var = v;
+	return 0;
+}
+
+static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
+{
+	if (!wbt_rq_qos(q))
+		return -EINVAL;
+
+	if (wbt_disabled(q))
+		return sprintf(page, "0\n");
+
+	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
+}
+
+static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
+				  size_t count)
+{
+	struct rq_qos *rqos;
+	ssize_t ret;
+	s64 val;
+
+	ret = queue_var_store64(&val, page);
+	if (ret < 0)
+		return ret;
+	if (val < -1)
+		return -EINVAL;
+
+	rqos = wbt_rq_qos(q);
+	if (!rqos) {
+		ret = wbt_init(q->disk);
+		if (ret)
+			return ret;
+	}
+
+	if (val == -1)
+		val = wbt_default_latency_nsec(q);
+	else if (val >= 0)
+		val *= 1000ULL;
+
+	if (wbt_get_min_lat(q) == val)
+		return count;
+
+	/*
+	 * Ensure that the queue is idled, in case the latency update
+	 * ends up either enabling or disabling wbt completely. We can't
+	 * have IO inflight if that happens.
+	 */
+	blk_mq_freeze_queue(q);
+	blk_mq_quiesce_queue(q);
+
+	wbt_set_min_lat(q, val);
+
+	blk_mq_unquiesce_queue(q);
+	blk_mq_unfreeze_queue(q);
+
+	return count;
+}
+
+QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
+#endif
+
 static struct attribute *queue_attrs[] = {
-	&queue_requests_entry.attr,
 	&queue_ra_entry.attr,
 	&queue_max_hw_sectors_entry.attr,
 	&queue_max_sectors_entry.attr,
 	&queue_max_segments_entry.attr,
 	&queue_max_discard_segments_entry.attr,
 	&queue_max_integrity_segments_entry.attr,
 	&queue_max_segment_size_entry.attr,
-	&elv_iosched_entry.attr,
 	&queue_hw_sector_size_entry.attr,
 	&queue_logical_block_size_entry.attr,
 	&queue_physical_block_size_entry.attr,
@@ -647,17 +648,14 @@ static struct attribute *queue_attrs[] = {
 	&queue_max_open_zones_entry.attr,
 	&queue_max_active_zones_entry.attr,
 	&queue_nomerges_entry.attr,
-	&queue_rq_affinity_entry.attr,
 	&queue_iostats_entry.attr,
 	&queue_stable_writes_entry.attr,
 	&queue_random_entry.attr,
 	&queue_poll_entry.attr,
 	&queue_wc_entry.attr,
 	&queue_fua_entry.attr,
 	&queue_dax_entry.attr,
-	&queue_wb_lat_entry.attr,
 	&queue_poll_delay_entry.attr,
-	&queue_io_timeout_entry.attr,
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
 	&blk_throtl_sample_time_entry.attr,
 #endif
@@ -666,16 +664,23 @@ static struct attribute *queue_attrs[] = {
 	NULL,
 };
 
+static struct attribute *blk_mq_queue_attrs[] = {
+	&queue_requests_entry.attr,
+	&elv_iosched_entry.attr,
+	&queue_rq_affinity_entry.attr,
+	&queue_io_timeout_entry.attr,
+#ifdef CONFIG_BLK_WBT
+	&queue_wb_lat_entry.attr,
+#endif
+	NULL,
+};
+
 static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
 				  int n)
 {
 	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
 	struct request_queue *q = disk->queue;
 
-	if (attr == &queue_io_timeout_entry.attr &&
-	    (!q->mq_ops || !q->mq_ops->timeout))
-		return 0;
-
 	if ((attr == &queue_max_open_zones_entry.attr ||
 	     attr == &queue_max_active_zones_entry.attr) &&
 	    !blk_queue_is_zoned(q))
@@ -684,11 +689,30 @@ static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
 	return attr->mode;
 }
 
+static umode_t blk_mq_queue_attr_visible(struct kobject *kobj,
+					 struct attribute *attr, int n)
+{
+	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
+	struct request_queue *q = disk->queue;
+
+	if (!queue_is_mq(q))
+		return 0;
+
+	if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
+		return 0;
+
+	return attr->mode;
+}
+
 static struct attribute_group queue_attr_group = {
 	.attrs = queue_attrs,
 	.is_visible = queue_attr_visible,
 };
 
+static struct attribute_group blk_mq_queue_attr_group = {
+	.attrs = blk_mq_queue_attrs,
+	.is_visible = blk_mq_queue_attr_visible,
+};
 
 #define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
 
@@ -733,6 +757,7 @@ static const struct sysfs_ops queue_sysfs_ops = {
 
 static const struct attribute_group *blk_queue_attr_groups[] = {
 	&queue_attr_group,
+	&blk_mq_queue_attr_group,
 	NULL
 };
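
This is the "add a new attr_group for blk_mq" change: the mq-only attributes (nr_requests, scheduler, rq_affinity, io_timeout and, under CONFIG_BLK_WBT, wbt_lat_usec) move into a second attribute_group whose .is_visible callback hides them wholesale on non-mq queues, instead of being registered unconditionally. A hedged toy-module sketch of the same sysfs mechanism, assuming a kernel with sysfs_emit() (>= 5.10); the kobject name and the feature_enabled gate are invented for illustration:

#include <linux/module.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static struct kobject *demo_kobj;
static bool feature_enabled;	/* stands in for queue_is_mq() */

static ssize_t feature_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%d\n", feature_enabled);
}

static struct kobj_attribute feature_attr = __ATTR_RO(feature);

static struct attribute *demo_attrs[] = {
	&feature_attr.attr,
	NULL,
};

/* Called per attribute when the group is registered: returning 0 hides
 * the file entirely, like blk_mq_queue_attr_visible() does for non-mq
 * queues. */
static umode_t demo_attr_visible(struct kobject *kobj, struct attribute *attr,
				 int n)
{
	if (!feature_enabled)
		return 0;
	return attr->mode;
}

static const struct attribute_group demo_group = {
	.attrs = demo_attrs,
	.is_visible = demo_attr_visible,
};

static int __init demo_init(void)
{
	demo_kobj = kobject_create_and_add("attr_group_demo", kernel_kobj);
	if (!demo_kobj)
		return -ENOMEM;
	return sysfs_create_group(demo_kobj, &demo_group);
}

static void __exit demo_exit(void)
{
	sysfs_remove_group(demo_kobj, &demo_group);
	kobject_put(demo_kobj);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Keeping the decision in one .is_visible callback, as the patch does via blk_mq_queue_attr_group, avoids sprinkling queue_is_mq() checks across individual show/store handlers.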

block/blk-throttle.c

Lines changed: 0 additions & 6 deletions
@@ -2178,12 +2178,6 @@ bool __blk_throtl_bio(struct bio *bio)
 
 	rcu_read_lock();
 
-	if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
-		blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
-				bio->bi_iter.bi_size);
-		blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
-	}
-
 	spin_lock_irq(&q->queue_lock);
 
 	throtl_update_latency_buckets(td);

block/blk-throttle.h

Lines changed: 9 additions & 0 deletions
@@ -185,6 +185,15 @@ static inline bool blk_should_throtl(struct bio *bio)
 	struct throtl_grp *tg = blkg_to_tg(bio->bi_blkg);
 	int rw = bio_data_dir(bio);
 
+	if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
+		if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
+			bio_set_flag(bio, BIO_CGROUP_ACCT);
+			blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
+					bio->bi_iter.bi_size);
+		}
+		blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
+	}
+
 	/* iops limit is always counted */
 	if (tg->has_rules_iops[rw])
 		return true;
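
Paired with the blk-throttle.c hunk above, this moves cgroup-v1 byte/io accounting from __blk_throtl_bio() into blk_should_throtl(). Since the helper can run more than once for the same bio (e.g. after a split), the BIO_CGROUP_ACCT flag makes the byte charge one-shot while ios still count on every pass. A standalone C sketch of that one-shot pattern; the types and flag value are illustrative:

#include <stdio.h>

#define BIO_CGROUP_ACCT	(1u << 0)

struct bio {
	unsigned int flags;
	unsigned int size;
};

struct rwstat {
	unsigned long long bytes;
	unsigned long long ios;
};

static void account_bio(struct rwstat *st, struct bio *bio)
{
	if (!(bio->flags & BIO_CGROUP_ACCT)) {
		bio->flags |= BIO_CGROUP_ACCT;	/* bytes only once per bio */
		st->bytes += bio->size;
	}
	st->ios++;				/* ios on every pass */
}

int main(void)
{
	struct rwstat st = { 0, 0 };
	struct bio bio = { 0, 4096 };

	account_bio(&st, &bio);
	account_bio(&st, &bio);	/* re-entry for the same bio */
	printf("bytes=%llu ios=%llu\n", st.bytes, st.ios);	/* 4096, 2 */
	return 0;
}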

block/blk-wbt.c

Lines changed: 2 additions & 19 deletions
@@ -146,7 +146,7 @@ enum {
 static inline bool rwb_enabled(struct rq_wb *rwb)
 {
 	return rwb && rwb->enable_state != WBT_STATE_OFF_DEFAULT &&
-		      rwb->wb_normal != 0;
+		      rwb->enable_state != WBT_STATE_OFF_MANUAL;
 }
 
 static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
@@ -200,15 +200,6 @@ static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
 
 	inflight = atomic_dec_return(&rqw->inflight);
 
-	/*
-	 * wbt got disabled with IO in flight. Wake up any potential
-	 * waiters, we don't have to do more than that.
-	 */
-	if (unlikely(!rwb_enabled(rwb))) {
-		rwb_wake_all(rwb);
-		return;
-	}
-
 	/*
 	 * For discards, our limit is always the background. For writes, if
 	 * the device does write back caching, drop further down before we
@@ -503,8 +494,7 @@ bool wbt_disabled(struct request_queue *q)
 {
 	struct rq_qos *rqos = wbt_rq_qos(q);
 
-	return !rqos || RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT ||
-	       RQWB(rqos)->enable_state == WBT_STATE_OFF_MANUAL;
+	return !rqos || !rwb_enabled(RQWB(rqos));
 }
 
 u64 wbt_get_min_lat(struct request_queue *q)
@@ -545,13 +535,6 @@ static inline unsigned int get_limit(struct rq_wb *rwb, blk_opf_t opf)
 {
 	unsigned int limit;
 
-	/*
-	 * If we got disabled, just return UINT_MAX. This ensures that
-	 * we'll properly inc a new IO, and dec+wakeup at the end.
-	 */
-	if (!rwb_enabled(rwb))
-		return UINT_MAX;
-
 	if ((opf & REQ_OP_MASK) == REQ_OP_DISCARD)
 		return rwb->wb_background;
 
557540
