Skip to content

Commit 34841e6

Browse files
Ming Lei authored and Jens Axboe (axboe) committed
block: revert 4f1e963 ("blk-throtl: optimize IOPS throttle for large IO scenarios")
Revert commit 4f1e963 ("blk-throtl: optimize IOPS throttle for large IO scenarios") since we have another easier way to address this issue and get better iops throttling result.

Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20220216044514.2903784-9-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 5a93b60 commit 34841e6

2 files changed

Lines changed: 0 additions & 33 deletions

File tree

block/blk-throttle.c

Lines changed: 0 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -640,8 +640,6 @@ static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
640640
tg->bytes_disp[rw] = 0;
641641
tg->io_disp[rw] = 0;
642642

643-
atomic_set(&tg->io_split_cnt[rw], 0);
644-
645643
/*
646644
* Previous slice has expired. We must have trimmed it after last
647645
* bio dispatch. That means since start of last slice, we never used
@@ -665,8 +663,6 @@ static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
665663
tg->slice_start[rw] = jiffies;
666664
tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
667665

668-
atomic_set(&tg->io_split_cnt[rw], 0);
669-
670666
throtl_log(&tg->service_queue,
671667
"[%c] new slice start=%lu end=%lu jiffies=%lu",
672668
rw == READ ? 'R' : 'W', tg->slice_start[rw],
@@ -900,9 +896,6 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
900896
jiffies + tg->td->throtl_slice);
901897
}
902898

903-
if (iops_limit != UINT_MAX)
904-
tg->io_disp[rw] += atomic_xchg(&tg->io_split_cnt[rw], 0);
905-
906899
if (tg_with_in_bps_limit(tg, bio, bps_limit, &bps_wait) &&
907900
tg_with_in_iops_limit(tg, bio, iops_limit, &iops_wait)) {
908901
if (wait)
@@ -1927,14 +1920,12 @@ static void throtl_downgrade_check(struct throtl_grp *tg)
19271920
}
19281921

19291922
if (tg->iops[READ][LIMIT_LOW]) {
1930-
tg->last_io_disp[READ] += atomic_xchg(&tg->last_io_split_cnt[READ], 0);
19311923
iops = tg->last_io_disp[READ] * HZ / elapsed_time;
19321924
if (iops >= tg->iops[READ][LIMIT_LOW])
19331925
tg->last_low_overflow_time[READ] = now;
19341926
}
19351927

19361928
if (tg->iops[WRITE][LIMIT_LOW]) {
1937-
tg->last_io_disp[WRITE] += atomic_xchg(&tg->last_io_split_cnt[WRITE], 0);
19381929
iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
19391930
if (iops >= tg->iops[WRITE][LIMIT_LOW])
19401931
tg->last_low_overflow_time[WRITE] = now;
@@ -2053,25 +2044,6 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
20532044
}
20542045
#endif
20552046

2056-
void blk_throtl_charge_bio_split(struct bio *bio)
2057-
{
2058-
struct blkcg_gq *blkg = bio->bi_blkg;
2059-
struct throtl_grp *parent = blkg_to_tg(blkg);
2060-
struct throtl_service_queue *parent_sq;
2061-
bool rw = bio_data_dir(bio);
2062-
2063-
do {
2064-
if (!parent->has_rules[rw])
2065-
break;
2066-
2067-
atomic_inc(&parent->io_split_cnt[rw]);
2068-
atomic_inc(&parent->last_io_split_cnt[rw]);
2069-
2070-
parent_sq = parent->service_queue.parent_sq;
2071-
parent = sq_to_tg(parent_sq);
2072-
} while (parent);
2073-
}
2074-
20752047
bool __blk_throtl_bio(struct bio *bio)
20762048
{
20772049
struct request_queue *q = bdev_get_queue(bio->bi_bdev);

block/blk-throttle.h

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -138,9 +138,6 @@ struct throtl_grp {
138138
unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
139139
unsigned long bio_cnt_reset_time;
140140

141-
atomic_t io_split_cnt[2];
142-
atomic_t last_io_split_cnt[2];
143-
144141
struct blkg_rwstat stat_bytes;
145142
struct blkg_rwstat stat_ios;
146143
};
@@ -164,13 +161,11 @@ static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
164161
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
165162
static inline void blk_throtl_exit(struct request_queue *q) { }
166163
static inline void blk_throtl_register_queue(struct request_queue *q) { }
167-
static inline void blk_throtl_charge_bio_split(struct bio *bio) { }
168164
static inline bool blk_throtl_bio(struct bio *bio) { return false; }
169165
#else /* CONFIG_BLK_DEV_THROTTLING */
170166
int blk_throtl_init(struct request_queue *q);
171167
void blk_throtl_exit(struct request_queue *q);
172168
void blk_throtl_register_queue(struct request_queue *q);
173-
void blk_throtl_charge_bio_split(struct bio *bio);
174169
bool __blk_throtl_bio(struct bio *bio);
175170
static inline bool blk_throtl_bio(struct bio *bio)
176171
{

Commit comments (0)