Skip to content

Commit 845f1f2

Browse files
visitorckw and akpm00
authored and committed
Revert "bcache: update min_heap_callbacks to use default builtin swap"
Patch series "bcache: Revert min_heap migration due to performance regression". This patch series reverts the migration of bcache from its original heap implementation to the generic min_heap library. While the original change aimed to simplify the code and improve maintainability, it introduced a severe performance regression in real-world scenarios. As reported by Robert, systems using bcache now suffer from periodic latency spikes, with P100 (max) latency increasing from 600 ms to 2.4 seconds every 5 minutes. This degrades bcache's value as a low-latency caching layer, and leads to frequent timeouts and application stalls in production environments. The primary cause of this regression is the behavior of the generic min_heap implementation's bottom-up sift_down, which performs up to 2 * log2(n) comparisons when many elements are equal. The original top-down variant used by bcache only required O(1) comparisons in such cases. The issue was further exacerbated by commit 92a8b22 ("lib/min_heap: introduce non-inline versions of min heap API functions"), which introduced non-inlined versions of the min_heap API, adding function call overhead to a performance-critical hot path. This patch (of 3): This reverts commit 3d8a9a1. Although removing the custom swap function simplified the code, this change is part of a broader migration to the generic min_heap API that introduced significant performance regressions in bcache. As reported by Robert, bcache now suffers from latency spikes, with P100 (max) latency increasing from 600 ms to 2.4 seconds every 5 minutes. These regressions degrade bcache's effectiveness as a low-latency cache layer and lead to frequent timeouts and application stalls in production environments. This revert is part of a series of changes to restore previous performance by undoing the min_heap transition. 
Link: https://lkml.kernel.org/r/20250614202353.1632957-1-visitorckw@gmail.com Link: https://lore.kernel.org/lkml/CAJhEC05+0S69z+3+FB2Cd0hD+pCRyWTKLEOsc8BOmH73p1m+KQ@mail.gmail.com Link: https://lkml.kernel.org/r/20250614202353.1632957-2-visitorckw@gmail.com Fixes: 866898e ("bcache: remove heap-related macros and switch to generic min_heap") Fixes: 92a8b22 ("lib/min_heap: introduce non-inline versions of min heap API functions") Signed-off-by: Kuan-Wei Chiu <visitorckw@gmail.com> Reported-by: Robert Pang <robertpang@google.com> Closes: https://lore.kernel.org/linux-bcache/CAJhEC06F_AtrPgw2-7CvCqZgeStgCtitbD-ryuPpXQA-JG5XXw@mail.gmail.com Acked-by: Coly Li <colyli@kernel.org> Cc: Ching-Chun (Jim) Huang <jserv@ccns.ncku.edu.tw> Cc: Kent Overstreet <kent.overstreet@linux.dev> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent 223731c commit 845f1f2

4 files changed

Lines changed: 38 additions & 7 deletions

File tree

drivers/md/bcache/alloc.c

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -189,16 +189,23 @@ static inline bool new_bucket_min_cmp(const void *l, const void *r, void *args)
189189
return new_bucket_prio(ca, *lhs) < new_bucket_prio(ca, *rhs);
190190
}
191191

192+
/*
 * Swap callback for the bucket min-heap: exchange the two heap slots,
 * each of which holds a struct bucket pointer. The args parameter is
 * required by the min_heap_callbacks signature but unused here.
 */
static inline void new_bucket_swap(void *l, void *r, void __always_unused *args)
{
	struct bucket **a = l;
	struct bucket **b = r;
	struct bucket *tmp;

	tmp = *a;
	*a = *b;
	*b = tmp;
}
198+
192199
static void invalidate_buckets_lru(struct cache *ca)
193200
{
194201
struct bucket *b;
195202
const struct min_heap_callbacks bucket_max_cmp_callback = {
196203
.less = new_bucket_max_cmp,
197-
.swp = NULL,
204+
.swp = new_bucket_swap,
198205
};
199206
const struct min_heap_callbacks bucket_min_cmp_callback = {
200207
.less = new_bucket_min_cmp,
201-
.swp = NULL,
208+
.swp = new_bucket_swap,
202209
};
203210

204211
ca->heap.nr = 0;

drivers/md/bcache/bset.c

Lines changed: 11 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1093,6 +1093,14 @@ static inline bool new_btree_iter_cmp(const void *l, const void *r, void __alway
10931093
return bkey_cmp(_l->k, _r->k) <= 0;
10941094
}
10951095

1096+
/*
 * Swap callback for the btree iterator heap: exchange two
 * struct btree_iter_set elements by value. The args parameter is
 * part of the min_heap_callbacks contract and is unused.
 */
static inline void new_btree_iter_swap(void *iter1, void *iter2, void __always_unused *args)
{
	struct btree_iter_set *a = iter1;
	struct btree_iter_set *b = iter2;
	struct btree_iter_set tmp;

	tmp = *a;
	*a = *b;
	*b = tmp;
}
1103+
10961104
static inline bool btree_iter_end(struct btree_iter *iter)
10971105
{
10981106
return !iter->heap.nr;
@@ -1103,7 +1111,7 @@ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
11031111
{
11041112
const struct min_heap_callbacks callbacks = {
11051113
.less = new_btree_iter_cmp,
1106-
.swp = NULL,
1114+
.swp = new_btree_iter_swap,
11071115
};
11081116

11091117
if (k != end)
@@ -1149,7 +1157,7 @@ static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
11491157
struct bkey *ret = NULL;
11501158
const struct min_heap_callbacks callbacks = {
11511159
.less = cmp,
1152-
.swp = NULL,
1160+
.swp = new_btree_iter_swap,
11531161
};
11541162

11551163
if (!btree_iter_end(iter)) {
@@ -1223,7 +1231,7 @@ static void btree_mergesort(struct btree_keys *b, struct bset *out,
12231231
: bch_ptr_invalid;
12241232
const struct min_heap_callbacks callbacks = {
12251233
.less = b->ops->sort_cmp,
1226-
.swp = NULL,
1234+
.swp = new_btree_iter_swap,
12271235
};
12281236

12291237
/* Heapify the iterator, using our comparison function */

drivers/md/bcache/extents.c

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -266,12 +266,20 @@ static bool new_bch_extent_sort_cmp(const void *l, const void *r, void __always_
266266
return !(c ? c > 0 : _l->k < _r->k);
267267
}
268268

269+
/*
 * Swap callback for the extent-sort heap: exchange two
 * struct btree_iter_set elements by value. Mirrors the helper of the
 * same name in bset.c; each translation unit carries its own copy.
 * The args parameter is required by min_heap_callbacks and unused.
 */
static inline void new_btree_iter_swap(void *iter1, void *iter2, void __always_unused *args)
{
	struct btree_iter_set *a = iter1;
	struct btree_iter_set *b = iter2;
	struct btree_iter_set tmp;

	tmp = *a;
	*a = *b;
	*b = tmp;
}
276+
269277
static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
270278
struct bkey *tmp)
271279
{
272280
const struct min_heap_callbacks callbacks = {
273281
.less = new_bch_extent_sort_cmp,
274-
.swp = NULL,
282+
.swp = new_btree_iter_swap,
275283
};
276284
while (iter->heap.nr > 1) {
277285
struct btree_iter_set *top = iter->heap.data, *i = top + 1;

drivers/md/bcache/movinggc.c

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -190,6 +190,14 @@ static bool new_bucket_cmp(const void *l, const void *r, void __always_unused *a
190190
return GC_SECTORS_USED(*_l) >= GC_SECTORS_USED(*_r);
191191
}
192192

193+
/*
 * Swap callback for the moving-GC bucket heap: exchange the two slots,
 * each holding a struct bucket pointer. The args parameter is required
 * by the min_heap_callbacks signature but unused.
 */
static void new_bucket_swap(void *l, void *r, void __always_unused *args)
{
	struct bucket **a = l;
	struct bucket **b = r;
	struct bucket *tmp;

	tmp = *a;
	*a = *b;
	*b = tmp;
}
200+
193201
static unsigned int bucket_heap_top(struct cache *ca)
194202
{
195203
struct bucket *b;
@@ -204,7 +212,7 @@ void bch_moving_gc(struct cache_set *c)
204212
unsigned long sectors_to_move, reserve_sectors;
205213
const struct min_heap_callbacks callbacks = {
206214
.less = new_bucket_cmp,
207-
.swp = NULL,
215+
.swp = new_bucket_swap,
208216
};
209217

210218
if (!c->copy_gc_enabled)

0 commit comments

Comments (0)