Skip to content

Commit c65c13f

Browse files
author
Kent Overstreet
committed
bcachefs: Run btree key cache shrinker less aggressively
The btree key cache maintains lists of items that have been freed, but can't yet be reclaimed because a bch2_trans_relock() call might find them - we're waiting for SRCU readers to release. Previously, we wouldn't count these items against the number we're attempting to scan for, which would mean we'd evict more live key cache entries - doing quite a bit of potentially unnecessary work. With recent work to make sure we don't hold SRCU locks for too long, it should be safe to count all the items on the freelists against the number to scan - even if we can't reclaim them yet, we will be able to soon. Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
1 parent 1bd5bcc commit c65c13f

2 files changed

Lines changed: 23 additions & 4 deletions

File tree

fs/bcachefs/btree_key_cache.c

Lines changed: 19 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -89,10 +89,13 @@ static void bkey_cached_free(struct btree_key_cache *bc,
8989
ck->btree_trans_barrier_seq =
9090
start_poll_synchronize_srcu(&c->btree_trans_barrier);
9191

92-
if (ck->c.lock.readers)
92+
if (ck->c.lock.readers) {
9393
list_move_tail(&ck->list, &bc->freed_pcpu);
94-
else
94+
bc->nr_freed_pcpu++;
95+
} else {
9596
list_move_tail(&ck->list, &bc->freed_nonpcpu);
97+
bc->nr_freed_nonpcpu++;
98+
}
9699
atomic_long_inc(&bc->nr_freed);
97100

98101
kfree(ck->k);
@@ -109,6 +112,8 @@ static void __bkey_cached_move_to_freelist_ordered(struct btree_key_cache *bc,
109112
{
110113
struct bkey_cached *pos;
111114

115+
bc->nr_freed_nonpcpu++;
116+
112117
list_for_each_entry_reverse(pos, &bc->freed_nonpcpu, list) {
113118
if (ULONG_CMP_GE(ck->btree_trans_barrier_seq,
114119
pos->btree_trans_barrier_seq)) {
@@ -158,6 +163,7 @@ static void bkey_cached_move_to_freelist(struct btree_key_cache *bc,
158163
#else
159164
mutex_lock(&bc->lock);
160165
list_move_tail(&ck->list, &bc->freed_nonpcpu);
166+
bc->nr_freed_nonpcpu++;
161167
mutex_unlock(&bc->lock);
162168
#endif
163169
} else {
@@ -217,6 +223,7 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path,
217223
f->nr < ARRAY_SIZE(f->objs) / 2) {
218224
ck = list_last_entry(&bc->freed_nonpcpu, struct bkey_cached, list);
219225
list_del_init(&ck->list);
226+
bc->nr_freed_nonpcpu--;
220227
f->objs[f->nr++] = ck;
221228
}
222229

@@ -229,6 +236,7 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path,
229236
if (!list_empty(&bc->freed_nonpcpu)) {
230237
ck = list_last_entry(&bc->freed_nonpcpu, struct bkey_cached, list);
231238
list_del_init(&ck->list);
239+
bc->nr_freed_nonpcpu--;
232240
}
233241
mutex_unlock(&bc->lock);
234242
#endif
@@ -850,6 +858,8 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
850858
* Newest freed entries are at the end of the list - once we hit one
851859
* that's too new to be freed, we can bail out:
852860
*/
861+
scanned += bc->nr_freed_nonpcpu;
862+
853863
list_for_each_entry_safe(ck, t, &bc->freed_nonpcpu, list) {
854864
if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
855865
ck->btree_trans_barrier_seq))
@@ -859,13 +869,15 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
859869
six_lock_exit(&ck->c.lock);
860870
kmem_cache_free(bch2_key_cache, ck);
861871
atomic_long_dec(&bc->nr_freed);
862-
scanned++;
863872
freed++;
873+
bc->nr_freed_nonpcpu--;
864874
}
865875

866876
if (scanned >= nr)
867877
goto out;
868878

879+
scanned += bc->nr_freed_pcpu;
880+
869881
list_for_each_entry_safe(ck, t, &bc->freed_pcpu, list) {
870882
if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
871883
ck->btree_trans_barrier_seq))
@@ -875,8 +887,8 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
875887
six_lock_exit(&ck->c.lock);
876888
kmem_cache_free(bch2_key_cache, ck);
877889
atomic_long_dec(&bc->nr_freed);
878-
scanned++;
879890
freed++;
891+
bc->nr_freed_pcpu--;
880892
}
881893

882894
if (scanned >= nr)
@@ -982,6 +994,9 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
982994
}
983995
#endif
984996

997+
BUG_ON(list_count_nodes(&bc->freed_pcpu) != bc->nr_freed_pcpu);
998+
BUG_ON(list_count_nodes(&bc->freed_nonpcpu) != bc->nr_freed_nonpcpu);
999+
9851000
list_splice(&bc->freed_pcpu, &items);
9861001
list_splice(&bc->freed_nonpcpu, &items);
9871002

fs/bcachefs/btree_key_cache_types.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,8 +11,12 @@ struct btree_key_cache {
1111
struct mutex lock;
1212
struct rhashtable table;
1313
bool table_init_done;
14+
1415
struct list_head freed_pcpu;
16+
size_t nr_freed_pcpu;
1517
struct list_head freed_nonpcpu;
18+
size_t nr_freed_nonpcpu;
19+
1620
struct shrinker *shrink;
1721
unsigned shrink_iter;
1822
struct btree_key_cache_freelist __percpu *pcpu_freed;

0 commit comments

Comments
 (0)