Skip to content

Commit adfe935

Browse files
author
Kent Overstreet
committed
bcachefs: Tweak btree key cache shrinker so it actually frees
Freeing key cache items is a multi-stage process; we need to wait for an SRCU grace period to elapse, and we handle this ourselves — partially to avoid callback overhead, but primarily so that when allocating we can first allocate from the freed items waiting for an SRCU grace period.

Previously, the shrinker was counting the items on the 'waiting for SRCU grace period' lists as items being scanned, but this meant that too many items waiting for an SRCU grace period could prevent it from doing any work at all.

After this, we're seeing that items skipped due to the accessed bit are the main cause of the shrinker not making any progress, and we actually want the key cache shrinker to run quite aggressively, because reclaimed items will still generally be found (more compactly) in the btree node cache — so we also tweak the shrinker to not count those against nr_to_scan.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
1 parent 6e4d9bd commit adfe935

1 file changed

Lines changed: 4 additions & 15 deletions

File tree

fs/bcachefs/btree_key_cache.c

Lines changed: 4 additions & 15 deletions
Original file line number | Diff line number | Diff line change
@@ -842,8 +842,6 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
842842
* Newest freed entries are at the end of the list - once we hit one
843843
* that's too new to be freed, we can bail out:
844844
*/
845-
scanned += bc->nr_freed_nonpcpu;
846-
847845
list_for_each_entry_safe(ck, t, &bc->freed_nonpcpu, list) {
848846
if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
849847
ck->btree_trans_barrier_seq))
@@ -857,11 +855,6 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
857855
bc->nr_freed_nonpcpu--;
858856
}
859857

860-
if (scanned >= nr)
861-
goto out;
862-
863-
scanned += bc->nr_freed_pcpu;
864-
865858
list_for_each_entry_safe(ck, t, &bc->freed_pcpu, list) {
866859
if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
867860
ck->btree_trans_barrier_seq))
@@ -875,9 +868,6 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
875868
bc->nr_freed_pcpu--;
876869
}
877870

878-
if (scanned >= nr)
879-
goto out;
880-
881871
rcu_read_lock();
882872
tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
883873
if (bc->shrink_iter >= tbl->size)
@@ -893,12 +883,12 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
893883
next = rht_dereference_bucket_rcu(pos->next, tbl, bc->shrink_iter);
894884
ck = container_of(pos, struct bkey_cached, hash);
895885

896-
if (test_bit(BKEY_CACHED_DIRTY, &ck->flags))
886+
if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
897887
goto next;
898-
899-
if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
888+
} else if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags)) {
900889
clear_bit(BKEY_CACHED_ACCESSED, &ck->flags);
901-
else if (bkey_cached_lock_for_evict(ck)) {
890+
goto next;
891+
} else if (bkey_cached_lock_for_evict(ck)) {
902892
bkey_cached_evict(bc, ck);
903893
bkey_cached_free(bc, ck);
904894
}
@@ -916,7 +906,6 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
916906
} while (scanned < nr && bc->shrink_iter != start);
917907

918908
rcu_read_unlock();
919-
out:
920909
memalloc_nofs_restore(flags);
921910
srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);
922911
mutex_unlock(&bc->lock);

0 commit comments

Comments
 (0)