Skip to content

Commit 4a78426

Browse files
Coly Li authored and axboe committed
bcache: remove embedded struct cache_sb from struct cache_set
Since bcache code was merged into mainline kernel, each cache set only has one single cache in it. The multiple caches framework is here but the code is far from completed. Considering the multiple copies of cached data can also be stored on e.g. md raid1 devices, it is unnecessary to support multiple caches in one cache set indeed. The previous preparation patches fix the dependencies of explicitly making a cache set only have single cache. Now we don't have to maintain an embedded partial super block in struct cache_set, the in-memory super block can be directly referenced from struct cache. This patch removes the embedded struct cache_sb from struct cache_set, and fixes all locations where the super block was referenced from this removed super block by referencing the in-memory super block of struct cache. Signed-off-by: Coly Li <colyli@suse.de> Reviewed-by: Hannes Reinecke <hare@suse.de> Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 6f9414e commit 4a78426

11 files changed

Lines changed: 46 additions & 59 deletions

File tree

drivers/md/bcache/alloc.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -87,7 +87,7 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
8787
{
8888
struct cache *ca;
8989
struct bucket *b;
90-
unsigned long next = c->nbuckets * c->sb.bucket_size / 1024;
90+
unsigned long next = c->nbuckets * c->cache->sb.bucket_size / 1024;
9191
int r;
9292

9393
atomic_sub(sectors, &c->rescale);
@@ -583,7 +583,7 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
583583
struct open_bucket, list);
584584
found:
585585
if (!ret->sectors_free && KEY_PTRS(alloc)) {
586-
ret->sectors_free = c->sb.bucket_size;
586+
ret->sectors_free = c->cache->sb.bucket_size;
587587
bkey_copy(&ret->key, alloc);
588588
bkey_init(alloc);
589589
}
@@ -677,7 +677,7 @@ bool bch_alloc_sectors(struct cache_set *c,
677677
&PTR_CACHE(c, &b->key, i)->sectors_written);
678678
}
679679

680-
if (b->sectors_free < c->sb.block_size)
680+
if (b->sectors_free < c->cache->sb.block_size)
681681
b->sectors_free = 0;
682682

683683
/*

drivers/md/bcache/bcache.h

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -517,8 +517,6 @@ struct cache_set {
517517
atomic_t idle_counter;
518518
atomic_t at_max_writeback_rate;
519519

520-
struct cache_sb sb;
521-
522520
struct cache *cache;
523521

524522
struct bcache_device **devices;
@@ -799,7 +797,7 @@ static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
799797

800798
static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
801799
{
802-
return s & (c->sb.bucket_size - 1);
800+
return s & (c->cache->sb.bucket_size - 1);
803801
}
804802

805803
static inline struct cache *PTR_CACHE(struct cache_set *c,

drivers/md/bcache/btree.c

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -117,7 +117,7 @@ static void bch_btree_init_next(struct btree *b)
117117

118118
if (b->written < btree_blocks(b))
119119
bch_bset_init_next(&b->keys, write_block(b),
120-
bset_magic(&b->c->sb));
120+
bset_magic(&b->c->cache->sb));
121121

122122
}
123123

@@ -155,7 +155,7 @@ void bch_btree_node_read_done(struct btree *b)
155155
* See the comment arount cache_set->fill_iter.
156156
*/
157157
iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
158-
iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
158+
iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
159159
iter->used = 0;
160160

161161
#ifdef CONFIG_BCACHE_DEBUG
@@ -178,7 +178,7 @@ void bch_btree_node_read_done(struct btree *b)
178178
goto err;
179179

180180
err = "bad magic";
181-
if (i->magic != bset_magic(&b->c->sb))
181+
if (i->magic != bset_magic(&b->c->cache->sb))
182182
goto err;
183183

184184
err = "bad checksum";
@@ -219,7 +219,7 @@ void bch_btree_node_read_done(struct btree *b)
219219

220220
if (b->written < btree_blocks(b))
221221
bch_bset_init_next(&b->keys, write_block(b),
222-
bset_magic(&b->c->sb));
222+
bset_magic(&b->c->cache->sb));
223223
out:
224224
mempool_free(iter, &b->c->fill_iter);
225225
return;
@@ -423,7 +423,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
423423

424424
do_btree_node_write(b);
425425

426-
atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->sb.block_size,
426+
atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size,
427427
&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
428428

429429
b->written += set_blocks(i, block_bytes(b->c->cache));
@@ -738,7 +738,7 @@ void bch_btree_cache_free(struct cache_set *c)
738738
if (c->verify_data)
739739
list_move(&c->verify_data->list, &c->btree_cache);
740740

741-
free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->sb)));
741+
free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb)));
742742
#endif
743743

744744
list_splice(&c->btree_cache_freeable,
@@ -785,7 +785,8 @@ int bch_btree_cache_alloc(struct cache_set *c)
785785
mutex_init(&c->verify_lock);
786786

787787
c->verify_ondisk = (void *)
788-
__get_free_pages(GFP_KERNEL|__GFP_COMP, ilog2(meta_bucket_pages(&c->sb)));
788+
__get_free_pages(GFP_KERNEL|__GFP_COMP,
789+
ilog2(meta_bucket_pages(&c->cache->sb)));
789790
if (!c->verify_ondisk) {
790791
/*
791792
* Don't worry about the mca_rereserve buckets
@@ -1108,7 +1109,7 @@ struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
11081109
}
11091110

11101111
b->parent = parent;
1111-
bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
1112+
bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb));
11121113

11131114
mutex_unlock(&c->bucket_lock);
11141115

drivers/md/bcache/btree.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -194,7 +194,7 @@ static inline unsigned int bset_block_offset(struct btree *b, struct bset *i)
194194

195195
static inline void set_gc_sectors(struct cache_set *c)
196196
{
197-
atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
197+
atomic_set(&c->sectors_to_gc, c->cache->sb.bucket_size * c->nbuckets / 16);
198198
}
199199

200200
void bkey_put(struct cache_set *c, struct bkey *k);

drivers/md/bcache/extents.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,7 @@ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
5454
size_t bucket = PTR_BUCKET_NR(c, k, i);
5555
size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
5656

57-
if (KEY_SIZE(k) + r > c->sb.bucket_size ||
57+
if (KEY_SIZE(k) + r > c->cache->sb.bucket_size ||
5858
bucket < ca->sb.first_bucket ||
5959
bucket >= ca->sb.nbuckets)
6060
return true;
@@ -75,7 +75,7 @@ static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
7575
size_t bucket = PTR_BUCKET_NR(c, k, i);
7676
size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
7777

78-
if (KEY_SIZE(k) + r > c->sb.bucket_size)
78+
if (KEY_SIZE(k) + r > c->cache->sb.bucket_size)
7979
return "bad, length too big";
8080
if (bucket < ca->sb.first_bucket)
8181
return "bad, short offset";
@@ -136,7 +136,7 @@ static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
136136
size_t n = PTR_BUCKET_NR(b->c, k, j);
137137

138138
pr_cont(" bucket %zu", n);
139-
if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
139+
if (n >= b->c->cache->sb.first_bucket && n < b->c->cache->sb.nbuckets)
140140
pr_cont(" prio %i",
141141
PTR_BUCKET(b->c, k, j)->prio);
142142
}

drivers/md/bcache/features.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ static struct feature feature_list[] = {
3030
for (f = &feature_list[0]; f->compat != 0; f++) { \
3131
if (f->compat != BCH_FEATURE_ ## type) \
3232
continue; \
33-
if (BCH_HAS_ ## type ## _FEATURE(&c->sb, f->mask)) { \
33+
if (BCH_HAS_ ## type ## _FEATURE(&c->cache->sb, f->mask)) { \
3434
if (first) { \
3535
out += snprintf(out, buf + size - out, \
3636
"["); \
@@ -44,7 +44,7 @@ static struct feature feature_list[] = {
4444
\
4545
out += snprintf(out, buf + size - out, "%s", f->string);\
4646
\
47-
if (BCH_HAS_ ## type ## _FEATURE(&c->sb, f->mask)) \
47+
if (BCH_HAS_ ## type ## _FEATURE(&c->cache->sb, f->mask)) \
4848
out += snprintf(out, buf + size - out, "]"); \
4949
\
5050
first = false; \

drivers/md/bcache/io.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ struct bio *bch_bbio_alloc(struct cache_set *c)
2626
struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
2727
struct bio *bio = &b->bio;
2828

29-
bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->sb));
29+
bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->cache->sb));
3030

3131
return bio;
3232
}

drivers/md/bcache/journal.c

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -666,7 +666,7 @@ static void journal_reclaim(struct cache_set *c)
666666

667667
bkey_init(k);
668668
SET_KEY_PTRS(k, 1);
669-
c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
669+
c->journal.blocks_free = ca->sb.bucket_size >> c->block_bits;
670670

671671
out:
672672
if (!journal_full(&c->journal))
@@ -735,7 +735,7 @@ static void journal_write_unlocked(struct closure *cl)
735735
struct journal_write *w = c->journal.cur;
736736
struct bkey *k = &c->journal.key;
737737
unsigned int i, sectors = set_blocks(w->data, block_bytes(ca)) *
738-
c->sb.block_size;
738+
ca->sb.block_size;
739739

740740
struct bio *bio;
741741
struct bio_list list;
@@ -762,7 +762,7 @@ static void journal_write_unlocked(struct closure *cl)
762762
bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);
763763

764764
w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
765-
w->data->magic = jset_magic(&c->sb);
765+
w->data->magic = jset_magic(&ca->sb);
766766
w->data->version = BCACHE_JSET_VERSION;
767767
w->data->last_seq = last_seq(&c->journal);
768768
w->data->csum = csum_set(w->data);
@@ -838,6 +838,7 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
838838
size_t sectors;
839839
struct closure cl;
840840
bool wait = false;
841+
struct cache *ca = c->cache;
841842

842843
closure_init_stack(&cl);
843844

@@ -847,10 +848,10 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
847848
struct journal_write *w = c->journal.cur;
848849

849850
sectors = __set_blocks(w->data, w->data->keys + nkeys,
850-
block_bytes(c->cache)) * c->sb.block_size;
851+
block_bytes(ca)) * ca->sb.block_size;
851852

852853
if (sectors <= min_t(size_t,
853-
c->journal.blocks_free * c->sb.block_size,
854+
c->journal.blocks_free * ca->sb.block_size,
854855
PAGE_SECTORS << JSET_BITS))
855856
return w;
856857

drivers/md/bcache/request.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -394,8 +394,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
394394
goto skip;
395395
}
396396

397-
if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
398-
bio_sectors(bio) & (c->sb.block_size - 1)) {
397+
if (bio->bi_iter.bi_sector & (c->cache->sb.block_size - 1) ||
398+
bio_sectors(bio) & (c->cache->sb.block_size - 1)) {
399399
pr_debug("skipping unaligned io\n");
400400
goto skip;
401401
}

drivers/md/bcache/super.c

Lines changed: 17 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -350,16 +350,10 @@ void bcache_write_super(struct cache_set *c)
350350
down(&c->sb_write_mutex);
351351
closure_init(cl, &c->cl);
352352

353-
c->sb.seq++;
353+
ca->sb.seq++;
354354

355-
if (c->sb.version > version)
356-
version = c->sb.version;
357-
358-
ca->sb.version = version;
359-
ca->sb.seq = c->sb.seq;
360-
ca->sb.last_mount = c->sb.last_mount;
361-
362-
SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
355+
if (ca->sb.version < version)
356+
ca->sb.version = version;
363357

364358
bio_init(bio, ca->sb_bv, 1);
365359
bio_set_dev(bio, ca->bdev);
@@ -477,7 +471,7 @@ static int __uuid_write(struct cache_set *c)
477471
{
478472
BKEY_PADDED(key) k;
479473
struct closure cl;
480-
struct cache *ca;
474+
struct cache *ca = c->cache;
481475
unsigned int size;
482476

483477
closure_init_stack(&cl);
@@ -486,13 +480,12 @@ static int __uuid_write(struct cache_set *c)
486480
if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true))
487481
return 1;
488482

489-
size = meta_bucket_pages(&c->sb) * PAGE_SECTORS;
483+
size = meta_bucket_pages(&ca->sb) * PAGE_SECTORS;
490484
SET_KEY_SIZE(&k.key, size);
491485
uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
492486
closure_sync(&cl);
493487

494488
/* Only one bucket used for uuid write */
495-
ca = PTR_CACHE(c, &k.key, 0);
496489
atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written);
497490

498491
bkey_copy(&c->uuid_bucket, &k.key);
@@ -1205,7 +1198,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
12051198
return -EINVAL;
12061199
}
12071200

1208-
if (dc->sb.block_size < c->sb.block_size) {
1201+
if (dc->sb.block_size < c->cache->sb.block_size) {
12091202
/* Will die */
12101203
pr_err("Couldn't attach %s: block size less than set's block size\n",
12111204
dc->backing_dev_name);
@@ -1663,15 +1656,16 @@ static void cache_set_free(struct closure *cl)
16631656
bch_journal_free(c);
16641657

16651658
mutex_lock(&bch_register_lock);
1659+
bch_bset_sort_state_free(&c->sort);
1660+
free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->cache->sb)));
1661+
16661662
ca = c->cache;
16671663
if (ca) {
16681664
ca->set = NULL;
16691665
c->cache = NULL;
16701666
kobject_put(&ca->kobj);
16711667
}
16721668

1673-
bch_bset_sort_state_free(&c->sort);
1674-
free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->sb)));
16751669

16761670
if (c->moving_gc_wq)
16771671
destroy_workqueue(c->moving_gc_wq);
@@ -1837,6 +1831,7 @@ void bch_cache_set_unregister(struct cache_set *c)
18371831
struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
18381832
{
18391833
int iter_size;
1834+
struct cache *ca = container_of(sb, struct cache, sb);
18401835
struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
18411836

18421837
if (!c)
@@ -1859,23 +1854,15 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
18591854
bch_cache_accounting_init(&c->accounting, &c->cl);
18601855

18611856
memcpy(c->set_uuid, sb->set_uuid, 16);
1862-
c->sb.block_size = sb->block_size;
1863-
c->sb.bucket_size = sb->bucket_size;
1864-
c->sb.nr_in_set = sb->nr_in_set;
1865-
c->sb.last_mount = sb->last_mount;
1866-
c->sb.version = sb->version;
1867-
if (c->sb.version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
1868-
c->sb.feature_compat = sb->feature_compat;
1869-
c->sb.feature_ro_compat = sb->feature_ro_compat;
1870-
c->sb.feature_incompat = sb->feature_incompat;
1871-
}
18721857

1858+
c->cache = ca;
1859+
c->cache->set = c;
18731860
c->bucket_bits = ilog2(sb->bucket_size);
18741861
c->block_bits = ilog2(sb->block_size);
1875-
c->nr_uuids = meta_bucket_bytes(&c->sb) / sizeof(struct uuid_entry);
1862+
c->nr_uuids = meta_bucket_bytes(sb) / sizeof(struct uuid_entry);
18761863
c->devices_max_used = 0;
18771864
atomic_set(&c->attached_dev_nr, 0);
1878-
c->btree_pages = meta_bucket_pages(&c->sb);
1865+
c->btree_pages = meta_bucket_pages(sb);
18791866
if (c->btree_pages > BTREE_MAX_PAGES)
18801867
c->btree_pages = max_t(int, c->btree_pages / 4,
18811868
BTREE_MAX_PAGES);
@@ -1913,7 +1900,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
19131900

19141901
if (mempool_init_kmalloc_pool(&c->bio_meta, 2,
19151902
sizeof(struct bbio) +
1916-
sizeof(struct bio_vec) * meta_bucket_pages(&c->sb)))
1903+
sizeof(struct bio_vec) * meta_bucket_pages(sb)))
19171904
goto err;
19181905

19191906
if (mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size))
@@ -1923,7 +1910,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
19231910
BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
19241911
goto err;
19251912

1926-
c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, &c->sb);
1913+
c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, sb);
19271914
if (!c->uuids)
19281915
goto err;
19291916

@@ -2103,7 +2090,7 @@ static int run_cache_set(struct cache_set *c)
21032090
goto err;
21042091

21052092
closure_sync(&cl);
2106-
c->sb.last_mount = (u32)ktime_get_real_seconds();
2093+
c->cache->sb.last_mount = (u32)ktime_get_real_seconds();
21072094
bcache_write_super(c);
21082095

21092096
list_for_each_entry_safe(dc, t, &uncached_devices, list)

0 commit comments

Comments
 (0)