
Commit 36c18b8

Author: Mike Snitzer <snitzer@kernel.org>
dm bufio: prepare to intelligently size dm_buffer_cache's buffer_trees
Add a num_locks member to the dm_buffer_cache struct and use it rather than the NR_LOCKS magic value (64). The next commit will size the dm_buffer_cache's buffer_trees according to dm_num_hash_locks().

Signed-off-by: Mike Snitzer <snitzer@kernel.org>
Parent: 0bac3f2

1 file changed: drivers/md/dm-bufio.c (26 additions, 22 deletions)
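At the heart of the diff below is cache_index(): it picks one of num_locks rb-trees with a mask rather than a modulo, which is only valid while num_locks is a power of two. A minimal userspace sketch of that indexing (the sector_t typedef and main() harness are illustrative stand-ins, not kernel code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t; /* stand-in for the kernel's sector_t */

/* Mirrors the patched cache_index(): valid only for power-of-two num_locks,
 * where (block & (num_locks - 1)) == (block % num_locks). */
static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
{
	assert(num_locks && (num_locks & (num_locks - 1)) == 0);
	return block & (num_locks - 1);
}

int main(void)
{
	unsigned int num_locks = 64; /* the old NR_LOCKS value */
	sector_t b;

	for (b = 60; b < 68; b++)
		printf("block %llu -> tree %u\n",
		       (unsigned long long)b, cache_index(b, num_locks));
	return 0;
}

Because the mask is now computed from the per-cache num_locks instead of the compile-time LOCKS_MASK, cache_init() can be handed a different power-of-two tree count per client, which is what the follow-up commit exploits.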
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -380,45 +380,45 @@ struct dm_buffer {
  */
 
 #define NR_LOCKS 64
-#define LOCKS_MASK (NR_LOCKS - 1)
 
 struct buffer_tree {
 	struct rw_semaphore lock;
 	struct rb_root root;
 } ____cacheline_aligned_in_smp;
 
 struct dm_buffer_cache {
+	struct lru lru[LIST_SIZE];
 	/*
 	 * We spread entries across multiple trees to reduce contention
	 * on the locks.
 	 */
+	unsigned int num_locks;
 	struct buffer_tree trees[NR_LOCKS];
-	struct lru lru[LIST_SIZE];
 };
 
-static inline unsigned int cache_index(sector_t block)
+static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
 {
-	return block & LOCKS_MASK;
+	return block & (num_locks - 1);
 }
 
 static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
 {
-	down_read(&bc->trees[cache_index(block)].lock);
+	down_read(&bc->trees[cache_index(block, bc->num_locks)].lock);
 }
 
 static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
 {
-	up_read(&bc->trees[cache_index(block)].lock);
+	up_read(&bc->trees[cache_index(block, bc->num_locks)].lock);
 }
 
 static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
 {
-	down_write(&bc->trees[cache_index(block)].lock);
+	down_write(&bc->trees[cache_index(block, bc->num_locks)].lock);
 }
 
 static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
 {
-	up_write(&bc->trees[cache_index(block)].lock);
+	up_write(&bc->trees[cache_index(block, bc->num_locks)].lock);
 }
 
 /*
@@ -429,13 +429,15 @@ struct lock_history {
 	struct dm_buffer_cache *cache;
 	bool write;
 	unsigned int previous;
+	unsigned int no_previous;
 };
 
 static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool write)
 {
 	lh->cache = cache;
 	lh->write = write;
-	lh->previous = NR_LOCKS; /* indicates no previous */
+	lh->no_previous = cache->num_locks;
+	lh->previous = lh->no_previous;
 }
 
 static void __lh_lock(struct lock_history *lh, unsigned int index)
@@ -459,9 +461,9 @@ static void __lh_unlock(struct lock_history *lh, unsigned int index)
  */
 static void lh_exit(struct lock_history *lh)
 {
-	if (lh->previous != NR_LOCKS) {
+	if (lh->previous != lh->no_previous) {
 		__lh_unlock(lh, lh->previous);
-		lh->previous = NR_LOCKS;
+		lh->previous = lh->no_previous;
 	}
 }
 
@@ -471,9 +473,9 @@ static void lh_exit(struct lock_history *lh)
  */
 static void lh_next(struct lock_history *lh, sector_t b)
 {
-	unsigned int index = cache_index(b);
+	unsigned int index = cache_index(b, lh->no_previous); /* no_previous is num_locks */
 
-	if (lh->previous != NR_LOCKS) {
+	if (lh->previous != lh->no_previous) {
 		if (lh->previous != index) {
 			__lh_unlock(lh, lh->previous);
 			__lh_lock(lh, index);
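The lock_history hunks above replace the compile-time NR_LOCKS sentinel ("no previous lock held") with a per-cache one: num_locks is one past the largest valid tree index, so it can never collide with a real index. A simplified sketch of the pattern (printf stands in for the rw_semaphore calls; this is not the kernel code):

#include <stdio.h>

struct lock_history {
	unsigned int previous;    /* tree index whose lock is held */
	unsigned int no_previous; /* sentinel: == num_locks, never a valid index */
};

static void lh_init(struct lock_history *lh, unsigned int num_locks)
{
	lh->no_previous = num_locks;
	lh->previous = lh->no_previous;
}

/* Take the lock for `index`, reusing the held one when it already matches. */
static void lh_next(struct lock_history *lh, unsigned int index)
{
	if (lh->previous != lh->no_previous) {
		if (lh->previous == index)
			return; /* consecutive blocks hashed to the same tree */
		printf("unlock %u\n", lh->previous);
	}
	printf("lock %u\n", index);
	lh->previous = index;
}

int main(void)
{
	struct lock_history lh;

	lh_init(&lh, 64);
	lh_next(&lh, 3);
	lh_next(&lh, 3);  /* no unlock/lock churn */
	lh_next(&lh, 17); /* drops 3, takes 17 */
	return 0;
}

Carrying the sentinel in lh->no_previous rather than in a constant is what lets each dm_buffer_cache have its own lock count.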
@@ -500,11 +502,13 @@ static struct dm_buffer *list_to_buffer(struct list_head *l)
 	return le_to_buffer(le);
 }
 
-static void cache_init(struct dm_buffer_cache *bc)
+static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks)
 {
 	unsigned int i;
 
-	for (i = 0; i < NR_LOCKS; i++) {
+	bc->num_locks = num_locks;
+
+	for (i = 0; i < bc->num_locks; i++) {
 		init_rwsem(&bc->trees[i].lock);
 		bc->trees[i].root = RB_ROOT;
 	}
@@ -517,7 +521,7 @@ static void cache_destroy(struct dm_buffer_cache *bc)
 {
 	unsigned int i;
 
-	for (i = 0; i < NR_LOCKS; i++)
+	for (i = 0; i < bc->num_locks; i++)
 		WARN_ON_ONCE(!RB_EMPTY_ROOT(&bc->trees[i].root));
 
 	lru_destroy(&bc->lru[LIST_CLEAN]);
@@ -576,7 +580,7 @@ static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block)
 	struct dm_buffer *b;
 
 	cache_read_lock(bc, block);
-	b = __cache_get(&bc->trees[cache_index(block)].root, block);
+	b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block);
 	if (b) {
 		lru_reference(&b->lru);
 		__cache_inc_buffer(b);
@@ -650,7 +654,7 @@ static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode
 
 	b = le_to_buffer(le);
 	/* __evict_pred will have locked the appropriate tree. */
-	rb_erase(&b->node, &bc->trees[cache_index(b->block)].root);
+	rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
 
 	return b;
 }
@@ -816,7 +820,7 @@ static bool cache_insert(struct dm_buffer_cache *bc, struct dm_buffer *b)
 
 	cache_write_lock(bc, b->block);
 	BUG_ON(atomic_read(&b->hold_count) != 1);
-	r = __cache_insert(&bc->trees[cache_index(b->block)].root, b);
+	r = __cache_insert(&bc->trees[cache_index(b->block, bc->num_locks)].root, b);
 	if (r)
 		lru_insert(&bc->lru[b->list_mode], &b->lru);
 	cache_write_unlock(bc, b->block);
@@ -842,7 +846,7 @@ static bool cache_remove(struct dm_buffer_cache *bc, struct dm_buffer *b)
 		r = false;
 	} else {
 		r = true;
-		rb_erase(&b->node, &bc->trees[cache_index(b->block)].root);
+		rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
 		lru_remove(&bc->lru[b->list_mode], &b->lru);
 	}
 
@@ -911,7 +915,7 @@ static void cache_remove_range(struct dm_buffer_cache *bc,
 {
 	unsigned int i;
 
-	for (i = 0; i < NR_LOCKS; i++) {
+	for (i = 0; i < bc->num_locks; i++) {
 		down_write(&bc->trees[i].lock);
 		__remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
 		up_write(&bc->trees[i].lock);
@@ -2432,7 +2436,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
 		r = -ENOMEM;
 		goto bad_client;
 	}
-	cache_init(&c->cache);
+	cache_init(&c->cache, NR_LOCKS);
 
 	c->bdev = bdev;
 	c->block_size = block_size;
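For now the sole caller still passes the old NR_LOCKS constant, as the last hunk shows, so behavior is unchanged by this commit. Since cache_index() masks with (num_locks - 1), any future sizing must keep the count a power of two. A hypothetical helper for clamping a sizing hint, purely illustrative (the real sizing arrives in the next commit via dm_num_hash_locks()):

#include <stdio.h>

/* Hypothetical: round `hint` down to a power of two in [1, max_locks]. */
static unsigned int clamp_num_locks(unsigned int hint, unsigned int max_locks)
{
	unsigned int n = 1;

	while ((n << 1) <= hint && (n << 1) <= max_locks)
		n <<= 1;
	return n;
}

int main(void)
{
	printf("%u\n", clamp_num_locks(20, 64));  /* 16 */
	printf("%u\n", clamp_num_locks(100, 64)); /* 64 */
	return 0;
}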
