Skip to content

Commit e9eec6f

Browse files
LiBaokun96 authored and tytso committed
ext4: add ext4_try_lock_group() to skip busy groups
When ext4 allocates blocks, we used to just go through the block groups one by one to find a good one. But when there are tons of block groups (like hundreds of thousands or even millions) and not many have free space (meaning they're mostly full), it takes a really long time to check them all, and performance gets bad. So, we added the "mb_optimize_scan" mount option (which is on by default now). It keeps track of some group lists, so when we need a free block, we can just grab a likely group from the right list. This saves time and makes block allocation much faster. But when multiple processes or containers are doing similar things, like constantly allocating 8k blocks, they all try to use the same block group in the same list. Even just two processes doing this can cut the IOPS in half. For example, one container might do 300,000 IOPS, but if you run two at the same time, the total is only 150,000. Since we can already look at block groups in a non-linear way, the first and last groups in the same list are basically the same for finding a block right now. Therefore, add an ext4_try_lock_group() helper function to skip the current group when it is locked by another process, thereby avoiding contention with other processes. This helps ext4 make better use of having multiple block groups. Also, to make sure we don't skip all the groups that have free space when allocating blocks, we won't try to skip busy groups anymore when ac_criteria is CR_ANY_FREE. Performance test data follows: Test: Running will-it-scale/fallocate2 on CPU-bound containers. Observation: Average fallocate operations per container per second. 
|CPU: Kunpeng 920   |  P80                    |
|Memory: 512GB      |-------------------------|
|960GB SSD (0.5GB/s)| base  | patched         |
|-------------------|-------|-----------------|
|mb_optimize_scan=0 | 2667  | 4821 (+80.7%)   |
|mb_optimize_scan=1 | 2643  | 4784 (+81.0%)   |

|CPU: AMD 9654 * 2  |  P96                    |
|Memory: 1536GB     |-------------------------|
|960GB SSD (1GB/s)  | base  | patched         |
|-------------------|-------|-----------------|
|mb_optimize_scan=0 | 3450  | 15371 (+345%)   |
|mb_optimize_scan=1 | 3209  | 6101 (+90.0%)   |

Signed-off-by: Baokun Li <libaokun1@huawei.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
Link: https://patch.msgid.link/20250714130327.1830534-2-libaokun1@huawei.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
1 parent 82e6381 commit e9eec6f

2 files changed

Lines changed: 30 additions & 12 deletions

File tree

fs/ext4/ext4.h

Lines changed: 14 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -3541,23 +3541,28 @@ static inline int ext4_fs_is_busy(struct ext4_sb_info *sbi)
35413541
return (atomic_read(&sbi->s_lock_busy) > EXT4_CONTENTION_THRESHOLD);
35423542
}
35433543

3544+
static inline bool ext4_try_lock_group(struct super_block *sb, ext4_group_t group)
3545+
{
3546+
if (!spin_trylock(ext4_group_lock_ptr(sb, group)))
3547+
return false;
3548+
/*
3549+
* We're able to grab the lock right away, so drop the lock
3550+
* contention counter.
3551+
*/
3552+
atomic_add_unless(&EXT4_SB(sb)->s_lock_busy, -1, 0);
3553+
return true;
3554+
}
3555+
35443556
static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
35453557
{
3546-
spinlock_t *lock = ext4_group_lock_ptr(sb, group);
3547-
if (spin_trylock(lock))
3548-
/*
3549-
* We're able to grab the lock right away, so drop the
3550-
* lock contention counter.
3551-
*/
3552-
atomic_add_unless(&EXT4_SB(sb)->s_lock_busy, -1, 0);
3553-
else {
3558+
if (!ext4_try_lock_group(sb, group)) {
35543559
/*
35553560
* The lock is busy, so bump the contention counter,
35563561
* and then wait on the spin lock.
35573562
*/
35583563
atomic_add_unless(&EXT4_SB(sb)->s_lock_busy, 1,
35593564
EXT4_MAX_CONTENTION);
3560-
spin_lock(lock);
3565+
spin_lock(ext4_group_lock_ptr(sb, group));
35613566
}
35623567
}
35633568

fs/ext4/mballoc.c

Lines changed: 16 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -896,7 +896,8 @@ static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context
896896
bb_largest_free_order_node) {
897897
if (sbi->s_mb_stats)
898898
atomic64_inc(&sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]);
899-
if (likely(ext4_mb_good_group(ac, iter->bb_group, CR_POWER2_ALIGNED))) {
899+
if (!spin_is_locked(ext4_group_lock_ptr(ac->ac_sb, iter->bb_group)) &&
900+
likely(ext4_mb_good_group(ac, iter->bb_group, CR_POWER2_ALIGNED))) {
900901
*group = iter->bb_group;
901902
ac->ac_flags |= EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED;
902903
read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
@@ -932,7 +933,8 @@ ext4_mb_find_good_group_avg_frag_lists(struct ext4_allocation_context *ac, int o
932933
list_for_each_entry(iter, frag_list, bb_avg_fragment_size_node) {
933934
if (sbi->s_mb_stats)
934935
atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]);
935-
if (likely(ext4_mb_good_group(ac, iter->bb_group, cr))) {
936+
if (!spin_is_locked(ext4_group_lock_ptr(ac->ac_sb, iter->bb_group)) &&
937+
likely(ext4_mb_good_group(ac, iter->bb_group, cr))) {
936938
grp = iter;
937939
break;
938940
}
@@ -2899,6 +2901,11 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
28992901
nr, &prefetch_ios);
29002902
}
29012903

2904+
/* prevent unnecessary buddy loading. */
2905+
if (cr < CR_ANY_FREE &&
2906+
spin_is_locked(ext4_group_lock_ptr(sb, group)))
2907+
continue;
2908+
29022909
/* This now checks without needing the buddy page */
29032910
ret = ext4_mb_good_group_nolock(ac, group, cr);
29042911
if (ret <= 0) {
@@ -2911,7 +2918,13 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
29112918
if (err)
29122919
goto out;
29132920

2914-
ext4_lock_group(sb, group);
2921+
/* skip busy group */
2922+
if (cr >= CR_ANY_FREE) {
2923+
ext4_lock_group(sb, group);
2924+
} else if (!ext4_try_lock_group(sb, group)) {
2925+
ext4_mb_unload_buddy(&e4b);
2926+
continue;
2927+
}
29152928

29162929
/*
29172930
* We need to check again after locking the

0 commit comments

Comments
 (0)