Skip to content

Commit 0ad55fa

Browse files
LiBaokun96 authored and tytso committed
ext4: support large block size in ext4_mb_init_cache()
Currently, ext4_mb_init_cache() uses blocks_per_page to calculate the folio index and offset. However, when blocksize is larger than PAGE_SIZE, blocks_per_page becomes zero, leading to a potential division-by-zero bug.

Since we now have the folio, we know its exact size. This allows us to convert {blocks, groups}_per_page to {blocks, groups}_per_folio, thus supporting block sizes greater than page size.

Signed-off-by: Baokun Li <libaokun1@huawei.com>
Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
Message-ID: <20251121090654.631996-14-libaokun@huaweicloud.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
1 parent 3938fc2 commit 0ad55fa

1 file changed

Lines changed: 20 additions & 24 deletions

File tree

fs/ext4/mballoc.c

Lines changed: 20 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -1346,26 +1346,25 @@ static void mb_regenerate_buddy(struct ext4_buddy *e4b)
13461346
* block bitmap and buddy information. The information are
13471347
* stored in the inode as
13481348
*
1349-
* { page }
1349+
* { folio }
13501350
* [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
13511351
*
13521352
*
13531353
* one block each for bitmap and buddy information.
1354-
* So for each group we take up 2 blocks. A page can
1355-
* contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
1356-
* So it can have information regarding groups_per_page which
1357-
* is blocks_per_page/2
1354+
* So for each group we take up 2 blocks. A folio can
1355+
* contain blocks_per_folio (folio_size / blocksize) blocks.
1356+
* So it can have information regarding groups_per_folio which
1357+
* is blocks_per_folio/2
13581358
*
13591359
* Locking note: This routine takes the block group lock of all groups
1360-
* for this page; do not hold this lock when calling this routine!
1360+
* for this folio; do not hold this lock when calling this routine!
13611361
*/
1362-
13631362
static int ext4_mb_init_cache(struct folio *folio, char *incore, gfp_t gfp)
13641363
{
13651364
ext4_group_t ngroups;
13661365
unsigned int blocksize;
1367-
int blocks_per_page;
1368-
int groups_per_page;
1366+
int blocks_per_folio;
1367+
int groups_per_folio;
13691368
int err = 0;
13701369
int i;
13711370
ext4_group_t first_group, group;
@@ -1382,35 +1381,32 @@ static int ext4_mb_init_cache(struct folio *folio, char *incore, gfp_t gfp)
13821381
sb = inode->i_sb;
13831382
ngroups = ext4_get_groups_count(sb);
13841383
blocksize = i_blocksize(inode);
1385-
blocks_per_page = PAGE_SIZE / blocksize;
1384+
blocks_per_folio = folio_size(folio) / blocksize;
1385+
WARN_ON_ONCE(!blocks_per_folio);
1386+
groups_per_folio = DIV_ROUND_UP(blocks_per_folio, 2);
13861387

13871388
mb_debug(sb, "init folio %lu\n", folio->index);
13881389

1389-
groups_per_page = blocks_per_page >> 1;
1390-
if (groups_per_page == 0)
1391-
groups_per_page = 1;
1392-
13931390
/* allocate buffer_heads to read bitmaps */
1394-
if (groups_per_page > 1) {
1395-
i = sizeof(struct buffer_head *) * groups_per_page;
1391+
if (groups_per_folio > 1) {
1392+
i = sizeof(struct buffer_head *) * groups_per_folio;
13961393
bh = kzalloc(i, gfp);
13971394
if (bh == NULL)
13981395
return -ENOMEM;
13991396
} else
14001397
bh = &bhs;
14011398

1402-
first_group = folio->index * blocks_per_page / 2;
1403-
14041399
/* read all groups the folio covers into the cache */
1405-
for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
1400+
first_group = EXT4_PG_TO_LBLK(inode, folio->index) / 2;
1401+
for (i = 0, group = first_group; i < groups_per_folio; i++, group++) {
14061402
if (group >= ngroups)
14071403
break;
14081404

14091405
grinfo = ext4_get_group_info(sb, group);
14101406
if (!grinfo)
14111407
continue;
14121408
/*
1413-
* If page is uptodate then we came here after online resize
1409+
* If folio is uptodate then we came here after online resize
14141410
* which added some new uninitialized group info structs, so
14151411
* we must skip all initialized uptodate buddies on the folio,
14161412
* which may be currently in use by an allocating task.
@@ -1430,7 +1426,7 @@ static int ext4_mb_init_cache(struct folio *folio, char *incore, gfp_t gfp)
14301426
}
14311427

14321428
/* wait for I/O completion */
1433-
for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
1429+
for (i = 0, group = first_group; i < groups_per_folio; i++, group++) {
14341430
int err2;
14351431

14361432
if (!bh[i])
@@ -1440,8 +1436,8 @@ static int ext4_mb_init_cache(struct folio *folio, char *incore, gfp_t gfp)
14401436
err = err2;
14411437
}
14421438

1443-
first_block = folio->index * blocks_per_page;
1444-
for (i = 0; i < blocks_per_page; i++) {
1439+
first_block = EXT4_PG_TO_LBLK(inode, folio->index);
1440+
for (i = 0; i < blocks_per_folio; i++) {
14451441
group = (first_block + i) >> 1;
14461442
if (group >= ngroups)
14471443
break;
@@ -1518,7 +1514,7 @@ static int ext4_mb_init_cache(struct folio *folio, char *incore, gfp_t gfp)
15181514

15191515
out:
15201516
if (bh) {
1521-
for (i = 0; i < groups_per_page; i++)
1517+
for (i = 0; i < groups_per_folio; i++)
15221518
brelse(bh[i]);
15231519
if (bh != &bhs)
15241520
kfree(bh);

0 commit comments

Comments
 (0)