@@ -1187,7 +1187,7 @@ int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
  * offset.
  */
 static int calculate_alloc_pointer(struct btrfs_block_group *cache,
-				   u64 *offset_ret)
+				   u64 *offset_ret, bool new)
 {
 	struct btrfs_fs_info *fs_info = cache->fs_info;
 	struct btrfs_root *root;
@@ -1197,6 +1197,21 @@ static int calculate_alloc_pointer(struct btrfs_block_group *cache,
 	int ret;
 	u64 length;
 
+	/*
+	 * Avoid tree lookups for a new block group, there's no use for it.
+	 * It must always be 0.
+	 *
+	 * Also, we have a lock chain of extent buffer lock -> chunk mutex.
+	 * For a new block group, this function is called from
+	 * btrfs_make_block_group() which is already taking the chunk mutex.
+	 * Thus, we cannot call calculate_alloc_pointer() which takes extent
+	 * buffer locks to avoid deadlock.
+	 */
+	if (new) {
+		*offset_ret = 0;
+		return 0;
+	}
+
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
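The comment added above encodes a lock-ordering rule: the established chain is extent buffer lock -> chunk mutex, and btrfs_make_block_group() calls in with the chunk mutex already held, so the lookup path (which takes extent buffer locks) must not run for a new block group. A minimal userspace sketch of that rule, using pthreads and hypothetical names rather than btrfs code:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER; /* "extent buffer lock" */
    static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER; /* "chunk mutex" */

    /* Stand-in for calculate_alloc_pointer(): the slow path takes lock_a
     * for the tree lookup, so it must not run while lock_b is held. */
    static int lookup_alloc_pointer(unsigned long long *offset_ret, bool new)
    {
    	if (new) {
    		/* A fresh group has no extents: the answer is known to
    		 * be 0 without any lookup, so no lock_a, no inversion. */
    		*offset_ret = 0;
    		return 0;
    	}
    	pthread_mutex_lock(&lock_a);
    	*offset_ret = 4096;	/* stand-in for the real item search */
    	pthread_mutex_unlock(&lock_a);
    	return 0;
    }

    int main(void)
    {
    	unsigned long long off;

    	/* Caller analogous to btrfs_make_block_group(): lock_b already
    	 * held, so only the "new" fast path may be used here. */
    	pthread_mutex_lock(&lock_b);
    	lookup_alloc_pointer(&off, true);
    	pthread_mutex_unlock(&lock_b);

    	/* Mount-time load of an existing group: lock_b is not held, so
    	 * the full lookup can respect the lock_a -> lock_b order. */
    	lookup_alloc_pointer(&off, false);
    	printf("offset=%llu\n", off);
    	return 0;
    }

The fast path answers from an invariant (a fresh block group has no extents, so its allocation pointer is 0) instead of from the tree, which is why it needs no locks at all.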
@@ -1332,6 +1347,13 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 		else
 			num_conventional++;
 
+		/*
+		 * Consider a zone as active if we can allow any number of
+		 * active zones.
+		 */
+		if (!device->zone_info->max_active_zones)
+			__set_bit(i, active);
+
 		if (!is_sequential) {
 			alloc_offsets[i] = WP_CONVENTIONAL;
 			continue;
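This hunk moves the max_active_zones check ahead of the conventional-zone `continue` (its old location is removed in the next hunk), so that devices with no active-zone limit mark conventional zones active as well. A small standalone sketch of the placement issue, with made-up zone data standing in for the zone report:

    #include <stdbool.h>
    #include <stdio.h>

    #define NZONES 4

    int main(void)
    {
    	bool is_sequential[NZONES] = { true, false, true, false };
    	unsigned int max_active_zones = 0;	/* 0 means "no limit" */
    	unsigned long active = 0;		/* toy bitmap */

    	for (int i = 0; i < NZONES; i++) {
    		/* Placed before the continue, as in the hunk above, the
    		 * no-limit case marks conventional zones active too. */
    		if (!max_active_zones)
    			active |= 1UL << i;

    		if (!is_sequential[i])
    			continue;	/* conventional zone, no write pointer */

    		/* The sequential case would set bits from the write
    		 * pointer state here, like the switch in the real loop. */
    	}

    	/* Prints 0xf; with the check after the continue (the old
    	 * placement) only sequential zones 0 and 2 would be set. */
    	printf("active bitmap: 0x%lx\n", active);
    	return 0;
    }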
@@ -1398,45 +1420,23 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 			__set_bit(i, active);
 			break;
 		}
-
-		/*
-		 * Consider a zone as active if we can allow any number of
-		 * active zones.
-		 */
-		if (!device->zone_info->max_active_zones)
-			__set_bit(i, active);
 	}
 
 	if (num_sequential > 0)
 		cache->seq_zone = true;
 
 	if (num_conventional > 0) {
-		/*
-		 * Avoid calling calculate_alloc_pointer() for new BG. It
-		 * is no use for new BG. It must be always 0.
-		 *
-		 * Also, we have a lock chain of extent buffer lock ->
-		 * chunk mutex. For new BG, this function is called from
-		 * btrfs_make_block_group() which is already taking the
-		 * chunk mutex. Thus, we cannot call
-		 * calculate_alloc_pointer() which takes extent buffer
-		 * locks to avoid deadlock.
-		 */
-
 		/* Zone capacity is always zone size in emulation */
 		cache->zone_capacity = cache->length;
-		if (new) {
-			cache->alloc_offset = 0;
-			goto out;
-		}
-		ret = calculate_alloc_pointer(cache, &last_alloc);
-		if (ret || map->num_stripes == num_conventional) {
-			if (!ret)
-				cache->alloc_offset = last_alloc;
-			else
-				btrfs_err(fs_info,
+		ret = calculate_alloc_pointer(cache, &last_alloc, new);
+		if (ret) {
+			btrfs_err(fs_info,
 			"zoned: failed to determine allocation offset of bg %llu",
-					  cache->start);
+				  cache->start);
+			goto out;
+		} else if (map->num_stripes == num_conventional) {
+			cache->alloc_offset = last_alloc;
+			cache->zone_is_active = 1;
 			goto out;
 		}
 	}
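With the `new` short-circuit folded into calculate_alloc_pointer(), the old combined `if (ret || ...)` test and its nested `if (!ret)` flatten into an error branch followed by an else-if for the fully conventional case, where the block group is now also flagged active. A compact standalone sketch of the reshaped flow (stand-in names and values, not btrfs code):

    #include <stdio.h>

    /* Stand-in for calculate_alloc_pointer(); always succeeds here. */
    static int lookup(unsigned long long *out)
    {
    	*out = 4096;
    	return 0;
    }

    int main(void)
    {
    	unsigned long long last_alloc = 0;
    	int num_stripes = 2, num_conventional = 2;
    	int zone_is_active = 0;
    	int ret = lookup(&last_alloc);

    	if (ret) {
    		/* one place to report the failure, then bail out */
    		fprintf(stderr, "failed to determine allocation offset\n");
    	} else if (num_stripes == num_conventional) {
    		/* all stripes conventional: the offset comes from the
    		 * lookup and the group counts as active immediately */
    		zone_is_active = 1;
    		printf("alloc_offset=%llu active=%d\n", last_alloc,
    		       zone_is_active);
    	}
    	return ret;
    }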
@@ -1504,13 +1504,6 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 		goto out;
 	}
 
-	if (cache->zone_is_active) {
-		btrfs_get_block_group(cache);
-		spin_lock(&fs_info->zone_active_bgs_lock);
-		list_add_tail(&cache->active_bg_list, &fs_info->zone_active_bgs);
-		spin_unlock(&fs_info->zone_active_bgs_lock);
-	}
-
 out:
 	if (cache->alloc_offset > fs_info->zone_size) {
 		btrfs_err(fs_info,
@@ -1535,10 +1528,16 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 		ret = -EIO;
 	}
 
-	if (!ret)
+	if (!ret) {
 		cache->meta_write_pointer = cache->alloc_offset + cache->start;
-
-	if (ret) {
+		if (cache->zone_is_active) {
+			btrfs_get_block_group(cache);
+			spin_lock(&fs_info->zone_active_bgs_lock);
+			list_add_tail(&cache->active_bg_list,
+				      &fs_info->zone_active_bgs);
+			spin_unlock(&fs_info->zone_active_bgs_lock);
+		}
+	} else {
 		kfree(cache->physical_map);
 		cache->physical_map = NULL;
 	}
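This final hunk, together with the removal before the `out:` label above, moves the zone_active_bgs insertion into the success branch after meta_write_pointer is set: the extra reference from btrfs_get_block_group() and the list linkage now only happen once the load has fully succeeded, so the error path (which frees physical_map) never has to unwind them. A userspace sketch of that publish-only-after-validation pattern, with hypothetical types and a pthread mutex in place of the kernel spinlock:

    #include <pthread.h>
    #include <stdio.h>

    struct group {
    	int refs;
    	int zone_is_active;
    	struct group *next;	/* link on the shared active list */
    };

    static pthread_mutex_t active_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct group *active_list;

    static int load_group(struct group *g, int fail)
    {
    	int ret = fail ? -1 : 0;	/* stand-in for the checks above */

    	if (!ret) {
    		/* Every validation passed: only now take the extra
    		 * reference and publish on the shared list, so the
    		 * error path never has to unlink anything. */
    		if (g->zone_is_active) {
    			g->refs++;
    			pthread_mutex_lock(&active_lock);
    			g->next = active_list;
    			active_list = g;
    			pthread_mutex_unlock(&active_lock);
    		}
    	} else {
    		/* error path: free per-group state only; the group was
    		 * never published, so no list or refcount unwind */
    	}
    	return ret;
    }

    int main(void)
    {
    	struct group g = { .refs = 1, .zone_is_active = 1 };

    	printf("ret=%d refs=%d\n", load_group(&g, 0), g.refs);
    	return 0;
    }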