@@ -1316,10 +1316,10 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
13161316 * should use the iteration structure like dm_table_supports_nowait() or
13171317 * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that
13181318 * uses an @anti_func that handles semantics of counter examples, e.g. not
1319- * capable of something. So: return !dm_table_any_dev_attr(t, anti_func);
1319+ * capable of something. So: return !dm_table_any_dev_attr(t, anti_func, data );
13201320 */
13211321static bool dm_table_any_dev_attr (struct dm_table * t ,
1322- iterate_devices_callout_fn func )
1322+ iterate_devices_callout_fn func , void * data )
13231323{
13241324 struct dm_target * ti ;
13251325 unsigned int i ;
@@ -1328,7 +1328,7 @@ static bool dm_table_any_dev_attr(struct dm_table *t,
13281328 ti = dm_table_get_target (t , i );
13291329
13301330 if (ti -> type -> iterate_devices &&
1331- ti -> type -> iterate_devices (ti , func , NULL ))
1331+ ti -> type -> iterate_devices (ti , func , data ))
13321332 return true;
13331333 }
13341334
@@ -1371,13 +1371,13 @@ bool dm_table_has_no_data_devices(struct dm_table *table)
13711371 return true;
13721372}
13731373
1374- static int device_is_zoned_model (struct dm_target * ti , struct dm_dev * dev ,
1375- sector_t start , sector_t len , void * data )
1374+ static int device_not_zoned_model (struct dm_target * ti , struct dm_dev * dev ,
1375+ sector_t start , sector_t len , void * data )
13761376{
13771377 struct request_queue * q = bdev_get_queue (dev -> bdev );
13781378 enum blk_zoned_model * zoned_model = data ;
13791379
1380- return q && blk_queue_zoned_model (q ) == * zoned_model ;
1380+ return !q || blk_queue_zoned_model (q ) != * zoned_model ;
13811381}
13821382
13831383static bool dm_table_supports_zoned_model (struct dm_table * t ,
@@ -1394,37 +1394,20 @@ static bool dm_table_supports_zoned_model(struct dm_table *t,
13941394 return false;
13951395
13961396 if (!ti -> type -> iterate_devices ||
1397- ! ti -> type -> iterate_devices (ti , device_is_zoned_model , & zoned_model ))
1397+ ti -> type -> iterate_devices (ti , device_not_zoned_model , & zoned_model ))
13981398 return false;
13991399 }
14001400
14011401 return true;
14021402}
14031403
1404- static int device_matches_zone_sectors (struct dm_target * ti , struct dm_dev * dev ,
1405- sector_t start , sector_t len , void * data )
1404+ static int device_not_matches_zone_sectors (struct dm_target * ti , struct dm_dev * dev ,
1405+ sector_t start , sector_t len , void * data )
14061406{
14071407 struct request_queue * q = bdev_get_queue (dev -> bdev );
14081408 unsigned int * zone_sectors = data ;
14091409
1410- return q && blk_queue_zone_sectors (q ) == * zone_sectors ;
1411- }
1412-
1413- static bool dm_table_matches_zone_sectors (struct dm_table * t ,
1414- unsigned int zone_sectors )
1415- {
1416- struct dm_target * ti ;
1417- unsigned i ;
1418-
1419- for (i = 0 ; i < dm_table_get_num_targets (t ); i ++ ) {
1420- ti = dm_table_get_target (t , i );
1421-
1422- if (!ti -> type -> iterate_devices ||
1423- !ti -> type -> iterate_devices (ti , device_matches_zone_sectors , & zone_sectors ))
1424- return false;
1425- }
1426-
1427- return true;
1410+ return !q || blk_queue_zone_sectors (q ) != * zone_sectors ;
14281411}
14291412
14301413static int validate_hardware_zoned_model (struct dm_table * table ,
@@ -1444,7 +1427,7 @@ static int validate_hardware_zoned_model(struct dm_table *table,
14441427 if (!zone_sectors || !is_power_of_2 (zone_sectors ))
14451428 return - EINVAL ;
14461429
1447- if (! dm_table_matches_zone_sectors (table , zone_sectors )) {
1430+ if (dm_table_any_dev_attr (table , device_not_matches_zone_sectors , & zone_sectors )) {
14481431 DMERR ("%s: zone sectors is not consistent across all devices" ,
14491432 dm_device_name (table -> md ));
14501433 return - EINVAL ;
@@ -1830,11 +1813,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
18301813 else
18311814 blk_queue_flag_clear (QUEUE_FLAG_DAX , q );
18321815
1833- if (dm_table_any_dev_attr (t , device_dax_write_cache_enabled ))
1816+ if (dm_table_any_dev_attr (t , device_dax_write_cache_enabled , NULL ))
18341817 dax_write_cache (t -> md -> dax_dev , true);
18351818
18361819 /* Ensure that all underlying devices are non-rotational. */
1837- if (dm_table_any_dev_attr (t , device_is_rotational ))
1820+ if (dm_table_any_dev_attr (t , device_is_rotational , NULL ))
18381821 blk_queue_flag_clear (QUEUE_FLAG_NONROT , q );
18391822 else
18401823 blk_queue_flag_set (QUEUE_FLAG_NONROT , q );
@@ -1853,7 +1836,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
18531836 * them as well. Only targets that support iterate_devices are considered:
18541837 * don't want error, zero, etc to require stable pages.
18551838 */
1856- if (dm_table_any_dev_attr (t , device_requires_stable_pages ))
1839+ if (dm_table_any_dev_attr (t , device_requires_stable_pages , NULL ))
18571840 blk_queue_flag_set (QUEUE_FLAG_STABLE_WRITES , q );
18581841 else
18591842 blk_queue_flag_clear (QUEUE_FLAG_STABLE_WRITES , q );
@@ -1864,7 +1847,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
18641847 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
18651848 * have it set.
18661849 */
1867- if (blk_queue_add_random (q ) && dm_table_any_dev_attr (t , device_is_not_random ))
1850+ if (blk_queue_add_random (q ) &&
1851+ dm_table_any_dev_attr (t , device_is_not_random , NULL ))
18681852 blk_queue_flag_clear (QUEUE_FLAG_ADD_RANDOM , q );
18691853
18701854 /*
0 commit comments