@@ -1787,40 +1787,27 @@ static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head)
17871787 return true;
17881788}
17891789
1790- static void nvme_config_discard(struct nvme_ctrl *ctrl, struct gendisk *disk,
1791- 		struct nvme_ns_head *head)
1790+ static void nvme_config_discard(struct nvme_ns *ns, struct queue_limits *lim)
17921791{
1793- 	struct request_queue *queue = disk->queue;
1794- 	u32 max_discard_sectors;
1795-
1796- 	if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(head, UINT_MAX)) {
1797- 		max_discard_sectors = nvme_lba_to_sect(head, ctrl->dmrsl);
1798- 	} else if (ctrl->oncs & NVME_CTRL_ONCS_DSM) {
1799- 		max_discard_sectors = UINT_MAX;
1800- 	} else {
1801- 		blk_queue_max_discard_sectors(queue, 0);
1802- 		return;
1803- 	}
1792+ 	struct nvme_ctrl *ctrl = ns->ctrl;
18041793
18051794 	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
18061795 			NVME_DSM_MAX_RANGES);
18071796
1808- 	/*
1809- 	 * If discard is already enabled, don't reset queue limits.
1810- 	 *
1811- 	 * This works around the fact that the block layer can't cope well with
1812- 	 * updating the hardware limits when overridden through sysfs. This is
1813- 	 * harmless because discard limits in NVMe are purely advisory.
1814- 	 */
1815- 	if (queue->limits.max_discard_sectors)
1816- 		return;
1797+ 	if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX))
1798+ 		lim->max_hw_discard_sectors =
1799+ 			nvme_lba_to_sect(ns->head, ctrl->dmrsl);
1800+ 	else if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
1801+ 		lim->max_hw_discard_sectors = UINT_MAX;
1802+ 	else
1803+ 		lim->max_hw_discard_sectors = 0;
1804+
1805+ 	lim->discard_granularity = lim->logical_block_size;
18171806
1818- 	blk_queue_max_discard_sectors(queue, max_discard_sectors);
18191807 	if (ctrl->dmrl)
1820- 		blk_queue_max_discard_segments(queue, ctrl->dmrl);
1808+ 		lim->max_discard_segments = ctrl->dmrl;
18211809 	else
1822- 		blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
1823- 	queue->limits.discard_granularity = queue_logical_block_size(queue);
1810+ 		lim->max_discard_segments = NVME_DSM_MAX_RANGES;
18241811}
18251812
18261813static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
@@ -1942,20 +1929,21 @@ static u32 nvme_max_drv_segments(struct nvme_ctrl *ctrl)
19421929 	return ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> SECTOR_SHIFT) + 1;
19431930}
19441931
1945- static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
1946- 		struct request_queue *q)
1932+ static void nvme_set_ctrl_limits(struct nvme_ctrl *ctrl,
1933+ 		struct queue_limits *lim)
19471934{
1948- 	blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
1949- 	blk_queue_max_segments(q, min_t(u32, USHRT_MAX,
1950- 		min_not_zero(nvme_max_drv_segments(ctrl), ctrl->max_segments)));
1951- 	blk_queue_max_integrity_segments(q, ctrl->max_integrity_segments);
1952- 	blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
1953- 	blk_queue_dma_alignment(q, 3);
1935+ 	lim->max_hw_sectors = ctrl->max_hw_sectors;
1936+ 	lim->max_segments = min_t(u32, USHRT_MAX,
1937+ 		min_not_zero(nvme_max_drv_segments(ctrl), ctrl->max_segments));
1938+ 	lim->max_integrity_segments = ctrl->max_integrity_segments;
1939+ 	lim->virt_boundary_mask = NVME_CTRL_PAGE_SIZE - 1;
1940+ 	lim->max_segment_size = UINT_MAX;
1941+ 	lim->dma_alignment = 3;
19541942}
19551943
1956- static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id)
1944+ static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
1945+ 		struct queue_limits *lim)
19571946{
1958- 	struct gendisk *disk = ns->disk;
19591947 	struct nvme_ns_head *head = ns->head;
19601948 	u32 bs = 1U << head->lba_shift;
19611949 	u32 atomic_bs, phys_bs, io_opt = 0;
@@ -1991,23 +1979,19 @@ static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id)
19911979 		io_opt = bs * (1 + le16_to_cpu(id->nows));
19921980 }
19931981
1994- 	blk_queue_logical_block_size(disk->queue, bs);
19951982 	/*
19961983 	 * Linux filesystems assume writing a single physical block is
19971984 	 * an atomic operation. Hence limit the physical block size to the
19981985 	 * value of the Atomic Write Unit Power Fail parameter.
19991986 	 */
2000- 	blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
2001- 	blk_queue_io_min(disk->queue, phys_bs);
2002- 	blk_queue_io_opt(disk->queue, io_opt);
2003-
2004- 	nvme_config_discard(ns->ctrl, disk, head);
2005-
1987+ 	lim->logical_block_size = bs;
1988+ 	lim->physical_block_size = min(phys_bs, atomic_bs);
1989+ 	lim->io_min = phys_bs;
1990+ 	lim->io_opt = io_opt;
20061991 	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
2007- 		blk_queue_max_write_zeroes_sectors(disk->queue, UINT_MAX);
1992+ 		lim->max_write_zeroes_sectors = UINT_MAX;
20081993 	else
2009- 		blk_queue_max_write_zeroes_sectors(disk->queue,
2010- 				ns->ctrl->max_zeroes_sectors);
1994+ 		lim->max_write_zeroes_sectors = ns->ctrl->max_zeroes_sectors;
20111995 	return valid;
20121996}
20131997
@@ -2022,7 +2006,8 @@ static inline bool nvme_first_scan(struct gendisk *disk)
20222006 	return !disk_live(disk);
20232007}
20242008
2025- static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
2009+ static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id,
2010+ 		struct queue_limits *lim)
20262011{
20272012 	struct nvme_ctrl *ctrl = ns->ctrl;
20282013 	u32 iob;
@@ -2050,25 +2035,33 @@ static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
20502035 		return;
20512036 	}
20522037
2053- 	blk_queue_chunk_sectors(ns->queue, iob);
2038+ 	lim->chunk_sectors = iob;
20542039}
20552040
20562041static int nvme_update_ns_info_generic(struct nvme_ns *ns,
20572042 		struct nvme_ns_info *info)
20582043{
2044+ 	struct queue_limits lim;
2045+ 	int ret;
2046+
20592047 	blk_mq_freeze_queue(ns->disk->queue);
2060- 	nvme_set_queue_limits(ns->ctrl, ns->queue);
2048+ 	lim = queue_limits_start_update(ns->disk->queue);
2049+ 	nvme_set_ctrl_limits(ns->ctrl, &lim);
2050+ 	ret = queue_limits_commit_update(ns->disk->queue, &lim);
20612051 	set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
20622052 	blk_mq_unfreeze_queue(ns->disk->queue);
20632053
20642054 	/* Hide the block-interface for these devices */
2065- 	return -ENODEV;
2055+ 	if (!ret)
2056+ 		ret = -ENODEV;
2057+ 	return ret;
20662058}
20672059
20682060static int nvme_update_ns_info_block(struct nvme_ns *ns,
20692061 		struct nvme_ns_info *info)
20702062{
20712063 	bool vwc = ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT;
2064+ 	struct queue_limits lim;
20722065 	struct nvme_id_ns_nvm *nvm = NULL;
20732066 	struct nvme_id_ns *id;
20742067 	sector_t capacity;
@@ -2098,11 +2091,26 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
20982091 	ns->head->nuse = le64_to_cpu(id->nuse);
20992092 	capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze));
21002093
2101- 	nvme_set_queue_limits(ns->ctrl, ns->queue);
2094+ 	lim = queue_limits_start_update(ns->disk->queue);
2095+ 	nvme_set_ctrl_limits(ns->ctrl, &lim);
21022096 	nvme_configure_metadata(ns->ctrl, ns->head, id, nvm);
2103- 	nvme_set_chunk_sectors(ns, id);
2104- 	if (!nvme_update_disk_info(ns, id))
2097+ 	nvme_set_chunk_sectors(ns, id, &lim);
2098+ 	if (!nvme_update_disk_info(ns, id, &lim))
21052099 		capacity = 0;
2100+ 	nvme_config_discard(ns, &lim);
2101+ 	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
2102+ 	    ns->head->ids.csi == NVME_CSI_ZNS) {
2103+ 		ret = nvme_update_zone_info(ns, lbaf, &lim);
2104+ 		if (ret) {
2105+ 			blk_mq_unfreeze_queue(ns->disk->queue);
2106+ 			goto out;
2107+ 		}
2108+ 	}
2109+ 	ret = queue_limits_commit_update(ns->disk->queue, &lim);
2110+ 	if (ret) {
2111+ 		blk_mq_unfreeze_queue(ns->disk->queue);
2112+ 		goto out;
2113+ 	}
21062114
21072115 /*
21082116 * Register a metadata profile for PI, or the plain non-integrity NVMe
@@ -2115,14 +2123,6 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
21152123
21162124 	set_capacity_and_notify(ns->disk, capacity);
21172125
2118- 	if (ns->head->ids.csi == NVME_CSI_ZNS) {
2119- 		ret = nvme_update_zone_info(ns, lbaf);
2120- 		if (ret) {
2121- 			blk_mq_unfreeze_queue(ns->disk->queue);
2122- 			goto out;
2123- 		}
2124- 	}
2125-
2125-
21262126 /*
21272127 * Only set the DEAC bit if the device guarantees that reads from
21282128 * deallocated data return zeroes. While the DEAC bit does not
@@ -3128,6 +3128,7 @@ static int nvme_check_ctrl_fabric_info(struct nvme_ctrl *ctrl, struct nvme_id_ct
31283128
31293129static int nvme_init_identify(struct nvme_ctrl *ctrl)
31303130{
3131+ 	struct queue_limits lim;
31313132 	struct nvme_id_ctrl *id;
31323133 	u32 max_hw_sectors;
31333134 	bool prev_apst_enabled;
@@ -3194,7 +3195,12 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
31943195 	ctrl->max_hw_sectors =
31953196 		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
31963197
3197- 	nvme_set_queue_limits(ctrl, ctrl->admin_q);
3198+ 	lim = queue_limits_start_update(ctrl->admin_q);
3199+ 	nvme_set_ctrl_limits(ctrl, &lim);
3200+ 	ret = queue_limits_commit_update(ctrl->admin_q, &lim);
3201+ 	if (ret)
3202+ 		goto out_free;
3203+
31983204 	ctrl->sgls = le32_to_cpu(id->sgls);
31993205 	ctrl->kas = le16_to_cpu(id->kas);
32003206 	ctrl->max_namespaces = le32_to_cpu(id->mnan);
@@ -4357,6 +4363,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_async_event);
43574363int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
43584364 		const struct blk_mq_ops *ops, unsigned int cmd_size)
43594365{
4366+ 	struct queue_limits lim = {};
43604367 	int ret;
43614368
43624369 	memset(set, 0, sizeof(*set));
@@ -4376,7 +4383,7 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
43764383 	if (ret)
43774384 		return ret;
43784385
4379- 	ctrl->admin_q = blk_mq_alloc_queue(set, NULL, NULL);
4386+ 	ctrl->admin_q = blk_mq_alloc_queue(set, &lim, NULL);
43804387 	if (IS_ERR(ctrl->admin_q)) {
43814388 		ret = PTR_ERR(ctrl->admin_q);
43824389 		goto out_free_tagset;
0 commit comments