@@ -131,7 +131,7 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl)
131131 /*
132132 * Only new queue scan work when admin and IO queues are both alive
133133 */
134- if (ctrl -> state == NVME_CTRL_LIVE && ctrl -> tagset )
134+ if (nvme_ctrl_state ( ctrl ) == NVME_CTRL_LIVE && ctrl -> tagset )
135135 queue_work (nvme_wq , & ctrl -> scan_work );
136136}
137137
@@ -143,7 +143,7 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl)
143143 */
144144int nvme_try_sched_reset (struct nvme_ctrl * ctrl )
145145{
146- if (ctrl -> state != NVME_CTRL_RESETTING )
146+ if (nvme_ctrl_state ( ctrl ) != NVME_CTRL_RESETTING )
147147 return - EBUSY ;
148148 if (!queue_work (nvme_reset_wq , & ctrl -> reset_work ))
149149 return - EBUSY ;
@@ -156,7 +156,7 @@ static void nvme_failfast_work(struct work_struct *work)
156156 struct nvme_ctrl * ctrl = container_of (to_delayed_work (work ),
157157 struct nvme_ctrl , failfast_work );
158158
159- if (ctrl -> state != NVME_CTRL_CONNECTING )
159+ if (nvme_ctrl_state ( ctrl ) != NVME_CTRL_CONNECTING )
160160 return ;
161161
162162 set_bit (NVME_CTRL_FAILFAST_EXPIRED , & ctrl -> flags );
@@ -200,7 +200,7 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
200200 ret = nvme_reset_ctrl (ctrl );
201201 if (!ret ) {
202202 flush_work (& ctrl -> reset_work );
203- if (ctrl -> state != NVME_CTRL_LIVE )
203+ if (nvme_ctrl_state ( ctrl ) != NVME_CTRL_LIVE )
204204 ret = - ENETRESET ;
205205 }
206206
@@ -499,7 +499,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
499499
500500 spin_lock_irqsave (& ctrl -> lock , flags );
501501
502- old_state = ctrl -> state ;
502+ old_state = nvme_ctrl_state ( ctrl ) ;
503503 switch (new_state ) {
504504 case NVME_CTRL_LIVE :
505505 switch (old_state ) {
@@ -567,19 +567,19 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
567567 }
568568
569569 if (changed ) {
570- ctrl -> state = new_state ;
570+ WRITE_ONCE ( ctrl -> state , new_state ) ;
571571 wake_up_all (& ctrl -> state_wq );
572572 }
573573
574574 spin_unlock_irqrestore (& ctrl -> lock , flags );
575575 if (!changed )
576576 return false;
577577
578- if (ctrl -> state == NVME_CTRL_LIVE ) {
578+ if (new_state == NVME_CTRL_LIVE ) {
579579 if (old_state == NVME_CTRL_CONNECTING )
580580 nvme_stop_failfast_work (ctrl );
581581 nvme_kick_requeue_lists (ctrl );
582- } else if (ctrl -> state == NVME_CTRL_CONNECTING &&
582+ } else if (new_state == NVME_CTRL_CONNECTING &&
583583 old_state == NVME_CTRL_RESETTING ) {
584584 nvme_start_failfast_work (ctrl );
585585 }
@@ -592,7 +592,7 @@ EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
592592 */
593593static bool nvme_state_terminal (struct nvme_ctrl * ctrl )
594594{
595- switch (ctrl -> state ) {
595+ switch (nvme_ctrl_state ( ctrl ) ) {
596596 case NVME_CTRL_NEW :
597597 case NVME_CTRL_LIVE :
598598 case NVME_CTRL_RESETTING :
@@ -617,7 +617,7 @@ bool nvme_wait_reset(struct nvme_ctrl *ctrl)
617617 wait_event (ctrl -> state_wq ,
618618 nvme_change_ctrl_state (ctrl , NVME_CTRL_RESETTING ) ||
619619 nvme_state_terminal (ctrl ));
620- return ctrl -> state == NVME_CTRL_RESETTING ;
620+ return nvme_ctrl_state ( ctrl ) == NVME_CTRL_RESETTING ;
621621}
622622EXPORT_SYMBOL_GPL (nvme_wait_reset );
623623
@@ -704,9 +704,11 @@ EXPORT_SYMBOL_GPL(nvme_init_request);
704704blk_status_t nvme_fail_nonready_command (struct nvme_ctrl * ctrl ,
705705 struct request * rq )
706706{
707- if (ctrl -> state != NVME_CTRL_DELETING_NOIO &&
708- ctrl -> state != NVME_CTRL_DELETING &&
709- ctrl -> state != NVME_CTRL_DEAD &&
707+ enum nvme_ctrl_state state = nvme_ctrl_state (ctrl );
708+
709+ if (state != NVME_CTRL_DELETING_NOIO &&
710+ state != NVME_CTRL_DELETING &&
711+ state != NVME_CTRL_DEAD &&
710712 !test_bit (NVME_CTRL_FAILFAST_EXPIRED , & ctrl -> flags ) &&
711713 !blk_noretry_request (rq ) && !(rq -> cmd_flags & REQ_NVME_MPATH ))
712714 return BLK_STS_RESOURCE ;
@@ -736,7 +738,7 @@ bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
736738 * command, which is required to set the queue live in the
737739 * appropriate states.
738740 */
739- switch (ctrl -> state ) {
741+ switch (nvme_ctrl_state ( ctrl ) ) {
740742 case NVME_CTRL_CONNECTING :
741743 if (blk_rq_is_passthrough (rq ) && nvme_is_fabrics (req -> cmd ) &&
742744 (req -> cmd -> fabrics .fctype == nvme_fabrics_type_connect ||
@@ -2550,7 +2552,7 @@ static void nvme_set_latency_tolerance(struct device *dev, s32 val)
25502552
25512553 if (ctrl -> ps_max_latency_us != latency ) {
25522554 ctrl -> ps_max_latency_us = latency ;
2553- if (ctrl -> state == NVME_CTRL_LIVE )
2555+ if (nvme_ctrl_state ( ctrl ) == NVME_CTRL_LIVE )
25542556 nvme_configure_apst (ctrl );
25552557 }
25562558}
@@ -3238,7 +3240,7 @@ static int nvme_dev_open(struct inode *inode, struct file *file)
32383240 struct nvme_ctrl * ctrl =
32393241 container_of (inode -> i_cdev , struct nvme_ctrl , cdev );
32403242
3241- switch (ctrl -> state ) {
3243+ switch (nvme_ctrl_state ( ctrl ) ) {
32423244 case NVME_CTRL_LIVE :
32433245 break ;
32443246 default :
@@ -3660,6 +3662,14 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
36603662 goto out_unlink_ns ;
36613663
36623664 down_write (& ctrl -> namespaces_rwsem );
3665+ /*
3666+ * Ensure that no namespaces are added to the ctrl list after the queues
3667+ * are frozen, thereby avoiding a deadlock between scan and reset.
3668+ */
3669+ if (test_bit (NVME_CTRL_FROZEN , & ctrl -> flags )) {
3670+ up_write (& ctrl -> namespaces_rwsem );
3671+ goto out_unlink_ns ;
3672+ }
36633673 nvme_ns_add_to_ctrl_list (ns );
36643674 up_write (& ctrl -> namespaces_rwsem );
36653675 nvme_get_ctrl (ctrl );
@@ -3924,7 +3934,7 @@ static void nvme_scan_work(struct work_struct *work)
39243934 int ret ;
39253935
39263936 /* No tagset on a live ctrl means IO queues could not be created */
3927- if (ctrl -> state != NVME_CTRL_LIVE || !ctrl -> tagset )
3937+ if (nvme_ctrl_state ( ctrl ) != NVME_CTRL_LIVE || !ctrl -> tagset )
39283938 return ;
39293939
39303940 /*
@@ -3994,7 +4004,7 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
39944004 * removing the namespaces' disks; fail all the queues now to avoid
39954005 * potentially having to clean up the failed sync later.
39964006 */
3997- if (ctrl -> state == NVME_CTRL_DEAD )
4007+ if (nvme_ctrl_state ( ctrl ) == NVME_CTRL_DEAD )
39984008 nvme_mark_namespaces_dead (ctrl );
39994009
40004010 /* this is a no-op when called from the controller reset handler */
@@ -4076,7 +4086,7 @@ static void nvme_async_event_work(struct work_struct *work)
40764086 * flushing ctrl async_event_work after changing the controller state
40774087 * from LIVE and before freeing the admin queue.
40784088 */
4079- if (ctrl -> state == NVME_CTRL_LIVE )
4089+ if (nvme_ctrl_state ( ctrl ) == NVME_CTRL_LIVE )
40804090 ctrl -> ops -> submit_async_event (ctrl );
40814091}
40824092
@@ -4471,7 +4481,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
44714481{
44724482 int ret ;
44734483
4474- ctrl -> state = NVME_CTRL_NEW ;
4484+ WRITE_ONCE ( ctrl -> state , NVME_CTRL_NEW ) ;
44754485 clear_bit (NVME_CTRL_FAILFAST_EXPIRED , & ctrl -> flags );
44764486 spin_lock_init (& ctrl -> lock );
44774487 mutex_init (& ctrl -> scan_lock );
@@ -4581,6 +4591,7 @@ void nvme_unfreeze(struct nvme_ctrl *ctrl)
45814591 list_for_each_entry (ns , & ctrl -> namespaces , list )
45824592 blk_mq_unfreeze_queue (ns -> queue );
45834593 up_read (& ctrl -> namespaces_rwsem );
4594+ clear_bit (NVME_CTRL_FROZEN , & ctrl -> flags );
45844595}
45854596EXPORT_SYMBOL_GPL (nvme_unfreeze );
45864597
@@ -4614,6 +4625,7 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl)
46144625{
46154626 struct nvme_ns * ns ;
46164627
4628+ set_bit (NVME_CTRL_FROZEN , & ctrl -> flags );
46174629 down_read (& ctrl -> namespaces_rwsem );
46184630 list_for_each_entry (ns , & ctrl -> namespaces , list )
46194631 blk_freeze_queue_start (ns -> queue );
0 commit comments