Skip to content

Commit 4f86a6f

Browse files
Ming Lei authored and Christoph Hellwig committed
nvme-fcloop: fix "inconsistent {IN-HARDIRQ-W} -> {HARDIRQ-ON-W} usage"
fcloop_fcp_op() can be called from a flush request's ->end_io() handler (flush_end_io), which runs with the spinlock fq->mq_flush_lock held and interrupts saved/disabled. So fcloop_fcp_op() cannot simply call spin_unlock_irq(&tfcp_req->reqlock), which would re-enable interrupts unconditionally. Fix the warning by switching to spin_lock_irqsave()/spin_unlock_irqrestore(), which restore the caller's saved interrupt state instead. Fixes: c38dbbf ("nvme-fcloop: fix inconsistent lock state warnings") Reported-by: Yi Zhang <yi.zhang@redhat.com> Signed-off-by: Ming Lei <ming.lei@redhat.com> Reviewed-by: Ewan D. Milne <emilne@redhat.com> Tested-by: Yi Zhang <yi.zhang@redhat.com> Signed-off-by: Christoph Hellwig <hch@lst.de>
1 parent edde9e7 commit 4f86a6f

1 file changed

Lines changed: 27 additions & 21 deletions

File tree

drivers/nvme/target/fcloop.c

Lines changed: 27 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -614,10 +614,11 @@ fcloop_fcp_recv_work(struct work_struct *work)
614614
struct fcloop_fcpreq *tfcp_req =
615615
container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
616616
struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
617+
unsigned long flags;
617618
int ret = 0;
618619
bool aborted = false;
619620

620-
spin_lock_irq(&tfcp_req->reqlock);
621+
spin_lock_irqsave(&tfcp_req->reqlock, flags);
621622
switch (tfcp_req->inistate) {
622623
case INI_IO_START:
623624
tfcp_req->inistate = INI_IO_ACTIVE;
@@ -626,11 +627,11 @@ fcloop_fcp_recv_work(struct work_struct *work)
626627
aborted = true;
627628
break;
628629
default:
629-
spin_unlock_irq(&tfcp_req->reqlock);
630+
spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
630631
WARN_ON(1);
631632
return;
632633
}
633-
spin_unlock_irq(&tfcp_req->reqlock);
634+
spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
634635

635636
if (unlikely(aborted))
636637
ret = -ECANCELED;
@@ -655,8 +656,9 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
655656
container_of(work, struct fcloop_fcpreq, abort_rcv_work);
656657
struct nvmefc_fcp_req *fcpreq;
657658
bool completed = false;
659+
unsigned long flags;
658660

659-
spin_lock_irq(&tfcp_req->reqlock);
661+
spin_lock_irqsave(&tfcp_req->reqlock, flags);
660662
fcpreq = tfcp_req->fcpreq;
661663
switch (tfcp_req->inistate) {
662664
case INI_IO_ABORTED:
@@ -665,11 +667,11 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
665667
completed = true;
666668
break;
667669
default:
668-
spin_unlock_irq(&tfcp_req->reqlock);
670+
spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
669671
WARN_ON(1);
670672
return;
671673
}
672-
spin_unlock_irq(&tfcp_req->reqlock);
674+
spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
673675

674676
if (unlikely(completed)) {
675677
/* remove reference taken in original abort downcall */
@@ -681,9 +683,9 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
681683
nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
682684
&tfcp_req->tgt_fcp_req);
683685

684-
spin_lock_irq(&tfcp_req->reqlock);
686+
spin_lock_irqsave(&tfcp_req->reqlock, flags);
685687
tfcp_req->fcpreq = NULL;
686-
spin_unlock_irq(&tfcp_req->reqlock);
688+
spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
687689

688690
fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
689691
/* call_host_done releases reference for abort downcall */
@@ -699,11 +701,12 @@ fcloop_tgt_fcprqst_done_work(struct work_struct *work)
699701
struct fcloop_fcpreq *tfcp_req =
700702
container_of(work, struct fcloop_fcpreq, tio_done_work);
701703
struct nvmefc_fcp_req *fcpreq;
704+
unsigned long flags;
702705

703-
spin_lock_irq(&tfcp_req->reqlock);
706+
spin_lock_irqsave(&tfcp_req->reqlock, flags);
704707
fcpreq = tfcp_req->fcpreq;
705708
tfcp_req->inistate = INI_IO_COMPLETED;
706-
spin_unlock_irq(&tfcp_req->reqlock);
709+
spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
707710

708711
fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
709712
}
@@ -807,23 +810,24 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
807810
u32 rsplen = 0, xfrlen = 0;
808811
int fcp_err = 0, active, aborted;
809812
u8 op = tgt_fcpreq->op;
813+
unsigned long flags;
810814

811-
spin_lock_irq(&tfcp_req->reqlock);
815+
spin_lock_irqsave(&tfcp_req->reqlock, flags);
812816
fcpreq = tfcp_req->fcpreq;
813817
active = tfcp_req->active;
814818
aborted = tfcp_req->aborted;
815819
tfcp_req->active = true;
816-
spin_unlock_irq(&tfcp_req->reqlock);
820+
spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
817821

818822
if (unlikely(active))
819823
/* illegal - call while i/o active */
820824
return -EALREADY;
821825

822826
if (unlikely(aborted)) {
823827
/* target transport has aborted i/o prior */
824-
spin_lock_irq(&tfcp_req->reqlock);
828+
spin_lock_irqsave(&tfcp_req->reqlock, flags);
825829
tfcp_req->active = false;
826-
spin_unlock_irq(&tfcp_req->reqlock);
830+
spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
827831
tgt_fcpreq->transferred_length = 0;
828832
tgt_fcpreq->fcp_error = -ECANCELED;
829833
tgt_fcpreq->done(tgt_fcpreq);
@@ -880,9 +884,9 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
880884
break;
881885
}
882886

883-
spin_lock_irq(&tfcp_req->reqlock);
887+
spin_lock_irqsave(&tfcp_req->reqlock, flags);
884888
tfcp_req->active = false;
885-
spin_unlock_irq(&tfcp_req->reqlock);
889+
spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
886890

887891
tgt_fcpreq->transferred_length = xfrlen;
888892
tgt_fcpreq->fcp_error = fcp_err;
@@ -896,15 +900,16 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
896900
struct nvmefc_tgt_fcp_req *tgt_fcpreq)
897901
{
898902
struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
903+
unsigned long flags;
899904

900905
/*
901906
* mark aborted only in case there were 2 threads in transport
902907
* (one doing io, other doing abort) and only kills ops posted
903908
* after the abort request
904909
*/
905-
spin_lock_irq(&tfcp_req->reqlock);
910+
spin_lock_irqsave(&tfcp_req->reqlock, flags);
906911
tfcp_req->aborted = true;
907-
spin_unlock_irq(&tfcp_req->reqlock);
912+
spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
908913

909914
tfcp_req->status = NVME_SC_INTERNAL;
910915

@@ -946,6 +951,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
946951
struct fcloop_ini_fcpreq *inireq = fcpreq->private;
947952
struct fcloop_fcpreq *tfcp_req;
948953
bool abortio = true;
954+
unsigned long flags;
949955

950956
spin_lock(&inireq->inilock);
951957
tfcp_req = inireq->tfcp_req;
@@ -958,7 +964,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
958964
return;
959965

960966
/* break initiator/target relationship for io */
961-
spin_lock_irq(&tfcp_req->reqlock);
967+
spin_lock_irqsave(&tfcp_req->reqlock, flags);
962968
switch (tfcp_req->inistate) {
963969
case INI_IO_START:
964970
case INI_IO_ACTIVE:
@@ -968,11 +974,11 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
968974
abortio = false;
969975
break;
970976
default:
971-
spin_unlock_irq(&tfcp_req->reqlock);
977+
spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
972978
WARN_ON(1);
973979
return;
974980
}
975-
spin_unlock_irq(&tfcp_req->reqlock);
981+
spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
976982

977983
if (abortio)
978984
/* leave the reference while the work item is scheduled */

0 commit comments

Comments (0)