Commit 4f86a6ff authored by Ming Lei, committed by Christoph Hellwig

nvme-fcloop: fix "inconsistent {IN-HARDIRQ-W} -> {HARDIRQ-ON-W} usage"

fcloop_fcp_op() can be called from a flush request's ->end_io() handler
(flush_end_io()), which runs with the fq->mq_flush_lock spinlock held and
interrupts saved/disabled.

So fcloop_fcp_op() must not simply call spin_unlock_irq(&tfcp_req->reqlock),
which re-enables interrupts unconditionally.

Fix the warning by switching to spin_lock_irqsave()/spin_unlock_irqrestore().
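
For reference, a minimal sketch of the locking pattern the patch switches to;
the names demo_lock and demo_op are hypothetical and not part of this patch:

  #include <linux/spinlock.h>

  static DEFINE_SPINLOCK(demo_lock);

  /* Hypothetical helper that may be called with interrupts already off. */
  static void demo_op(void)
  {
          unsigned long flags;

          /*
           * spin_lock_irqsave() records the caller's interrupt state in
           * 'flags'; spin_unlock_irqrestore() restores exactly that state.
           * A plain spin_unlock_irq() would re-enable interrupts even when
           * the caller (e.g. flush_end_io() holding fq->mq_flush_lock) had
           * them disabled, which is what lockdep reports as the
           * {IN-HARDIRQ-W} -> {HARDIRQ-ON-W} inconsistency.
           */
          spin_lock_irqsave(&demo_lock, flags);
          /* ... touch state protected by demo_lock ... */
          spin_unlock_irqrestore(&demo_lock, flags);
  }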

Fixes: c38dbbfa ("nvme-fcloop: fix inconsistent lock state warnings")
Reported-by: Yi Zhang <yi.zhang@redhat.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Ewan D. Milne <emilne@redhat.com>
Tested-by: Yi Zhang <yi.zhang@redhat.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent edde9e70
@@ -614,10 +614,11 @@ fcloop_fcp_recv_work(struct work_struct *work)
 	struct fcloop_fcpreq *tfcp_req =
 		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
 	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+	unsigned long flags;
 	int ret = 0;
 	bool aborted = false;
 
-	spin_lock_irq(&tfcp_req->reqlock);
+	spin_lock_irqsave(&tfcp_req->reqlock, flags);
 	switch (tfcp_req->inistate) {
 	case INI_IO_START:
 		tfcp_req->inistate = INI_IO_ACTIVE;
@@ -626,11 +627,11 @@ fcloop_fcp_recv_work(struct work_struct *work)
 		aborted = true;
 		break;
 	default:
-		spin_unlock_irq(&tfcp_req->reqlock);
+		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 		WARN_ON(1);
 		return;
 	}
-	spin_unlock_irq(&tfcp_req->reqlock);
+	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 
 	if (unlikely(aborted))
 		ret = -ECANCELED;
@@ -655,8 +656,9 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
 		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
 	struct nvmefc_fcp_req *fcpreq;
 	bool completed = false;
+	unsigned long flags;
 
-	spin_lock_irq(&tfcp_req->reqlock);
+	spin_lock_irqsave(&tfcp_req->reqlock, flags);
 	fcpreq = tfcp_req->fcpreq;
 	switch (tfcp_req->inistate) {
 	case INI_IO_ABORTED:
@@ -665,11 +667,11 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
 		completed = true;
 		break;
 	default:
-		spin_unlock_irq(&tfcp_req->reqlock);
+		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 		WARN_ON(1);
 		return;
 	}
-	spin_unlock_irq(&tfcp_req->reqlock);
+	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 
 	if (unlikely(completed)) {
 		/* remove reference taken in original abort downcall */
@@ -681,9 +683,9 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
 	nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
 				&tfcp_req->tgt_fcp_req);
 
-	spin_lock_irq(&tfcp_req->reqlock);
+	spin_lock_irqsave(&tfcp_req->reqlock, flags);
 	tfcp_req->fcpreq = NULL;
-	spin_unlock_irq(&tfcp_req->reqlock);
+	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 
 	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
 	/* call_host_done releases reference for abort downcall */
@@ -699,11 +701,12 @@ fcloop_tgt_fcprqst_done_work(struct work_struct *work)
 	struct fcloop_fcpreq *tfcp_req =
 		container_of(work, struct fcloop_fcpreq, tio_done_work);
 	struct nvmefc_fcp_req *fcpreq;
+	unsigned long flags;
 
-	spin_lock_irq(&tfcp_req->reqlock);
+	spin_lock_irqsave(&tfcp_req->reqlock, flags);
 	fcpreq = tfcp_req->fcpreq;
 	tfcp_req->inistate = INI_IO_COMPLETED;
-	spin_unlock_irq(&tfcp_req->reqlock);
+	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 
 	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
 }
@@ -807,13 +810,14 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
 	u32 rsplen = 0, xfrlen = 0;
 	int fcp_err = 0, active, aborted;
 	u8 op = tgt_fcpreq->op;
+	unsigned long flags;
 
-	spin_lock_irq(&tfcp_req->reqlock);
+	spin_lock_irqsave(&tfcp_req->reqlock, flags);
 	fcpreq = tfcp_req->fcpreq;
 	active = tfcp_req->active;
 	aborted = tfcp_req->aborted;
 	tfcp_req->active = true;
-	spin_unlock_irq(&tfcp_req->reqlock);
+	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 
 	if (unlikely(active))
 		/* illegal - call while i/o active */
@@ -821,9 +825,9 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
 
 	if (unlikely(aborted)) {
 		/* target transport has aborted i/o prior */
-		spin_lock_irq(&tfcp_req->reqlock);
+		spin_lock_irqsave(&tfcp_req->reqlock, flags);
 		tfcp_req->active = false;
-		spin_unlock_irq(&tfcp_req->reqlock);
+		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 		tgt_fcpreq->transferred_length = 0;
 		tgt_fcpreq->fcp_error = -ECANCELED;
 		tgt_fcpreq->done(tgt_fcpreq);
@@ -880,9 +884,9 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
 		break;
 	}
 
-	spin_lock_irq(&tfcp_req->reqlock);
+	spin_lock_irqsave(&tfcp_req->reqlock, flags);
 	tfcp_req->active = false;
-	spin_unlock_irq(&tfcp_req->reqlock);
+	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 
 	tgt_fcpreq->transferred_length = xfrlen;
 	tgt_fcpreq->fcp_error = fcp_err;
@@ -896,15 +900,16 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
 		struct nvmefc_tgt_fcp_req *tgt_fcpreq)
 {
 	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
+	unsigned long flags;
 
 	/*
 	 * mark aborted only in case there were 2 threads in transport
 	 * (one doing io, other doing abort) and only kills ops posted
 	 * after the abort request
 	 */
-	spin_lock_irq(&tfcp_req->reqlock);
+	spin_lock_irqsave(&tfcp_req->reqlock, flags);
 	tfcp_req->aborted = true;
-	spin_unlock_irq(&tfcp_req->reqlock);
+	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 
 	tfcp_req->status = NVME_SC_INTERNAL;
@@ -946,6 +951,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
 	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
 	struct fcloop_fcpreq *tfcp_req;
 	bool abortio = true;
+	unsigned long flags;
 
 	spin_lock(&inireq->inilock);
 	tfcp_req = inireq->tfcp_req;
@@ -958,7 +964,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
 		return;
 
 	/* break initiator/target relationship for io */
-	spin_lock_irq(&tfcp_req->reqlock);
+	spin_lock_irqsave(&tfcp_req->reqlock, flags);
 	switch (tfcp_req->inistate) {
 	case INI_IO_START:
 	case INI_IO_ACTIVE:
@@ -968,11 +974,11 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
 		abortio = false;
 		break;
 	default:
-		spin_unlock_irq(&tfcp_req->reqlock);
+		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 		WARN_ON(1);
 		return;
 	}
-	spin_unlock_irq(&tfcp_req->reqlock);
+	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 
 	if (abortio)
 		/* leave the reference while the work item is scheduled */