Commit 8832cf92 authored by Sagi Grimberg, committed by Christoph Hellwig

nvmet: use a private workqueue instead of the system workqueue

Any attempt to flush the kernel-global workqueues can deadlock, so we
should simply stop using them. Instead, introduce nvmet_wq, a generic
nvmet workqueue for work elements that don't explicitly require a
dedicated workqueue (as indicated by the mere fact that they currently
use system_wq).

The changes were made using the following substitutions:

 - s/schedule_work(/queue_work(nvmet_wq, /g
 - s/schedule_delayed_work(/queue_delayed_work(nvmet_wq, /g
 - s/flush_scheduled_work()/flush_workqueue(nvmet_wq)/g
Reported-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent bc360b0b
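For readers unfamiliar with the pattern being applied, the sketch below is a minimal illustration, not part of this patch; the module and the names example_wq, example_work and "example-wq" are hypothetical. It shows the private-workqueue lifecycle the commit gives nvmet: allocate the queue with WQ_MEM_RECLAIM at init, queue and flush work on it instead of the system workqueue, and destroy it on exit.

#include <linux/module.h>
#include <linux/workqueue.h>

/* driver-private workqueue, analogous to nvmet_wq in this commit */
static struct workqueue_struct *example_wq;

static void example_work_fn(struct work_struct *work)
{
	pr_info("example work ran on the private workqueue\n");
}

static DECLARE_WORK(example_work, example_work_fn);

static int __init example_init(void)
{
	/* WQ_MEM_RECLAIM matches the flag used for nvmet_wq below */
	example_wq = alloc_workqueue("example-wq", WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		return -ENOMEM;

	queue_work(example_wq, &example_work);	/* instead of schedule_work() */
	flush_workqueue(example_wq);		/* instead of flush_scheduled_work() */
	return 0;
}

static void __exit example_exit(void)
{
	destroy_workqueue(example_wq);		/* drains remaining work */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Flushing a private queue like this only waits for work the driver itself queued, which removes the deadlock risk that flush_scheduled_work() carries when unrelated work items sit on the system workqueue.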
@@ -988,7 +988,7 @@ void nvmet_execute_async_event(struct nvmet_req *req)
 	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
 	mutex_unlock(&ctrl->lock);
-	schedule_work(&ctrl->async_event_work);
+	queue_work(nvmet_wq, &ctrl->async_event_work);
 }
 
 void nvmet_execute_keep_alive(struct nvmet_req *req)
...
@@ -1593,7 +1593,7 @@ static void nvmet_port_release(struct config_item *item)
 	struct nvmet_port *port = to_nvmet_port(item);
 
 	/* Let inflight controllers teardown complete */
-	flush_scheduled_work();
+	flush_workqueue(nvmet_wq);
 	list_del(&port->global_entry);
 	kfree(port->ana_state);
...
@@ -20,6 +20,9 @@ struct workqueue_struct *zbd_wq;
 static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
 static DEFINE_IDA(cntlid_ida);
 
+struct workqueue_struct *nvmet_wq;
+EXPORT_SYMBOL_GPL(nvmet_wq);
+
 /*
  * This read/write semaphore is used to synchronize access to configuration
  * information on a target system that will result in discovery log page
@@ -205,7 +208,7 @@ void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
 	list_add_tail(&aen->entry, &ctrl->async_events);
 	mutex_unlock(&ctrl->lock);
-	schedule_work(&ctrl->async_event_work);
+	queue_work(nvmet_wq, &ctrl->async_event_work);
 }
 
 static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
@@ -385,7 +388,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
 	if (reset_tbkas) {
 		pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
 			ctrl->cntlid);
-		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+		queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
 		return;
 	}
@@ -403,7 +406,7 @@ void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
 	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
 		ctrl->cntlid, ctrl->kato);
 
-	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+	queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
 }
 
 void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
@@ -1478,7 +1481,7 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
 	mutex_lock(&ctrl->lock);
 	if (!(ctrl->csts & NVME_CSTS_CFS)) {
 		ctrl->csts |= NVME_CSTS_CFS;
-		schedule_work(&ctrl->fatal_err_work);
+		queue_work(nvmet_wq, &ctrl->fatal_err_work);
 	}
 	mutex_unlock(&ctrl->lock);
 }
@@ -1620,9 +1623,15 @@ static int __init nvmet_init(void)
 		goto out_free_zbd_work_queue;
 	}
 
+	nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
+	if (!nvmet_wq) {
+		error = -ENOMEM;
+		goto out_free_buffered_work_queue;
+	}
+
 	error = nvmet_init_discovery();
 	if (error)
-		goto out_free_work_queue;
+		goto out_free_nvmet_work_queue;
 
 	error = nvmet_init_configfs();
 	if (error)
@@ -1631,7 +1640,9 @@ static int __init nvmet_init(void)
 out_exit_discovery:
 	nvmet_exit_discovery();
-out_free_work_queue:
+out_free_nvmet_work_queue:
+	destroy_workqueue(nvmet_wq);
+out_free_buffered_work_queue:
 	destroy_workqueue(buffered_io_wq);
 out_free_zbd_work_queue:
 	destroy_workqueue(zbd_wq);
@@ -1643,6 +1654,7 @@ static void __exit nvmet_exit(void)
 	nvmet_exit_configfs();
 	nvmet_exit_discovery();
 	ida_destroy(&cntlid_ida);
+	destroy_workqueue(nvmet_wq);
 	destroy_workqueue(buffered_io_wq);
 	destroy_workqueue(zbd_wq);
...
@@ -1491,7 +1491,7 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
 	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
 		if (!nvmet_fc_tgt_a_get(assoc))
 			continue;
-		if (!schedule_work(&assoc->del_work))
+		if (!queue_work(nvmet_wq, &assoc->del_work))
 			/* already deleting - release local reference */
 			nvmet_fc_tgt_a_put(assoc);
 	}
@@ -1546,7 +1546,7 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
 			continue;
 		assoc->hostport->invalid = 1;
 		noassoc = false;
-		if (!schedule_work(&assoc->del_work))
+		if (!queue_work(nvmet_wq, &assoc->del_work))
 			/* already deleting - release local reference */
 			nvmet_fc_tgt_a_put(assoc);
 	}
@@ -1592,7 +1592,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
 	nvmet_fc_tgtport_put(tgtport);
 
 	if (found_ctrl) {
-		if (!schedule_work(&assoc->del_work))
+		if (!queue_work(nvmet_wq, &assoc->del_work))
 			/* already deleting - release local reference */
 			nvmet_fc_tgt_a_put(assoc);
 		return;
@@ -2060,7 +2060,7 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
 	iod->rqstdatalen = lsreqbuf_len;
 	iod->hosthandle = hosthandle;
 
-	schedule_work(&iod->work);
+	queue_work(nvmet_wq, &iod->work);
 
 	return 0;
 }
...
@@ -360,7 +360,7 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
 		spin_lock(&rport->lock);
 		list_add_tail(&rport->ls_list, &tls_req->ls_list);
 		spin_unlock(&rport->lock);
-		schedule_work(&rport->ls_work);
+		queue_work(nvmet_wq, &rport->ls_work);
 		return ret;
 	}
@@ -393,7 +393,7 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
 		spin_lock(&rport->lock);
 		list_add_tail(&rport->ls_list, &tls_req->ls_list);
 		spin_unlock(&rport->lock);
-		schedule_work(&rport->ls_work);
+		queue_work(nvmet_wq, &rport->ls_work);
 	}
 
 	return 0;
@@ -448,7 +448,7 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
 		spin_lock(&tport->lock);
 		list_add_tail(&tport->ls_list, &tls_req->ls_list);
 		spin_unlock(&tport->lock);
-		schedule_work(&tport->ls_work);
+		queue_work(nvmet_wq, &tport->ls_work);
 		return ret;
 	}
@@ -480,7 +480,7 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
 		spin_lock(&tport->lock);
 		list_add_tail(&tport->ls_list, &tls_req->ls_list);
 		spin_unlock(&tport->lock);
-		schedule_work(&tport->ls_work);
+		queue_work(nvmet_wq, &tport->ls_work);
 	}
 
 	return 0;
@@ -520,7 +520,7 @@ fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
 	tgt_rscn->tport = tgtport->private;
 	INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);
 
-	schedule_work(&tgt_rscn->work);
+	queue_work(nvmet_wq, &tgt_rscn->work);
 }
 
 static void
@@ -739,7 +739,7 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
 	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
 	kref_init(&tfcp_req->ref);
 
-	schedule_work(&tfcp_req->fcp_rcv_work);
+	queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);
 
 	return 0;
 }
@@ -921,7 +921,7 @@ fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
 {
 	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
 
-	schedule_work(&tfcp_req->tio_done_work);
+	queue_work(nvmet_wq, &tfcp_req->tio_done_work);
 }
 
 static void
@@ -976,7 +976,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
 	if (abortio)
 		/* leave the reference while the work item is scheduled */
-		WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
+		WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
 	else {
 		/*
 		 * as the io has already had the done callback made,
...
@@ -283,7 +283,7 @@ static void nvmet_file_execute_flush(struct nvmet_req *req)
 	if (!nvmet_check_transfer_len(req, 0))
 		return;
 	INIT_WORK(&req->f.work, nvmet_file_flush_work);
-	schedule_work(&req->f.work);
+	queue_work(nvmet_wq, &req->f.work);
 }
 
 static void nvmet_file_execute_discard(struct nvmet_req *req)
@@ -343,7 +343,7 @@ static void nvmet_file_execute_dsm(struct nvmet_req *req)
 	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
 		return;
 	INIT_WORK(&req->f.work, nvmet_file_dsm_work);
-	schedule_work(&req->f.work);
+	queue_work(nvmet_wq, &req->f.work);
 }
 
 static void nvmet_file_write_zeroes_work(struct work_struct *w)
@@ -373,7 +373,7 @@ static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
 	if (!nvmet_check_transfer_len(req, 0))
 		return;
 	INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
-	schedule_work(&req->f.work);
+	queue_work(nvmet_wq, &req->f.work);
 }
 
 u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
...
@@ -166,7 +166,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		iod->req.transfer_len = blk_rq_payload_bytes(req);
 	}
 
-	schedule_work(&iod->work);
+	queue_work(nvmet_wq, &iod->work);
 	return BLK_STS_OK;
 }
@@ -187,7 +187,7 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
 		return;
 	}
 
-	schedule_work(&iod->work);
+	queue_work(nvmet_wq, &iod->work);
 }
 
 static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
...
@@ -366,6 +366,7 @@ struct nvmet_req {
 extern struct workqueue_struct *buffered_io_wq;
 extern struct workqueue_struct *zbd_wq;
+extern struct workqueue_struct *nvmet_wq;
 
 static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
 {
...
@@ -283,7 +283,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
 	if (req->p.use_workqueue || effects) {
 		INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
 		req->p.rq = rq;
-		schedule_work(&req->p.work);
+		queue_work(nvmet_wq, &req->p.work);
 	} else {
 		rq->end_io_data = req;
 		blk_execute_rq_nowait(rq, false, nvmet_passthru_req_done);
...
@@ -1584,7 +1584,7 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
 	if (queue->host_qid == 0) {
 		/* Let inflight controller teardown complete */
-		flush_scheduled_work();
+		flush_workqueue(nvmet_wq);
 	}
 
 	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
@@ -1669,7 +1669,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
 	if (disconnect) {
 		rdma_disconnect(queue->cm_id);
-		schedule_work(&queue->release_work);
+		queue_work(nvmet_wq, &queue->release_work);
 	}
 }
@@ -1699,7 +1699,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
 	mutex_unlock(&nvmet_rdma_queue_mutex);
 
 	pr_err("failed to connect queue %d\n", queue->idx);
-	schedule_work(&queue->release_work);
+	queue_work(nvmet_wq, &queue->release_work);
 }
 
 /**
@@ -1773,7 +1773,7 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
 		if (!queue) {
 			struct nvmet_rdma_port *port = cm_id->context;
 
-			schedule_delayed_work(&port->repair_work, 0);
+			queue_delayed_work(nvmet_wq, &port->repair_work, 0);
 			break;
 		}
 		fallthrough;
@@ -1903,7 +1903,7 @@ static void nvmet_rdma_repair_port_work(struct work_struct *w)
 	nvmet_rdma_disable_port(port);
 	ret = nvmet_rdma_enable_port(port);
 	if (ret)
-		schedule_delayed_work(&port->repair_work, 5 * HZ);
+		queue_delayed_work(nvmet_wq, &port->repair_work, 5 * HZ);
 }
 
 static int nvmet_rdma_add_port(struct nvmet_port *nport)
@@ -2053,7 +2053,7 @@ static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data
 	}
 	mutex_unlock(&nvmet_rdma_queue_mutex);
 
-	flush_scheduled_work();
+	flush_workqueue(nvmet_wq);
 }
 
 static struct ib_client nvmet_rdma_ib_client = {
...
@@ -1269,7 +1269,7 @@ static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
 	spin_lock(&queue->state_lock);
 	if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
 		queue->state = NVMET_TCP_Q_DISCONNECTING;
-		schedule_work(&queue->release_work);
+		queue_work(nvmet_wq, &queue->release_work);
 	}
 	spin_unlock(&queue->state_lock);
 }
@@ -1684,7 +1684,7 @@ static void nvmet_tcp_listen_data_ready(struct sock *sk)
 		goto out;
 
 	if (sk->sk_state == TCP_LISTEN)
-		schedule_work(&port->accept_work);
+		queue_work(nvmet_wq, &port->accept_work);
 out:
 	read_unlock_bh(&sk->sk_callback_lock);
 }
@@ -1815,7 +1815,7 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
 	if (sq->qid == 0) {
 		/* Let inflight controller teardown complete */
-		flush_scheduled_work();
+		flush_workqueue(nvmet_wq);
 	}
 
 	queue->nr_cmds = sq->size * 2;
@@ -1876,12 +1876,12 @@ static void __exit nvmet_tcp_exit(void)
 	nvmet_unregister_transport(&nvmet_tcp_ops);
 
-	flush_scheduled_work();
+	flush_workqueue(nvmet_wq);
 	mutex_lock(&nvmet_tcp_queue_mutex);
 	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
 		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
 	mutex_unlock(&nvmet_tcp_queue_mutex);
-	flush_scheduled_work();
+	flush_workqueue(nvmet_wq);
 
 	destroy_workqueue(nvmet_tcp_wq);
 }
...