Commit 815a76b9 authored by Linus Torvalds

Merge tag 'block-6.8-2024-02-01' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - NVMe pull request via Keith:
     - Remove duplicated enums (Guixen)
     - Use appropriate controller state accessors (Keith)
     - Retryable authentication (Hannes)
     - Add missing module descriptions (Chaitanya)
     - Fibre-channel fixes for blktests (Daniel)
     - Various type correctness updates (Caleb)
     - Improve fabrics connection debugging prints (Nitin)
     - Passthrough command verbose error logging (Adam)

 - Fix for where the IO priority is set in the bio for drivers that use
   fops->submit_bio() to queue IO, such as md/dm

* tag 'block-6.8-2024-02-01' of git://git.kernel.dk/linux: (32 commits)
  block: Fix where bio IO priority gets set
  nvme: allow passthru cmd error logging
  nvme-fc: show hostnqn when connecting to fc target
  nvme-rdma: show hostnqn when connecting to rdma target
  nvme-tcp: show hostnqn when connecting to tcp target
  nvmet-fc: use RCU list iterator for assoc_list
  nvmet-fc: take ref count on tgtport before delete assoc
  nvmet-fc: avoid deadlock on delete association path
  nvmet-fc: abort command when there is no binding
  nvmet-fc: do not tack refs on tgtports from assoc
  nvmet-fc: remove null hostport pointer check
  nvmet-fc: hold reference on hostport match
  nvmet-fc: free queue and assoc directly
  nvmet-fc: defer cleanup using RCU properly
  nvmet-fc: release reference on target port
  nvmet-fcloop: swap the list_add_tail arguments
  nvme-fc: do not wait in vain when unloading module
  nvme-fc: log human-readable opcode on timeout
  nvme: split out fabrics version of nvme_opcode_str()
  nvme: take const cmd pointer in read-only helpers
  ...
parents 717ca0b8 f3c89983
......@@ -49,6 +49,7 @@
#include "blk-pm.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"
#include "blk-ioprio.h"
struct dentry *blk_debugfs_root;
......@@ -833,6 +834,14 @@ void submit_bio_noacct(struct bio *bio)
}
EXPORT_SYMBOL(submit_bio_noacct);
static void bio_set_ioprio(struct bio *bio)
{
/* Nobody set ioprio so far? Initialize it based on task's nice value */
if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
bio->bi_ioprio = get_current_ioprio();
blkcg_set_ioprio(bio);
}
/**
* submit_bio - submit a bio to the block device layer for I/O
* @bio: The &struct bio which describes the I/O
......@@ -855,6 +864,7 @@ void submit_bio(struct bio *bio)
count_vm_events(PGPGOUT, bio_sectors(bio));
}
bio_set_ioprio(bio);
submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);
......
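For context: moving the bio_set_ioprio() call into submit_bio() means bio-based drivers (md/dm), whose bios never pass through blk_mq_submit_bio(), now get their priority initialized too. A minimal userspace sketch of the initialize-if-unset rule, assuming the standard Linux ioprio layout (3-bit class in the top bits of a 16-bit value) and with get_current_ioprio() stubbed out:

#include <stdio.h>
#include <stdint.h>

#define IOPRIO_CLASS_SHIFT 13
#define IOPRIO_CLASS_NONE  0
#define IOPRIO_CLASS_RT    1
#define IOPRIO_CLASS_BE    2

#define IOPRIO_PRIO_CLASS(p)           ((p) >> IOPRIO_CLASS_SHIFT)
#define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | (data))

/* Stand-in for get_current_ioprio(): the kernel derives this from the
 * task's io_context or, failing that, its nice value. */
static uint16_t get_current_ioprio(void)
{
	return IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);
}

static void bio_set_ioprio(uint16_t *bi_ioprio)
{
	/* Nobody set ioprio so far? Initialize it based on task's nice value */
	if (IOPRIO_PRIO_CLASS(*bi_ioprio) == IOPRIO_CLASS_NONE)
		*bi_ioprio = get_current_ioprio();
}

int main(void)
{
	uint16_t unset = 0;
	uint16_t preset = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 0);

	bio_set_ioprio(&unset);
	bio_set_ioprio(&preset);	/* already classed: left untouched */
	printf("unset -> class %u, preset -> class %u\n",
	       IOPRIO_PRIO_CLASS(unset), IOPRIO_PRIO_CLASS(preset));
	return 0;
}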
......@@ -40,7 +40,6 @@
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-ioprio.h"
static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
......@@ -2944,14 +2943,6 @@ static bool blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug,
return true;
}
static void bio_set_ioprio(struct bio *bio)
{
/* Nobody set ioprio so far? Initialize it based on task's nice value */
if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
bio->bi_ioprio = get_current_ioprio();
blkcg_set_ioprio(bio);
}
/**
* blk_mq_submit_bio - Create and send a request to block device.
* @bio: Bio pointer.
......@@ -2976,7 +2967,6 @@ void blk_mq_submit_bio(struct bio *bio)
blk_status_t ret;
bio = blk_queue_bounce(bio, q);
bio_set_ioprio(bio);
if (plug) {
rq = rq_list_peek(&plug->cached_rq);
......
......@@ -471,4 +471,5 @@ int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key)
}
EXPORT_SYMBOL_GPL(nvme_auth_generate_key);
MODULE_DESCRIPTION("NVMe Authentication framework");
MODULE_LICENSE("GPL v2");
......@@ -181,5 +181,6 @@ static void __exit nvme_keyring_exit(void)
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
MODULE_DESCRIPTION("NVMe Keyring implementation");
module_init(nvme_keyring_init);
module_exit(nvme_keyring_exit);
......@@ -797,6 +797,7 @@ static int apple_nvme_init_request(struct blk_mq_tag_set *set,
static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
{
enum nvme_ctrl_state state = nvme_ctrl_state(&anv->ctrl);
u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
bool dead = false, freeze = false;
unsigned long flags;
......@@ -808,8 +809,8 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
if (csts & NVME_CSTS_CFS)
dead = true;
if (anv->ctrl.state == NVME_CTRL_LIVE ||
anv->ctrl.state == NVME_CTRL_RESETTING) {
if (state == NVME_CTRL_LIVE ||
state == NVME_CTRL_RESETTING) {
freeze = true;
nvme_start_freeze(&anv->ctrl);
}
......@@ -881,7 +882,7 @@ static enum blk_eh_timer_return apple_nvme_timeout(struct request *req)
unsigned long flags;
u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
if (anv->ctrl.state != NVME_CTRL_LIVE) {
if (nvme_ctrl_state(&anv->ctrl) != NVME_CTRL_LIVE) {
/*
* From rdma.c:
* If we are resetting, connecting or deleting we should
......@@ -985,10 +986,10 @@ static void apple_nvme_reset_work(struct work_struct *work)
u32 boot_status, aqa;
struct apple_nvme *anv =
container_of(work, struct apple_nvme, ctrl.reset_work);
enum nvme_ctrl_state state = nvme_ctrl_state(&anv->ctrl);
if (anv->ctrl.state != NVME_CTRL_RESETTING) {
dev_warn(anv->dev, "ctrl state %d is not RESETTING\n",
anv->ctrl.state);
if (state != NVME_CTRL_RESETTING) {
dev_warn(anv->dev, "ctrl state %d is not RESETTING\n", state);
ret = -ENODEV;
goto out;
}
......
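These apple.c hunks all follow one pattern: read the controller state once through nvme_ctrl_state() into a local and test the snapshot, instead of dereferencing ctrl->state repeatedly. A minimal sketch, assuming the accessor is essentially a READ_ONCE() of the state field (as in the host core):

#include <stdio.h>

#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
};

struct nvme_ctrl {
	enum nvme_ctrl_state state;
};

static enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl)
{
	return READ_ONCE(ctrl->state);
}

int main(void)
{
	struct nvme_ctrl ctrl = { .state = NVME_CTRL_RESETTING };
	/* One snapshot; every subsequent test sees the same value even if
	 * another context changes ctrl->state concurrently. */
	enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl);

	if (state == NVME_CTRL_LIVE || state == NVME_CTRL_RESETTING)
		printf("freeze queues before disabling\n");
	return 0;
}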
......@@ -48,11 +48,6 @@ struct nvme_dhchap_queue_context {
static struct workqueue_struct *nvme_auth_wq;
#define nvme_auth_flags_from_qid(qid) \
(qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED
#define nvme_auth_queue_from_qid(ctrl, qid) \
(qid == 0) ? (ctrl)->fabrics_q : (ctrl)->connect_q
static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl)
{
return ctrl->opts->nr_io_queues + ctrl->opts->nr_write_queues +
......@@ -63,10 +58,15 @@ static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
void *data, size_t data_len, bool auth_send)
{
struct nvme_command cmd = {};
blk_mq_req_flags_t flags = nvme_auth_flags_from_qid(qid);
struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid);
nvme_submit_flags_t flags = NVME_SUBMIT_RETRY;
struct request_queue *q = ctrl->fabrics_q;
int ret;
if (qid != 0) {
flags |= NVME_SUBMIT_NOWAIT | NVME_SUBMIT_RESERVED;
q = ctrl->connect_q;
}
cmd.auth_common.opcode = nvme_fabrics_command;
cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
cmd.auth_common.spsp0 = 0x01;
......@@ -80,8 +80,7 @@ static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
}
ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
qid == 0 ? NVME_QID_ANY : qid,
0, flags);
qid == 0 ? NVME_QID_ANY : qid, flags);
if (ret > 0)
dev_warn(ctrl->device,
"qid %d auth_send failed with status %d\n", qid, ret);
......@@ -897,7 +896,7 @@ static void nvme_ctrl_auth_work(struct work_struct *work)
* If the ctrl is not connected, bail as reconnect will handle
* authentication.
*/
if (ctrl->state != NVME_CTRL_LIVE)
if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
return;
/* Authenticate admin queue first */
......
......@@ -171,15 +171,15 @@ static const char * const nvme_statuses[] = {
[NVME_SC_HOST_ABORTED_CMD] = "Host Aborted Command",
};
const unsigned char *nvme_get_error_status_str(u16 status)
const char *nvme_get_error_status_str(u16 status)
{
status &= 0x7ff;
if (status < ARRAY_SIZE(nvme_statuses) && nvme_statuses[status])
return nvme_statuses[status & 0x7ff];
return nvme_statuses[status];
return "Unknown";
}
const unsigned char *nvme_get_opcode_str(u8 opcode)
const char *nvme_get_opcode_str(u8 opcode)
{
if (opcode < ARRAY_SIZE(nvme_ops) && nvme_ops[opcode])
return nvme_ops[opcode];
......@@ -187,7 +187,7 @@ const unsigned char *nvme_get_opcode_str(u8 opcode)
}
EXPORT_SYMBOL_GPL(nvme_get_opcode_str);
const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
const char *nvme_get_admin_opcode_str(u8 opcode)
{
if (opcode < ARRAY_SIZE(nvme_admin_ops) && nvme_admin_ops[opcode])
return nvme_admin_ops[opcode];
......@@ -195,7 +195,7 @@ const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
}
EXPORT_SYMBOL_GPL(nvme_get_admin_opcode_str);
const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode) {
const char *nvme_get_fabrics_opcode_str(u8 opcode) {
if (opcode < ARRAY_SIZE(nvme_fabrics_ops) && nvme_fabrics_ops[opcode])
return nvme_fabrics_ops[opcode];
return "Unknown";
......
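The lookup pattern being cleaned up here (mask the status, bounds-check a sparse string table, fall back to "Unknown"), together with the sct/sc field extraction the error-logging helpers use, as a runnable sketch with illustrative table entries only:

#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Illustrative entries; the kernel tables are much larger. */
static const char * const nvme_statuses[] = {
	[0x0] = "Success",
	[0x2] = "Invalid Field in Command",
};

static const char *nvme_get_error_status_str(uint16_t status)
{
	status &= 0x7ff;
	if (status < ARRAY_SIZE(nvme_statuses) && nvme_statuses[status])
		return nvme_statuses[status];
	return "Unknown";
}

int main(void)
{
	uint16_t status = 0x2;

	/* Same field extraction the passthru error logging uses. */
	printf("%s (sct 0x%x / sc 0x%x)\n",
	       nvme_get_error_status_str(status),
	       status >> 8 & 7,	/* Status Code Type */
	       status & 0xff);	/* Status Code */
	return 0;
}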
......@@ -338,6 +338,30 @@ static void nvme_log_error(struct request *req)
nr->status & NVME_SC_DNR ? "DNR " : "");
}
static void nvme_log_err_passthru(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
struct nvme_request *nr = nvme_req(req);
pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s"
"cdw10=0x%x cdw11=0x%x cdw12=0x%x cdw13=0x%x cdw14=0x%x cdw15=0x%x\n",
ns ? ns->disk->disk_name : dev_name(nr->ctrl->device),
ns ? nvme_get_opcode_str(nr->cmd->common.opcode) :
nvme_get_admin_opcode_str(nr->cmd->common.opcode),
nr->cmd->common.opcode,
nvme_get_error_status_str(nr->status),
nr->status >> 8 & 7, /* Status Code Type */
nr->status & 0xff, /* Status Code */
nr->status & NVME_SC_MORE ? "MORE " : "",
nr->status & NVME_SC_DNR ? "DNR " : "",
nr->cmd->common.cdw10,
nr->cmd->common.cdw11,
nr->cmd->common.cdw12,
nr->cmd->common.cdw13,
nr->cmd->common.cdw14,
nr->cmd->common.cdw15);
}
enum nvme_disposition {
COMPLETE,
RETRY,
......@@ -385,8 +409,12 @@ static inline void nvme_end_req(struct request *req)
{
blk_status_t status = nvme_error_status(nvme_req(req)->status);
if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET)))
nvme_log_error(req);
if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
if (blk_rq_is_passthrough(req))
nvme_log_err_passthru(req);
else
nvme_log_error(req);
}
nvme_end_req_zoned(req);
nvme_trace_bio_complete(req);
if (req->cmd_flags & REQ_NVME_MPATH)
......@@ -679,10 +707,21 @@ static inline void nvme_clear_nvme_request(struct request *req)
/* initialize a passthrough request */
void nvme_init_request(struct request *req, struct nvme_command *cmd)
{
if (req->q->queuedata)
struct nvme_request *nr = nvme_req(req);
bool logging_enabled;
if (req->q->queuedata) {
struct nvme_ns *ns = req->q->disk->private_data;
logging_enabled = ns->passthru_err_log_enabled;
req->timeout = NVME_IO_TIMEOUT;
else /* no queuedata implies admin queue */
} else { /* no queuedata implies admin queue */
logging_enabled = nr->ctrl->passthru_err_log_enabled;
req->timeout = NVME_ADMIN_TIMEOUT;
}
if (!logging_enabled)
req->rq_flags |= RQF_QUIET;
/* passthru commands should let the driver set the SGL flags */
cmd->common.flags &= ~NVME_CMD_SGL_ALL;
......@@ -691,8 +730,7 @@ void nvme_init_request(struct request *req, struct nvme_command *cmd)
if (req->mq_hctx->type == HCTX_TYPE_POLL)
req->cmd_flags |= REQ_POLLED;
nvme_clear_nvme_request(req);
req->rq_flags |= RQF_QUIET;
memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
memcpy(nr->cmd, cmd, sizeof(*cmd));
}
EXPORT_SYMBOL_GPL(nvme_init_request);
......@@ -721,7 +759,7 @@ blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
bool queue_live)
bool queue_live, enum nvme_ctrl_state state)
{
struct nvme_request *req = nvme_req(rq);
......@@ -742,7 +780,7 @@ bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
* command, which is required to set the queue live in the
* appropriate states.
*/
switch (nvme_ctrl_state(ctrl)) {
switch (state) {
case NVME_CTRL_CONNECTING:
if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
(req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
......@@ -1051,20 +1089,27 @@ EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, NVME_TARGET_PASSTHRU);
*/
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
int qid, int at_head, blk_mq_req_flags_t flags)
int qid, nvme_submit_flags_t flags)
{
struct request *req;
int ret;
blk_mq_req_flags_t blk_flags = 0;
if (flags & NVME_SUBMIT_NOWAIT)
blk_flags |= BLK_MQ_REQ_NOWAIT;
if (flags & NVME_SUBMIT_RESERVED)
blk_flags |= BLK_MQ_REQ_RESERVED;
if (qid == NVME_QID_ANY)
req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
req = blk_mq_alloc_request(q, nvme_req_op(cmd), blk_flags);
else
req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), blk_flags,
qid - 1);
if (IS_ERR(req))
return PTR_ERR(req);
nvme_init_request(req, cmd);
if (flags & NVME_SUBMIT_RETRY)
req->cmd_flags &= ~REQ_FAILFAST_DRIVER;
if (buffer && bufflen) {
ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
......@@ -1072,7 +1117,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
goto out;
}
ret = nvme_execute_rq(req, at_head);
ret = nvme_execute_rq(req, flags & NVME_SUBMIT_AT_HEAD);
if (result && ret >= 0)
*result = nvme_req(req)->result;
out:
......@@ -1085,7 +1130,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buffer, unsigned bufflen)
{
return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
NVME_QID_ANY, 0, 0);
NVME_QID_ANY, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
......@@ -1560,7 +1605,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
c.features.dword11 = cpu_to_le32(dword11);
ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
buffer, buflen, NVME_QID_ANY, 0, 0);
buffer, buflen, NVME_QID_ANY, 0);
if (ret >= 0 && result)
*result = le32_to_cpu(res.u32);
return ret;
......@@ -2172,7 +2217,7 @@ static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t l
cmd.common.cdw11 = cpu_to_le32(len);
return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
NVME_QID_ANY, 1, 0);
NVME_QID_ANY, NVME_SUBMIT_AT_HEAD);
}
static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended)
......@@ -3651,6 +3696,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
ns->disk = disk;
ns->queue = disk->queue;
ns->passthru_err_log_enabled = false;
if (ctrl->opts && ctrl->opts->data_digest)
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
......@@ -3714,6 +3760,13 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
nvme_mpath_add_disk(ns, info->anagrpid);
nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
/*
* Set ns->disk->device->driver_data to ns so we can access
* ns->passthru_err_log_enabled in nvme_passthru_err_log_enabled_store() and
* nvme_passthru_err_log_enabled_show().
*/
dev_set_drvdata(disk_to_dev(ns->disk), ns);
return;
out_cleanup_ns_from_list:
......@@ -4514,6 +4567,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
int ret;
WRITE_ONCE(ctrl->state, NVME_CTRL_NEW);
ctrl->passthru_err_log_enabled = false;
clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
spin_lock_init(&ctrl->lock);
mutex_init(&ctrl->scan_lock);
......@@ -4851,5 +4905,6 @@ static void __exit nvme_core_exit(void)
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
MODULE_DESCRIPTION("NVMe host core framework");
module_init(nvme_core_init);
module_exit(nvme_core_exit);
......@@ -180,7 +180,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
cmd.prop_get.offset = cpu_to_le32(off);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
NVME_QID_ANY, 0, 0);
NVME_QID_ANY, 0);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
......@@ -226,7 +226,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
cmd.prop_get.offset = cpu_to_le32(off);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
NVME_QID_ANY, 0, 0);
NVME_QID_ANY, 0);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
......@@ -271,7 +271,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
cmd.prop_set.value = cpu_to_le64(val);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0,
NVME_QID_ANY, 0, 0);
NVME_QID_ANY, 0);
if (unlikely(ret))
dev_err(ctrl->device,
"Property Set error: %d, offset %#x\n",
......@@ -450,8 +450,10 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
return -ENOMEM;
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
data, sizeof(*data), NVME_QID_ANY, 1,
BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
data, sizeof(*data), NVME_QID_ANY,
NVME_SUBMIT_AT_HEAD |
NVME_SUBMIT_NOWAIT |
NVME_SUBMIT_RESERVED);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
......@@ -525,8 +527,10 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
return -ENOMEM;
ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
data, sizeof(*data), qid, 1,
BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
data, sizeof(*data), qid,
NVME_SUBMIT_AT_HEAD |
NVME_SUBMIT_RESERVED |
NVME_SUBMIT_NOWAIT);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
......@@ -1488,6 +1492,7 @@ static void __exit nvmf_exit(void)
}
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("NVMe host fabrics library");
module_init(nvmf_init);
module_exit(nvmf_exit);
......@@ -185,9 +185,11 @@ static inline bool
nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
struct nvmf_ctrl_options *opts)
{
if (ctrl->state == NVME_CTRL_DELETING ||
ctrl->state == NVME_CTRL_DELETING_NOIO ||
ctrl->state == NVME_CTRL_DEAD ||
enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
if (state == NVME_CTRL_DELETING ||
state == NVME_CTRL_DELETING_NOIO ||
state == NVME_CTRL_DEAD ||
strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
!uuid_equal(&opts->host->id, &ctrl->opts->host->id))
......
......@@ -221,11 +221,6 @@ static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);
static struct workqueue_struct *nvme_fc_wq;
static bool nvme_fc_waiting_to_unload;
static DECLARE_COMPLETION(nvme_fc_unload_proceed);
/*
* These items are short-term. They will eventually be moved into
* a generic FC class. See comments in module init.
......@@ -255,8 +250,6 @@ nvme_fc_free_lport(struct kref *ref)
/* remove from transport list */
spin_lock_irqsave(&nvme_fc_lock, flags);
list_del(&lport->port_list);
if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
complete(&nvme_fc_unload_proceed);
spin_unlock_irqrestore(&nvme_fc_lock, flags);
ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num);
......@@ -2574,6 +2567,7 @@ static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
{
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
struct nvme_fc_ctrl *ctrl = op->ctrl;
u16 qnum = op->queue->qnum;
struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
struct nvme_command *sqe = &cmdiu->sqe;
......@@ -2582,10 +2576,11 @@ static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
* will detect the aborted io and will fail the connection.
*/
dev_info(ctrl->ctrl.device,
"NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: "
"NVME-FC{%d.%d}: io timeout: opcode %d fctype %d (%s) w10/11: "
"x%08x/x%08x\n",
ctrl->cnum, op->queue->qnum, sqe->common.opcode,
sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11);
ctrl->cnum, qnum, sqe->common.opcode, sqe->fabrics.fctype,
nvme_fabrics_opcode_str(qnum, sqe),
sqe->common.cdw10, sqe->common.cdw11);
if (__nvme_fc_abort_op(ctrl, op))
nvme_fc_error_recovery(ctrl, "io timeout abort failed");
......@@ -3575,8 +3570,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
flush_delayed_work(&ctrl->connect_work);
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl));
"NVME-FC{%d}: new ctrl: NQN \"%s\", hostnqn: %s\n",
ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl), opts->host->nqn);
return &ctrl->ctrl;
......@@ -3894,10 +3889,6 @@ static int __init nvme_fc_init_module(void)
{
int ret;
nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
if (!nvme_fc_wq)
return -ENOMEM;
/*
* NOTE:
* It is expected that in the future the kernel will combine
......@@ -3915,7 +3906,7 @@ static int __init nvme_fc_init_module(void)
ret = class_register(&fc_class);
if (ret) {
pr_err("couldn't register class fc\n");
goto out_destroy_wq;
return ret;
}
/*
......@@ -3939,8 +3930,6 @@ static int __init nvme_fc_init_module(void)
device_destroy(&fc_class, MKDEV(0, 0));
out_destroy_class:
class_unregister(&fc_class);
out_destroy_wq:
destroy_workqueue(nvme_fc_wq);
return ret;
}
......@@ -3960,48 +3949,27 @@ nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
spin_unlock(&rport->lock);
}
static void
nvme_fc_cleanup_for_unload(void)
static void __exit nvme_fc_exit_module(void)
{
struct nvme_fc_lport *lport;
struct nvme_fc_rport *rport;
list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
list_for_each_entry(rport, &lport->endp_list, endp_list) {
nvme_fc_delete_controllers(rport);
}
}
}
static void __exit nvme_fc_exit_module(void)
{
unsigned long flags;
bool need_cleanup = false;
spin_lock_irqsave(&nvme_fc_lock, flags);
nvme_fc_waiting_to_unload = true;
if (!list_empty(&nvme_fc_lport_list)) {
need_cleanup = true;
nvme_fc_cleanup_for_unload();
}
list_for_each_entry(lport, &nvme_fc_lport_list, port_list)
list_for_each_entry(rport, &lport->endp_list, endp_list)
nvme_fc_delete_controllers(rport);
spin_unlock_irqrestore(&nvme_fc_lock, flags);
if (need_cleanup) {
pr_info("%s: waiting for ctlr deletes\n", __func__);
wait_for_completion(&nvme_fc_unload_proceed);
pr_info("%s: ctrl deletes complete\n", __func__);
}
flush_workqueue(nvme_delete_wq);
nvmf_unregister_transport(&nvme_fc_transport);
ida_destroy(&nvme_fc_local_port_cnt);
ida_destroy(&nvme_fc_ctrl_cnt);
device_destroy(&fc_class, MKDEV(0, 0));
class_unregister(&fc_class);
destroy_workqueue(nvme_fc_wq);
}
module_init(nvme_fc_init_module);
module_exit(nvme_fc_exit_module);
MODULE_DESCRIPTION("NVMe host FC transport driver");
MODULE_LICENSE("GPL v2");
......@@ -156,7 +156,7 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
if (!ns->head->disk)
continue;
kblockd_schedule_work(&ns->head->requeue_work);
if (ctrl->state == NVME_CTRL_LIVE)
if (nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE)
disk_uevent(ns->head->disk, KOBJ_CHANGE);
}
up_read(&ctrl->namespaces_rwsem);
......@@ -223,13 +223,14 @@ void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
static bool nvme_path_is_disabled(struct nvme_ns *ns)
{
enum nvme_ctrl_state state = nvme_ctrl_state(ns->ctrl);
/*
* We don't treat NVME_CTRL_DELETING as a disabled path as I/O should
* still be able to complete assuming that the controller is connected.
* Otherwise it will fail immediately and return to the requeue list.
*/
if (ns->ctrl->state != NVME_CTRL_LIVE &&
ns->ctrl->state != NVME_CTRL_DELETING)
if (state != NVME_CTRL_LIVE && state != NVME_CTRL_DELETING)
return true;
if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
!test_bit(NVME_NS_READY, &ns->flags))
......@@ -331,7 +332,7 @@ static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
{
return ns->ctrl->state == NVME_CTRL_LIVE &&
return nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE &&
ns->ana_state == NVME_ANA_OPTIMIZED;
}
......@@ -358,7 +359,7 @@ static bool nvme_available_path(struct nvme_ns_head *head)
list_for_each_entry_rcu(ns, &head->list, siblings) {
if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
continue;
switch (ns->ctrl->state) {
switch (nvme_ctrl_state(ns->ctrl)) {
case NVME_CTRL_LIVE:
case NVME_CTRL_RESETTING:
case NVME_CTRL_CONNECTING:
......@@ -667,7 +668,7 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
* controller is ready.
*/
if (nvme_state_is_live(ns->ana_state) &&
ns->ctrl->state == NVME_CTRL_LIVE)
nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE)
nvme_mpath_set_live(ns);
}
......@@ -748,7 +749,7 @@ static void nvme_ana_work(struct work_struct *work)
{
struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);
if (ctrl->state != NVME_CTRL_LIVE)
if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
return;
nvme_read_ana_log(ctrl);
......
......@@ -263,6 +263,7 @@ enum nvme_ctrl_flags {
struct nvme_ctrl {
bool comp_seen;
bool identified;
bool passthru_err_log_enabled;
enum nvme_ctrl_state state;
spinlock_t lock;
struct mutex scan_lock;
......@@ -522,7 +523,7 @@ struct nvme_ns {
struct device cdev_device;
struct nvme_fault_inject fault_inject;
bool passthru_err_log_enabled;
};
/* NVMe ns supports metadata actions by the controller (generate/strip) */
......@@ -805,17 +806,18 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
struct request *req);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
bool queue_live);
bool queue_live, enum nvme_ctrl_state state);
static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
bool queue_live)
{
if (likely(ctrl->state == NVME_CTRL_LIVE))
enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
if (likely(state == NVME_CTRL_LIVE))
return true;
if (ctrl->ops->flags & NVME_F_FABRICS &&
ctrl->state == NVME_CTRL_DELETING)
if (ctrl->ops->flags & NVME_F_FABRICS && state == NVME_CTRL_DELETING)
return queue_live;
return __nvme_check_ready(ctrl, rq, queue_live);
return __nvme_check_ready(ctrl, rq, queue_live, state);
}
/*
......@@ -836,12 +838,27 @@ static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
(ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
}
/*
* Flags for __nvme_submit_sync_cmd()
*/
typedef __u32 __bitwise nvme_submit_flags_t;
enum {
/* Insert request at the head of the queue */
NVME_SUBMIT_AT_HEAD = (__force nvme_submit_flags_t)(1 << 0),
/* Set BLK_MQ_REQ_NOWAIT when allocating request */
NVME_SUBMIT_NOWAIT = (__force nvme_submit_flags_t)(1 << 1),
/* Set BLK_MQ_REQ_RESERVED when allocating request */
NVME_SUBMIT_RESERVED = (__force nvme_submit_flags_t)(1 << 2),
/* Retry command when NVME_SC_DNR is not set in the result */
NVME_SUBMIT_RETRY = (__force nvme_submit_flags_t)(1 << 3),
};
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
int qid, int at_head,
blk_mq_req_flags_t flags);
int qid, nvme_submit_flags_t flags);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
unsigned int dword11, void *buffer, size_t buflen,
u32 *result);
......@@ -1124,35 +1141,42 @@ static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
}
#ifdef CONFIG_NVME_VERBOSE_ERRORS
const unsigned char *nvme_get_error_status_str(u16 status);
const unsigned char *nvme_get_opcode_str(u8 opcode);
const unsigned char *nvme_get_admin_opcode_str(u8 opcode);
const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode);
const char *nvme_get_error_status_str(u16 status);
const char *nvme_get_opcode_str(u8 opcode);
const char *nvme_get_admin_opcode_str(u8 opcode);
const char *nvme_get_fabrics_opcode_str(u8 opcode);
#else /* CONFIG_NVME_VERBOSE_ERRORS */
static inline const unsigned char *nvme_get_error_status_str(u16 status)
static inline const char *nvme_get_error_status_str(u16 status)
{
return "I/O Error";
}
static inline const unsigned char *nvme_get_opcode_str(u8 opcode)
static inline const char *nvme_get_opcode_str(u8 opcode)
{
return "I/O Cmd";
}
static inline const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
static inline const char *nvme_get_admin_opcode_str(u8 opcode)
{
return "Admin Cmd";
}
static inline const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode)
static inline const char *nvme_get_fabrics_opcode_str(u8 opcode)
{
return "Fabrics Cmd";
}
#endif /* CONFIG_NVME_VERBOSE_ERRORS */
static inline const unsigned char *nvme_opcode_str(int qid, u8 opcode, u8 fctype)
static inline const char *nvme_opcode_str(int qid, u8 opcode)
{
if (opcode == nvme_fabrics_command)
return nvme_get_fabrics_opcode_str(fctype);
return qid ? nvme_get_opcode_str(opcode) :
nvme_get_admin_opcode_str(opcode);
}
static inline const char *nvme_fabrics_opcode_str(
int qid, const struct nvme_command *cmd)
{
if (nvme_is_fabrics(cmd))
return nvme_get_fabrics_opcode_str(cmd->fabrics.fctype);
return nvme_opcode_str(qid, cmd->common.opcode);
}
#endif /* _NVME_H */
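The split between nvme_opcode_str() and the new fabrics-aware nvme_fabrics_opcode_str() can be exercised in isolation. A sketch with the string helpers reduced to the CONFIG_NVME_VERBOSE_ERRORS=n fallbacks; the fctype value 0x01 stands for a fabrics connect command:

#include <stdio.h>
#include <stdint.h>

enum { nvme_fabrics_command = 0x7f };

struct nvme_command {
	union {
		struct { uint8_t opcode; } common;
		struct { uint8_t opcode; uint8_t fctype; } fabrics;
	};
};

static int nvme_is_fabrics(const struct nvme_command *cmd)
{
	return cmd->common.opcode == nvme_fabrics_command;
}

static const char *nvme_get_opcode_str(uint8_t op)         { return "I/O Cmd"; }
static const char *nvme_get_admin_opcode_str(uint8_t op)   { return "Admin Cmd"; }
static const char *nvme_get_fabrics_opcode_str(uint8_t ft) { return "Fabrics Cmd"; }

/* qid 0 is the admin queue; every other qid carries I/O commands. */
static const char *nvme_opcode_str(int qid, uint8_t opcode)
{
	return qid ? nvme_get_opcode_str(opcode) :
		     nvme_get_admin_opcode_str(opcode);
}

/* Fabrics-aware wrapper: dispatches on the command itself, so callers no
 * longer pass a possibly-bogus fctype for non-fabrics commands. */
static const char *nvme_fabrics_opcode_str(int qid,
					   const struct nvme_command *cmd)
{
	if (nvme_is_fabrics(cmd))
		return nvme_get_fabrics_opcode_str(cmd->fabrics.fctype);
	return nvme_opcode_str(qid, cmd->common.opcode);
}

int main(void)
{
	struct nvme_command connect = {
		.fabrics = { .opcode = nvme_fabrics_command, .fctype = 0x01 },
	};
	struct nvme_command read_cmd = { .common = { .opcode = 0x02 } };

	printf("%s / %s\n", nvme_fabrics_opcode_str(1, &connect),
	       nvme_fabrics_opcode_str(1, &read_cmd));
	return 0;
}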
......@@ -1349,7 +1349,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
dev_warn(dev->ctrl.device,
"I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, reset controller\n",
req->tag, nvme_cid(req), opcode,
nvme_opcode_str(nvmeq->qid, opcode, 0), nvmeq->qid);
nvme_opcode_str(nvmeq->qid, opcode), nvmeq->qid);
nvme_req(req)->flags |= NVME_REQ_CANCELLED;
goto disable;
}
......@@ -3543,5 +3543,6 @@ static void __exit nvme_exit(void)
MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
MODULE_DESCRIPTION("NVMe host PCIe transport driver");
module_init(nvme_init);
module_exit(nvme_exit);
......@@ -1410,6 +1410,8 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
struct nvme_ns *ns = rq->q->queuedata;
struct bio *bio = rq->bio;
struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
u32 xfer_len;
int nr;
req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs);
......@@ -1422,8 +1424,7 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
if (unlikely(nr))
goto mr_put;
nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_bdev->bd_disk), c,
req->mr->sig_attrs, ns->head->pi_type);
nvme_rdma_set_sig_attrs(bi, c, req->mr->sig_attrs, ns->head->pi_type);
nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask);
ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
......@@ -1441,7 +1442,11 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
IB_ACCESS_REMOTE_WRITE;
sg->addr = cpu_to_le64(req->mr->iova);
put_unaligned_le24(req->mr->length, sg->length);
xfer_len = req->mr->length;
/* Check if PI is added by the HW */
if (!pi_count)
xfer_len += (xfer_len >> bi->interval_exp) * ns->head->pi_size;
put_unaligned_le24(xfer_len, sg->length);
put_unaligned_le32(req->mr->rkey, sg->key);
sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
......@@ -1946,14 +1951,13 @@ static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq)
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_queue *queue = req->queue;
struct nvme_rdma_ctrl *ctrl = queue->ctrl;
u8 opcode = req->req.cmd->common.opcode;
u8 fctype = req->req.cmd->fabrics.fctype;
struct nvme_command *cmd = req->req.cmd;
int qid = nvme_rdma_queue_idx(queue);
dev_warn(ctrl->ctrl.device,
"I/O tag %d (%04x) opcode %#x (%s) QID %d timeout\n",
rq->tag, nvme_cid(rq), opcode,
nvme_opcode_str(qid, opcode, fctype), qid);
rq->tag, nvme_cid(rq), cmd->common.opcode,
nvme_fabrics_opcode_str(qid, cmd), qid);
if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_LIVE) {
/*
......@@ -2296,8 +2300,8 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
if (ret)
goto out_uninit_ctrl;
dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs, hostnqn: %s\n",
nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr, opts->host->nqn);
mutex_lock(&nvme_rdma_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
......@@ -2400,4 +2404,5 @@ static void __exit nvme_rdma_cleanup_module(void)
module_init(nvme_rdma_init_module);
module_exit(nvme_rdma_cleanup_module);
MODULE_DESCRIPTION("NVMe host RDMA transport driver");
MODULE_LICENSE("GPL v2");
......@@ -35,6 +35,62 @@ static ssize_t nvme_sysfs_rescan(struct device *dev,
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
static ssize_t nvme_adm_passthru_err_log_enabled_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
return sysfs_emit(buf,
ctrl->passthru_err_log_enabled ? "on\n" : "off\n");
}
static ssize_t nvme_adm_passthru_err_log_enabled_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
int err;
bool passthru_err_log_enabled;
err = kstrtobool(buf, &passthru_err_log_enabled);
if (err)
return -EINVAL;
ctrl->passthru_err_log_enabled = passthru_err_log_enabled;
return count;
}
static ssize_t nvme_io_passthru_err_log_enabled_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvme_ns *n = dev_get_drvdata(dev);
return sysfs_emit(buf, n->passthru_err_log_enabled ? "on\n" : "off\n");
}
static ssize_t nvme_io_passthru_err_log_enabled_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct nvme_ns *ns = dev_get_drvdata(dev);
int err;
bool passthru_err_log_enabled;
err = kstrtobool(buf, &passthru_err_log_enabled);
if (err)
return -EINVAL;
ns->passthru_err_log_enabled = passthru_err_log_enabled;
return count;
}
static struct device_attribute dev_attr_adm_passthru_err_log_enabled = \
__ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \
nvme_adm_passthru_err_log_enabled_show, nvme_adm_passthru_err_log_enabled_store);
static struct device_attribute dev_attr_io_passthru_err_log_enabled = \
__ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \
nvme_io_passthru_err_log_enabled_show, nvme_io_passthru_err_log_enabled_store);
static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
{
struct gendisk *disk = dev_to_disk(dev);
......@@ -208,6 +264,7 @@ static struct attribute *nvme_ns_attrs[] = {
&dev_attr_ana_grpid.attr,
&dev_attr_ana_state.attr,
#endif
&dev_attr_io_passthru_err_log_enabled.attr,
NULL,
};
......@@ -311,6 +368,7 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
char *buf)
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
unsigned state = (unsigned)nvme_ctrl_state(ctrl);
static const char *const state_name[] = {
[NVME_CTRL_NEW] = "new",
[NVME_CTRL_LIVE] = "live",
......@@ -321,9 +379,8 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
[NVME_CTRL_DEAD] = "dead",
};
if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
state_name[ctrl->state])
return sysfs_emit(buf, "%s\n", state_name[ctrl->state]);
if (state < ARRAY_SIZE(state_name) && state_name[state])
return sysfs_emit(buf, "%s\n", state_name[state]);
return sysfs_emit(buf, "unknown state\n");
}
......@@ -655,6 +712,7 @@ static struct attribute *nvme_dev_attrs[] = {
#ifdef CONFIG_NVME_TCP_TLS
&dev_attr_tls_key.attr,
#endif
&dev_attr_adm_passthru_err_log_enabled.attr,
NULL
};
......
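The new attributes parse input with kstrtobool() and report state as "on"/"off". A userspace approximation of that round trip, with a simplified parser that covers only part of the real kstrtobool() grammar:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <errno.h>

/* Simplified stand-in for the kernel's kstrtobool(). */
static int kstrtobool(const char *s, bool *res)
{
	if (!strcmp(s, "1") || !strcmp(s, "y") || !strcmp(s, "on"))
		*res = true;
	else if (!strcmp(s, "0") || !strcmp(s, "n") || !strcmp(s, "off"))
		*res = false;
	else
		return -EINVAL;
	return 0;
}

int main(void)
{
	bool passthru_err_log_enabled;

	if (kstrtobool("on", &passthru_err_log_enabled))
		return 1;
	/* Mirrors the show() side: sysfs_emit(buf, enabled ? "on\n" : "off\n") */
	printf("%s\n", passthru_err_log_enabled ? "on" : "off");
	return 0;
}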
......@@ -2428,13 +2428,13 @@ static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype;
struct nvme_command *cmd = &pdu->cmd;
int qid = nvme_tcp_queue_id(req->queue);
dev_warn(ctrl->device,
"I/O tag %d (%04x) type %d opcode %#x (%s) QID %d timeout\n",
rq->tag, nvme_cid(rq), pdu->hdr.type, opc,
nvme_opcode_str(qid, opc, fctype), qid);
rq->tag, nvme_cid(rq), pdu->hdr.type, cmd->common.opcode,
nvme_fabrics_opcode_str(qid, cmd), qid);
if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) {
/*
......@@ -2753,8 +2753,8 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
if (ret)
goto out_uninit_ctrl;
dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp, hostnqn: %s\n",
nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr, opts->host->nqn);
mutex_lock(&nvme_tcp_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
......@@ -2826,4 +2826,5 @@ static void __exit nvme_tcp_cleanup_module(void)
module_init(nvme_tcp_init_module);
module_exit(nvme_tcp_cleanup_module);
MODULE_DESCRIPTION("NVMe host TCP transport driver");
MODULE_LICENSE("GPL v2");
......@@ -248,7 +248,7 @@ void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
continue;
nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
NVME_AER_NOTICE_NS_CHANGED,
NVME_LOG_CHANGED_NS);
}
......@@ -265,7 +265,7 @@ void nvmet_send_ana_event(struct nvmet_subsys *subsys,
continue;
if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
continue;
nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
}
mutex_unlock(&subsys->lock);
......@@ -1705,4 +1705,5 @@ static void __exit nvmet_exit(void)
module_init(nvmet_init);
module_exit(nvmet_exit);
MODULE_DESCRIPTION("NVMe target core framework");
MODULE_LICENSE("GPL v2");
......@@ -21,7 +21,7 @@ static void __nvmet_disc_changed(struct nvmet_port *port,
if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE))
return;
nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC);
}
......
......@@ -111,6 +111,8 @@ struct nvmet_fc_tgtport {
struct nvmet_fc_port_entry *pe;
struct kref ref;
u32 max_sg_cnt;
struct work_struct put_work;
};
struct nvmet_fc_port_entry {
......@@ -145,7 +147,6 @@ struct nvmet_fc_tgt_queue {
struct list_head avail_defer_list;
struct workqueue_struct *work_q;
struct kref ref;
struct rcu_head rcu;
/* array of fcp_iods */
struct nvmet_fc_fcp_iod fod[] __counted_by(sqsize);
} __aligned(sizeof(unsigned long long));
......@@ -166,10 +167,9 @@ struct nvmet_fc_tgt_assoc {
struct nvmet_fc_hostport *hostport;
struct nvmet_fc_ls_iod *rcv_disconn;
struct list_head a_list;
struct nvmet_fc_tgt_queue __rcu *queues[NVMET_NR_QUEUES + 1];
struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
struct kref ref;
struct work_struct del_work;
struct rcu_head rcu;
};
......@@ -249,6 +249,13 @@ static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_put_tgtport_work(struct work_struct *work)
{
struct nvmet_fc_tgtport *tgtport =
container_of(work, struct nvmet_fc_tgtport, put_work);
nvmet_fc_tgtport_put(tgtport);
}
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_fcp_iod *fod);
......@@ -360,7 +367,7 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
if (!lsop->req_queued) {
spin_unlock_irqrestore(&tgtport->lock, flags);
return;
goto out_putwork;
}
list_del(&lsop->lsreq_list);
......@@ -373,7 +380,8 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
(lsreq->rqstlen + lsreq->rsplen),
DMA_BIDIRECTIONAL);
nvmet_fc_tgtport_put(tgtport);
out_putwork:
queue_work(nvmet_wq, &tgtport->put_work);
}
static int
......@@ -489,8 +497,7 @@ nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
* message is normal. Otherwise, send unless the hostport has
* already been invalidated by the lldd.
*/
if (!tgtport->ops->ls_req || !assoc->hostport ||
assoc->hostport->invalid)
if (!tgtport->ops->ls_req || assoc->hostport->invalid)
return;
lsop = kzalloc((sizeof(*lsop) +
......@@ -802,14 +809,11 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
if (!queue)
return NULL;
if (!nvmet_fc_tgt_a_get(assoc))
goto out_free_queue;
queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
assoc->tgtport->fc_target_port.port_num,
assoc->a_id, qid);
if (!queue->work_q)
goto out_a_put;
goto out_free_queue;
queue->qid = qid;
queue->sqsize = sqsize;
......@@ -831,15 +835,13 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
goto out_fail_iodlist;
WARN_ON(assoc->queues[qid]);
rcu_assign_pointer(assoc->queues[qid], queue);
assoc->queues[qid] = queue;
return queue;
out_fail_iodlist:
nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
destroy_workqueue(queue->work_q);
out_a_put:
nvmet_fc_tgt_a_put(assoc);
out_free_queue:
kfree(queue);
return NULL;
......@@ -852,15 +854,11 @@ nvmet_fc_tgt_queue_free(struct kref *ref)
struct nvmet_fc_tgt_queue *queue =
container_of(ref, struct nvmet_fc_tgt_queue, ref);
rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL);
nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
nvmet_fc_tgt_a_put(queue->assoc);
destroy_workqueue(queue->work_q);
kfree_rcu(queue, rcu);
kfree(queue);
}
static void
......@@ -969,7 +967,7 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
rcu_read_lock();
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (association_id == assoc->association_id) {
queue = rcu_dereference(assoc->queues[qid]);
queue = assoc->queues[qid];
if (queue &&
(!atomic_read(&queue->connected) ||
!nvmet_fc_tgt_q_get(queue)))
......@@ -1078,8 +1076,6 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
/* new allocation not needed */
kfree(newhost);
newhost = match;
/* no new allocation - release reference */
nvmet_fc_tgtport_put(tgtport);
} else {
newhost->tgtport = tgtport;
newhost->hosthandle = hosthandle;
......@@ -1094,23 +1090,54 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
}
static void
nvmet_fc_delete_assoc(struct work_struct *work)
nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
nvmet_fc_delete_target_assoc(assoc);
nvmet_fc_tgt_a_put(assoc);
}
static void
nvmet_fc_delete_assoc_work(struct work_struct *work)
{
struct nvmet_fc_tgt_assoc *assoc =
container_of(work, struct nvmet_fc_tgt_assoc, del_work);
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
nvmet_fc_delete_target_assoc(assoc);
nvmet_fc_tgt_a_put(assoc);
nvmet_fc_delete_assoc(assoc);
nvmet_fc_tgtport_put(tgtport);
}
static void
nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
nvmet_fc_tgtport_get(assoc->tgtport);
queue_work(nvmet_wq, &assoc->del_work);
}
static bool
nvmet_fc_assoc_exists(struct nvmet_fc_tgtport *tgtport, u64 association_id)
{
struct nvmet_fc_tgt_assoc *a;
list_for_each_entry_rcu(a, &tgtport->assoc_list, a_list) {
if (association_id == a->association_id)
return true;
}
return false;
}
static struct nvmet_fc_tgt_assoc *
nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
struct nvmet_fc_tgt_assoc *assoc;
unsigned long flags;
bool done;
u64 ran;
int idx;
bool needrandom = true;
if (!tgtport->pe)
return NULL;
assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
if (!assoc)
......@@ -1120,43 +1147,35 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
if (idx < 0)
goto out_free_assoc;
if (!nvmet_fc_tgtport_get(tgtport))
goto out_ida;
assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
if (IS_ERR(assoc->hostport))
goto out_put;
goto out_ida;
assoc->tgtport = tgtport;
assoc->a_id = idx;
INIT_LIST_HEAD(&assoc->a_list);
kref_init(&assoc->ref);
INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work);
atomic_set(&assoc->terminating, 0);
while (needrandom) {
done = false;
do {
get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
ran = ran << BYTES_FOR_QID_SHIFT;
spin_lock_irqsave(&tgtport->lock, flags);
needrandom = false;
list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
if (ran == tmpassoc->association_id) {
needrandom = true;
break;
}
}
if (!needrandom) {
rcu_read_lock();
if (!nvmet_fc_assoc_exists(tgtport, ran)) {
assoc->association_id = ran;
list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
done = true;
}
rcu_read_unlock();
spin_unlock_irqrestore(&tgtport->lock, flags);
}
} while (!done);
return assoc;
out_put:
nvmet_fc_tgtport_put(tgtport);
out_ida:
ida_free(&tgtport->assoc_cnt, idx);
out_free_assoc:
......@@ -1172,13 +1191,18 @@ nvmet_fc_target_assoc_free(struct kref *ref)
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
struct nvmet_fc_ls_iod *oldls;
unsigned long flags;
int i;
for (i = NVMET_NR_QUEUES; i >= 0; i--) {
if (assoc->queues[i])
nvmet_fc_delete_target_queue(assoc->queues[i]);
}
/* Send Disconnect now that all i/o has completed */
nvmet_fc_xmt_disconnect_assoc(assoc);
nvmet_fc_free_hostport(assoc->hostport);
spin_lock_irqsave(&tgtport->lock, flags);
list_del_rcu(&assoc->a_list);
oldls = assoc->rcv_disconn;
spin_unlock_irqrestore(&tgtport->lock, flags);
/* if pending Rcv Disconnect Association LS, send rsp now */
......@@ -1188,8 +1212,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
dev_info(tgtport->dev,
"{%d:%d} Association freed\n",
tgtport->fc_target_port.port_num, assoc->a_id);
kfree_rcu(assoc, rcu);
nvmet_fc_tgtport_put(tgtport);
kfree(assoc);
}
static void
......@@ -1208,7 +1231,7 @@ static void
nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
struct nvmet_fc_tgt_queue *queue;
unsigned long flags;
int i, terminating;
terminating = atomic_xchg(&assoc->terminating, 1);
......@@ -1217,29 +1240,21 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
if (terminating)
return;
spin_lock_irqsave(&tgtport->lock, flags);
list_del_rcu(&assoc->a_list);
spin_unlock_irqrestore(&tgtport->lock, flags);
for (i = NVMET_NR_QUEUES; i >= 0; i--) {
rcu_read_lock();
queue = rcu_dereference(assoc->queues[i]);
if (!queue) {
rcu_read_unlock();
continue;
}
synchronize_rcu();
if (!nvmet_fc_tgt_q_get(queue)) {
rcu_read_unlock();
continue;
}
rcu_read_unlock();
nvmet_fc_delete_target_queue(queue);
nvmet_fc_tgt_q_put(queue);
/* ensure all in-flight I/Os have been processed */
for (i = NVMET_NR_QUEUES; i >= 0; i--) {
if (assoc->queues[i])
flush_workqueue(assoc->queues[i]->work_q);
}
dev_info(tgtport->dev,
"{%d:%d} Association deleted\n",
tgtport->fc_target_port.port_num, assoc->a_id);
nvmet_fc_tgt_a_put(assoc);
}
static struct nvmet_fc_tgt_assoc *
......@@ -1415,6 +1430,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
kref_init(&newrec->ref);
ida_init(&newrec->assoc_cnt);
newrec->max_sg_cnt = template->max_sgl_segments;
INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work);
ret = nvmet_fc_alloc_ls_iodlist(newrec);
if (ret) {
......@@ -1492,9 +1508,8 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (!nvmet_fc_tgt_a_get(assoc))
continue;
if (!queue_work(nvmet_wq, &assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
nvmet_fc_schedule_delete_assoc(assoc);
nvmet_fc_tgt_a_put(assoc);
}
rcu_read_unlock();
}
......@@ -1540,16 +1555,14 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
spin_lock_irqsave(&tgtport->lock, flags);
list_for_each_entry_safe(assoc, next,
&tgtport->assoc_list, a_list) {
if (!assoc->hostport ||
assoc->hostport->hosthandle != hosthandle)
if (assoc->hostport->hosthandle != hosthandle)
continue;
if (!nvmet_fc_tgt_a_get(assoc))
continue;
assoc->hostport->invalid = 1;
noassoc = false;
if (!queue_work(nvmet_wq, &assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
nvmet_fc_schedule_delete_assoc(assoc);
nvmet_fc_tgt_a_put(assoc);
}
spin_unlock_irqrestore(&tgtport->lock, flags);
......@@ -1581,7 +1594,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
rcu_read_lock();
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
queue = rcu_dereference(assoc->queues[0]);
queue = assoc->queues[0];
if (queue && queue->nvme_sq.ctrl == ctrl) {
if (nvmet_fc_tgt_a_get(assoc))
found_ctrl = true;
......@@ -1593,9 +1606,8 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
nvmet_fc_tgtport_put(tgtport);
if (found_ctrl) {
if (!queue_work(nvmet_wq, &assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
nvmet_fc_schedule_delete_assoc(assoc);
nvmet_fc_tgt_a_put(assoc);
return;
}
......@@ -1625,6 +1637,8 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
/* terminate any outstanding associations */
__nvmet_fc_free_assocs(tgtport);
flush_workqueue(nvmet_wq);
/*
* should terminate LS's as well. However, LS's will be generated
* at the tail end of association termination, so they likely don't
......@@ -1870,9 +1884,6 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
FCNVME_LS_DISCONNECT_ASSOC);
/* release get taken in nvmet_fc_find_target_assoc */
nvmet_fc_tgt_a_put(assoc);
/*
* The rules for LS response says the response cannot
* go back until ABTS's have been sent for all outstanding
......@@ -1887,8 +1898,6 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
assoc->rcv_disconn = iod;
spin_unlock_irqrestore(&tgtport->lock, flags);
nvmet_fc_delete_target_assoc(assoc);
if (oldls) {
dev_info(tgtport->dev,
"{%d:%d} Multiple Disconnect Association LS's "
......@@ -1904,6 +1913,9 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
nvmet_fc_xmt_ls_rsp(tgtport, oldls);
}
nvmet_fc_schedule_delete_assoc(assoc);
nvmet_fc_tgt_a_put(assoc);
return false;
}
......@@ -2540,8 +2552,9 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
fod->req.cmd = &fod->cmdiubuf.sqe;
fod->req.cqe = &fod->rspiubuf.cqe;
if (tgtport->pe)
fod->req.port = tgtport->pe->port;
if (!tgtport->pe)
goto transport_error;
fod->req.port = tgtport->pe->port;
/* clear any response payload */
memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
......@@ -2902,6 +2915,9 @@ nvmet_fc_remove_port(struct nvmet_port *port)
nvmet_fc_portentry_unbind(pe);
/* terminate any outstanding associations */
__nvmet_fc_free_assocs(pe->tgtport);
kfree(pe);
}
......@@ -2933,6 +2949,9 @@ static int __init nvmet_fc_init_module(void)
static void __exit nvmet_fc_exit_module(void)
{
/* ensure any shutdown operation, e.g. delete ctrls have finished */
flush_workqueue(nvmet_wq);
/* sanity check - all lports should be removed */
if (!list_empty(&nvmet_fc_target_list))
pr_warn("%s: targetport list not empty\n", __func__);
......@@ -2945,4 +2964,5 @@ static void __exit nvmet_fc_exit_module(void)
module_init(nvmet_fc_init_module);
module_exit(nvmet_fc_exit_module);
MODULE_DESCRIPTION("NVMe target FC transport driver");
MODULE_LICENSE("GPL v2");
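The restructured allocation loop in nvmet_fc_alloc_target_assoc() above replaces the needrandom flag with a do/while that publishes a candidate id only if no live association already uses it. Stripped of locking, the control flow is (a sketch; the RNG and the id table are stand-ins):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>

#define NASSOC 16

static uint64_t assoc_ids[NASSOC];
static int nassoc;

/* Stand-in for nvmet_fc_assoc_exists(): linear scan of live ids. */
static bool assoc_exists(uint64_t id)
{
	for (int i = 0; i < nassoc; i++)
		if (assoc_ids[i] == id)
			return true;
	return false;
}

int main(void)
{
	bool done = false;
	uint64_t ran;

	/* Same shape as the restructured loop: draw a candidate, publish it
	 * only if unused. (The kernel keeps the low bytes clear for the qid
	 * and performs the existence check under RCU.) */
	do {
		ran = ((uint64_t)rand() << 32) | (uint64_t)rand();
		if (!assoc_exists(ran)) {
			assoc_ids[nassoc++] = ran;
			done = true;
		}
	} while (!done);

	printf("association id: 0x%llx\n", (unsigned long long)ran);
	return 0;
}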
......@@ -358,7 +358,7 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
if (!rport->targetport) {
tls_req->status = -ECONNREFUSED;
spin_lock(&rport->lock);
list_add_tail(&rport->ls_list, &tls_req->ls_list);
list_add_tail(&tls_req->ls_list, &rport->ls_list);
spin_unlock(&rport->lock);
queue_work(nvmet_wq, &rport->ls_work);
return ret;
......@@ -391,7 +391,7 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
if (remoteport) {
rport = remoteport->private;
spin_lock(&rport->lock);
list_add_tail(&rport->ls_list, &tls_req->ls_list);
list_add_tail(&tls_req->ls_list, &rport->ls_list);
spin_unlock(&rport->lock);
queue_work(nvmet_wq, &rport->ls_work);
}
......@@ -446,7 +446,7 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
if (!tport->remoteport) {
tls_req->status = -ECONNREFUSED;
spin_lock(&tport->lock);
list_add_tail(&tport->ls_list, &tls_req->ls_list);
list_add_tail(&tls_req->ls_list, &tport->ls_list);
spin_unlock(&tport->lock);
queue_work(nvmet_wq, &tport->ls_work);
return ret;
......@@ -1650,4 +1650,5 @@ static void __exit fcloop_exit(void)
module_init(fcloop_init);
module_exit(fcloop_exit);
MODULE_DESCRIPTION("NVMe target FC loop transport driver");
MODULE_LICENSE("GPL v2");
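The fcloop fix above is purely an argument-order swap: list_add_tail() takes the new entry first and the list head second. A compact sketch showing why the order matters (the list implementation mirrors the kernel's):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* Kernel semantics: insert @entry before @head, i.e. at the tail. */
static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

struct fcloop_lsreq { int status; struct list_head ls_list; };

int main(void)
{
	struct list_head ls_list = LIST_HEAD_INIT(ls_list); /* rport->ls_list */
	struct fcloop_lsreq tls_req = { .status = -111 /* -ECONNREFUSED */ };

	/* Correct order: new entry first, list head second. The buggy calls
	 * passed (&rport->ls_list, &tls_req->ls_list), splicing the per-port
	 * list onto the request instead of queueing the request. */
	list_add_tail(&tls_req.ls_list, &ls_list);

	printf("queued: %s\n", ls_list.next == &tls_req.ls_list ? "yes" : "no");
	return 0;
}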
......@@ -400,7 +400,7 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
}
nvme_quiesce_admin_queue(&ctrl->ctrl);
if (ctrl->ctrl.state == NVME_CTRL_LIVE)
if (nvme_ctrl_state(&ctrl->ctrl) == NVME_CTRL_LIVE)
nvme_disable_ctrl(&ctrl->ctrl, true);
nvme_cancel_admin_tagset(&ctrl->ctrl);
......@@ -434,8 +434,10 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
nvme_loop_shutdown_ctrl(ctrl);
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
if (state != NVME_CTRL_DELETING &&
state != NVME_CTRL_DELETING_NOIO)
/* state change failure for non-deleted ctrl? */
WARN_ON_ONCE(1);
return;
......@@ -688,5 +690,6 @@ static void __exit nvme_loop_cleanup_module(void)
module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);
MODULE_DESCRIPTION("NVMe target loop transport driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */
......@@ -2104,5 +2104,6 @@ static void __exit nvmet_rdma_exit(void)
module_init(nvmet_rdma_init);
module_exit(nvmet_rdma_exit);
MODULE_DESCRIPTION("NVMe target RDMA transport driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */
......@@ -2216,10 +2216,12 @@ static void __exit nvmet_tcp_exit(void)
flush_workqueue(nvmet_wq);
destroy_workqueue(nvmet_tcp_wq);
ida_destroy(&nvmet_tcp_queue_ida);
}
module_init(nvmet_tcp_init);
module_exit(nvmet_tcp_exit);
MODULE_DESCRIPTION("NVMe target TCP transport driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */
......@@ -816,12 +816,6 @@ struct nvme_reservation_status_ext {
struct nvme_registered_ctrl_ext regctl_eds[];
};
enum nvme_async_event_type {
NVME_AER_TYPE_ERROR = 0,
NVME_AER_TYPE_SMART = 1,
NVME_AER_TYPE_NOTICE = 2,
};
/* I/O commands */
enum nvme_opcode {
......@@ -1818,7 +1812,7 @@ struct nvme_command {
};
};
static inline bool nvme_is_fabrics(struct nvme_command *cmd)
static inline bool nvme_is_fabrics(const struct nvme_command *cmd)
{
return cmd->common.opcode == nvme_fabrics_command;
}
......@@ -1837,7 +1831,7 @@ struct nvme_error_slot {
__u8 resv2[24];
};
static inline bool nvme_is_write(struct nvme_command *cmd)
static inline bool nvme_is_write(const struct nvme_command *cmd)
{
/*
* What a mess...
......