Commit 1b3c47c1 authored by Sagi Grimberg, committed by Jens Axboe

nvme: Log the ctrl device name instead of the underlying pci device name

Having the ctrl name "nvmeX" seems much more friendly than
the underlying device name. Also, with other nvme transports
such as the soon-to-come nvme-loop we don't have an underlying
device, so it doesn't make sense to make one up.

In order to help match an instance name to a pci function,
we add an info print in nvme_probe.
Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Acked-by: Keith Busch <keith.busch@intel.com>

Manually fixed up the hunk in nvme_cancel_queue_ios().
Signed-off-by: Jens Axboe <axboe@fb.com>
parent f4f0f63e
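
For context, here is a minimal sketch (not part of the patch) of what the two struct device pointers in struct nvme_ctrl mean for log output: ctrl->dev is the parent device handed in by the transport (the PCI function for nvme-pci), while ctrl->device is the controller character device registered as nvmeX, so messages printed against it carry the controller instance name.

/* Hypothetical illustration only; this helper is not in the patch. */
#include <linux/device.h>
#include "nvme.h"	/* struct nvme_ctrl: ->dev (parent) and ->device (char dev) */

static void nvme_log_example(struct nvme_ctrl *ctrl)
{
	/*
	 * Old style: prefixed with the parent device name, which for
	 * nvme-pci looks roughly like "nvme 0000:01:00.0: example warning".
	 */
	dev_warn(ctrl->dev, "example warning\n");

	/*
	 * New style: prefixed with the controller instance, roughly
	 * "nvme nvme0: example warning"; this also works for transports
	 * (e.g. the upcoming nvme-loop) that have no underlying PCI device.
	 */
	dev_warn(ctrl->device, "example warning\n");
}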

drivers/nvme/host/core.c

@@ -557,8 +557,8 @@ static int nvme_revalidate_disk(struct gendisk *disk)
 	unsigned short bs;
 
 	if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
-		dev_warn(ns->ctrl->dev, "%s: Identify failure nvme%dn%d\n",
-				__func__, ns->ctrl->instance, ns->ns_id);
+		dev_warn(disk_to_dev(ns->disk), "%s: Identify failure\n",
+				__func__);
 		return -ENODEV;
 	}
 	if (id->ncap == 0) {
@@ -568,7 +568,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
 
 	if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) {
 		if (nvme_nvm_register(ns->queue, disk->disk_name)) {
-			dev_warn(ns->ctrl->dev,
+			dev_warn(disk_to_dev(ns->disk),
 				"%s: LightNVM init failure\n", __func__);
 			kfree(id);
 			return -ENODEV;
@@ -741,7 +741,7 @@ static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
 		if (fatal_signal_pending(current))
 			return -EINTR;
 		if (time_after(jiffies, timeout)) {
-			dev_err(ctrl->dev,
+			dev_err(ctrl->device,
 				"Device not ready; aborting %s\n", enabled ?
 						"initialisation" : "reset");
 			return -ENODEV;
@@ -781,7 +781,7 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
 	int ret;
 
 	if (page_shift < dev_page_min) {
-		dev_err(ctrl->dev,
+		dev_err(ctrl->device,
 			"Minimum device page size %u too large for host (%u)\n",
 			1 << dev_page_min, 1 << page_shift);
 		return -ENODEV;
@@ -822,7 +822,7 @@ int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
 		if (fatal_signal_pending(current))
 			return -EINTR;
 		if (time_after(jiffies, timeout)) {
-			dev_err(ctrl->dev,
+			dev_err(ctrl->device,
 				"Device shutdown incomplete; abort shutdown\n");
 			return -ENODEV;
 		}
@@ -844,13 +844,13 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 
 	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
 	if (ret) {
-		dev_err(ctrl->dev, "Reading VS failed (%d)\n", ret);
+		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
 		return ret;
 	}
 
 	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
 	if (ret) {
-		dev_err(ctrl->dev, "Reading CAP failed (%d)\n", ret);
+		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
 		return ret;
 	}
 	page_shift = NVME_CAP_MPSMIN(cap) + 12;
@@ -860,7 +860,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 
 	ret = nvme_identify_ctrl(ctrl, &id);
 	if (ret) {
-		dev_err(ctrl->dev, "Identify Controller failed (%d)\n", ret);
+		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
 		return -EIO;
 	}
 
@@ -937,13 +937,13 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
 
 	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
 	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
-		dev_warn(ctrl->dev,
+		dev_warn(ctrl->device,
 			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
 		ret = -EINVAL;
 		goto out_unlock;
 	}
 
-	dev_warn(ctrl->dev,
+	dev_warn(ctrl->device,
 		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
 	kref_get(&ns->kref);
 	mutex_unlock(&ctrl->namespaces_mutex);
@@ -969,7 +969,7 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
 	case NVME_IOCTL_IO_CMD:
 		return nvme_dev_user_cmd(ctrl, argp);
 	case NVME_IOCTL_RESET:
-		dev_warn(ctrl->dev, "resetting controller\n");
+		dev_warn(ctrl->device, "resetting controller\n");
 		return ctrl->ops->reset_ctrl(ctrl);
 	case NVME_IOCTL_SUBSYS_RESET:
 		return nvme_reset_subsystem(ctrl);
drivers/nvme/host/pci.c

@@ -299,10 +299,10 @@ static void nvme_complete_async_event(struct nvme_dev *dev,
 
 	switch (result & 0xff07) {
 	case NVME_AER_NOTICE_NS_CHANGED:
-		dev_info(dev->dev, "rescanning\n");
+		dev_info(dev->ctrl.device, "rescanning\n");
 		queue_work(nvme_workq, &dev->scan_work);
 	default:
-		dev_warn(dev->dev, "async event result %08x\n", result);
+		dev_warn(dev->ctrl.device, "async event result %08x\n", result);
 	}
 }
 
@@ -708,7 +708,7 @@ static void nvme_complete_rq(struct request *req)
 	}
 
 	if (unlikely(iod->aborted)) {
-		dev_warn(dev->dev,
+		dev_warn(dev->ctrl.device,
 			"completing aborted command with status: %04x\n",
 			req->errors);
 	}
@@ -740,7 +740,7 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
 			*tag = -1;
 
 		if (unlikely(cqe.command_id >= nvmeq->q_depth)) {
-			dev_warn(nvmeq->q_dmadev,
+			dev_warn(nvmeq->dev->ctrl.device,
 				"invalid id %d completed on queue %d\n",
 				cqe.command_id, le16_to_cpu(cqe.sq_id));
 			continue;
@@ -908,7 +908,8 @@ static void abort_endio(struct request *req, int error)
 	u32 result = (u32)(uintptr_t)req->special;
 	u16 status = req->errors;
 
-	dev_warn(nvmeq->q_dmadev, "Abort status:%x result:%x", status, result);
+	dev_warn(nvmeq->dev->ctrl.device,
+		"Abort status:%x result:%x", status, result);
 	atomic_inc(&nvmeq->dev->ctrl.abort_limit);
 
 	blk_mq_free_request(req);
@@ -929,7 +930,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	 * shutdown, so we return BLK_EH_HANDLED.
 	 */
 	if (test_bit(NVME_CTRL_RESETTING, &dev->flags)) {
-		dev_warn(dev->dev,
+		dev_warn(dev->ctrl.device,
 			 "I/O %d QID %d timeout, disable controller\n",
 			 req->tag, nvmeq->qid);
 		nvme_dev_disable(dev, false);
@@ -943,7 +944,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	 * returned to the driver, or if this is the admin queue.
 	 */
 	if (!nvmeq->qid || iod->aborted) {
-		dev_warn(dev->dev,
+		dev_warn(dev->ctrl.device,
 			 "I/O %d QID %d timeout, reset controller\n",
 			 req->tag, nvmeq->qid);
 		nvme_dev_disable(dev, false);
@@ -969,8 +970,9 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	cmd.abort.cid = req->tag;
 	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
 
-	dev_warn(nvmeq->q_dmadev, "I/O %d QID %d timeout, aborting\n",
-			req->tag, nvmeq->qid);
+	dev_warn(nvmeq->dev->ctrl.device,
+		"I/O %d QID %d timeout, aborting\n",
+		 req->tag, nvmeq->qid);
 
 	abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
 			BLK_MQ_REQ_NOWAIT);
@@ -999,7 +1001,7 @@ static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved)
 	if (!blk_mq_request_started(req))
 		return;
 
-	dev_warn(nvmeq->q_dmadev,
+	dev_warn(nvmeq->dev->ctrl.device,
 		 "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid);
 
 	status = NVME_SC_ABORT_REQ;
@@ -1355,7 +1357,7 @@ static int nvme_kthread(void *data)
 			if ((dev->subsystem && (csts & NVME_CSTS_NSSRO)) ||
 							csts & NVME_CSTS_CFS) {
 				if (queue_work(nvme_workq, &dev->reset_work)) {
-					dev_warn(dev->dev,
+					dev_warn(dev->ctrl.device,
 						"Failed status: %x, reset controller\n",
 						readl(dev->bar + NVME_REG_CSTS));
 				}
@@ -1483,7 +1485,8 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	 * access to the admin queue, as that might be only way to fix them up.
 	 */
 	if (result > 0) {
-		dev_err(dev->dev, "Could not set queue count (%d)\n", result);
+		dev_err(dev->ctrl.device,
+			"Could not set queue count (%d)\n", result);
 		nr_io_queues = 0;
 		result = 0;
 	}
@@ -1947,7 +1950,7 @@ static void nvme_reset_work(struct work_struct *work)
 	 * any working I/O queue.
 	 */
 	if (dev->online_queues < 2) {
-		dev_warn(dev->dev, "IO queues not created\n");
+		dev_warn(dev->ctrl.device, "IO queues not created\n");
 		nvme_remove_namespaces(&dev->ctrl);
 	} else {
 		nvme_start_queues(&dev->ctrl);
@@ -1984,7 +1987,7 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work)
 
 static void nvme_remove_dead_ctrl(struct nvme_dev *dev)
 {
-	dev_warn(dev->dev, "Removing after probe failure\n");
+	dev_warn(dev->ctrl.device, "Removing after probe failure\n");
 	kref_get(&dev->ctrl.kref);
 	if (!schedule_work(&dev->remove_work))
 		nvme_put_ctrl(&dev->ctrl);
@@ -2081,6 +2084,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (result)
 		goto release_pools;
 
+	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
+
 	queue_work(nvme_workq, &dev->reset_work);
 	return 0;
 
@@ -2164,7 +2169,7 @@ static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
 	 * shutdown the controller to quiesce. The controller will be restarted
 	 * after the slot reset through driver's slot_reset callback.
 	 */
-	dev_warn(&pdev->dev, "error detected: state:%d\n", state);
+	dev_warn(dev->ctrl.device, "error detected: state:%d\n", state);
 	switch (state) {
 	case pci_channel_io_normal:
 		return PCI_ERS_RESULT_CAN_RECOVER;
@@ -2181,7 +2186,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
 {
 	struct nvme_dev *dev = pci_get_drvdata(pdev);
 
-	dev_info(&pdev->dev, "restart after slot reset\n");
+	dev_info(dev->ctrl.device, "restart after slot reset\n");
 	pci_restore_state(pdev);
 	queue_work(nvme_workq, &dev->reset_work);
 	return PCI_ERS_RESULT_RECOVERED;
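
The dev_info() added in nvme_probe() above keeps the instance-to-PCI-function mapping visible in the kernel log. As a side note, the same mapping can also be recovered from sysfs; a small userspace sketch, not related to the patch, assuming a controller named nvme0:

/*
 * Userspace sketch: resolve which parent device (the PCI function for
 * nvme-pci) backs the controller character device /dev/nvme0.
 */
#include <stdio.h>
#include <limits.h>
#include <unistd.h>
#include <libgen.h>

int main(void)
{
	char target[PATH_MAX];
	ssize_t n;

	/* The class device's "device" link points at its parent. */
	n = readlink("/sys/class/nvme/nvme0/device", target, sizeof(target) - 1);
	if (n < 0) {
		perror("readlink");
		return 1;
	}
	target[n] = '\0';
	printf("nvme0 -> %s\n", basename(target));	/* e.g. 0000:01:00.0 */
	return 0;
}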