Commit a1e21033 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - an NVMe fix from Gabriel, fixing a suspend/resume issue on some
   setups

 - addition of a few missing entries in the block queue sysfs
   documentation, from Joe

 - a fix for a sparse shadow warning for the bvec iterator, from
   Johannes

 - a fix for a writeback deadlock involving RAID issuing barriers and
   the plug not being flushed when we wake up the flusher threads, from
   Konstantin

 - a set of patches for the NVMe target/loop/rdma code, from Roland and
   Sagi

* 'for-linus' of git://git.kernel.dk/linux-block:
  bvec: avoid variable shadowing warning
  doc: update block/queue-sysfs.txt entries
  nvme: Suspend all queues before deletion
  mm, writeback: flush plugged IO in wakeup_flusher_threads()
  nvme-rdma: Remove unused includes
  nvme-rdma: start async event handler after reconnecting to a controller
  nvmet: Fix controller serial number inconsistency
  nvmet-rdma: Don't use the inline buffer in order to avoid allocation for small reads
  nvmet-rdma: Correctly handle RDMA device hot removal
  nvme-rdma: Make sure to shutdown the controller if we can
  nvme-loop: Remove duplicate call to nvme_remove_namespaces
  nvme-rdma: Free the I/O tags when we delete the controller
  nvme-rdma: Remove duplicate call to nvme_remove_namespaces
  nvme-rdma: Fix device removal handling
  nvme-rdma: Queue ns scanning after a successful reconnection
  nvme-rdma: Don't leak uninitialized memory in connect request private data
parents f31494bd 1ea049b2
@@ -14,6 +14,12 @@ add_random (RW)
 This file allows to turn off the disk entropy contribution. Default
 value of this file is '1'(on).
 
+dax (RO)
+--------
+This file indicates whether the device supports Direct Access (DAX),
+used by CPU-addressable storage to bypass the pagecache. It shows '1'
+if true, '0' if not.
+
 discard_granularity (RO)
 -----------------------
 This shows the size of internal allocation of the device in bytes, if
@@ -46,6 +52,12 @@ hw_sector_size (RO)
 -------------------
 This is the hardware sector size of the device, in bytes.
 
+io_poll (RW)
+------------
+When read, this file shows the total number of block IO polls and how
+many returned success. Writing '0' to this file will disable polling
+for this device. Writing any non-zero value will enable this feature.
+
 iostats (RW)
 -------------
 This file is used to control (on/off) the iostats accounting of the
@@ -151,5 +163,11 @@ device state. This means that it might not be safe to toggle the
 setting from "write back" to "write through", since that will also
 eliminate cache flushes issued by the kernel.
 
+write_same_max_bytes (RO)
+-------------------------
+This is the number of bytes the device can write in a single write-same
+command. A value of '0' means write-same is not supported by this
+device.
+
 Jens Axboe <jens.axboe@oracle.com>, February 2009
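For illustration only, here is a minimal userspace sketch that exercises the queue attributes documented above. The device name "vda" and the hard-coded sysfs paths are assumptions; adjust them for the system at hand.

#include <stdio.h>
#include <string.h>

/* Read one attribute from /sys/block/vda/queue/ into buf. */
static int read_queue_attr(const char *name, char *buf, size_t len)
{
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/block/vda/queue/%s", name);
        f = fopen(path, "r");
        if (!f)
                return -1;
        if (!fgets(buf, (int)len, f))
                buf[0] = '\0';
        fclose(f);
        buf[strcspn(buf, "\n")] = '\0';
        return 0;
}

int main(void)
{
        char val[64];
        FILE *f;

        if (read_queue_attr("dax", val, sizeof(val)) == 0)
                printf("dax: %s\n", val);                  /* '1' or '0' */
        if (read_queue_attr("write_same_max_bytes", val, sizeof(val)) == 0)
                printf("write_same_max_bytes: %s\n", val); /* '0' means unsupported */

        /* io_poll is RW: writing '0' disables polling for this device. */
        f = fopen("/sys/block/vda/queue/io_poll", "w");
        if (f) {
                fputs("0", f);
                fclose(f);
        }
        return 0;
}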
@@ -1543,15 +1543,10 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
        reinit_completion(&dev->ioq_wait);
  retry:
        timeout = ADMIN_TIMEOUT;
-       for (; i > 0; i--) {
-               struct nvme_queue *nvmeq = dev->queues[i];
-
-               if (!pass)
-                       nvme_suspend_queue(nvmeq);
-               if (nvme_delete_queue(nvmeq, opcode))
+       for (; i > 0; i--, sent++)
+               if (nvme_delete_queue(dev->queues[i], opcode))
                        break;
-               ++sent;
-       }
+
        while (sent--) {
                timeout = wait_for_completion_io_timeout(&dev->ioq_wait, timeout);
                if (timeout == 0)
@@ -1693,11 +1688,12 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
                nvme_stop_queues(&dev->ctrl);
                csts = readl(dev->bar + NVME_REG_CSTS);
        }
+
+       for (i = dev->queue_count - 1; i > 0; i--)
+               nvme_suspend_queue(dev->queues[i]);
+
        if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
-               for (i = dev->queue_count - 1; i >= 0; i--) {
-                       struct nvme_queue *nvmeq = dev->queues[i];
-                       nvme_suspend_queue(nvmeq);
-               }
+               nvme_suspend_queue(dev->queues[0]);
        } else {
                nvme_disable_io_queues(dev);
                nvme_disable_admin_queue(dev, shutdown);
...
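As an aside, the refactored loop in nvme_disable_io_queues() above is just send/reap bookkeeping: issue one delete command per queue while counting how many were accepted, then wait for exactly that many completions even if issuing stopped early. A toy standalone model of that pattern (the queue count and the simulated failure are invented for the example):

#include <stdio.h>
#include <stdbool.h>

#define NR_QUEUES 4

static bool issue_delete(int q)
{
        /* pretend queue 2 refuses the delete command */
        return q != 2;
}

int main(void)
{
        int i = NR_QUEUES, sent = 0;

        /* sent++ runs only after a successful body, so failures aren't counted */
        for (; i > 0; i--, sent++)
                if (!issue_delete(i))
                        break;

        printf("issued %d delete commands\n", sent);

        /* reap exactly as many completions as were issued */
        while (sent--)
                printf("reaped one completion, %d left\n", sent);

        return 0;
}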
@@ -12,13 +12,11 @@
  * more details.
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/string.h>
-#include <linux/jiffies.h>
 #include <linux/atomic.h>
 #include <linux/blk-mq.h>
 #include <linux/types.h>
@@ -26,7 +24,6 @@
 #include <linux/mutex.h>
 #include <linux/scatterlist.h>
 #include <linux/nvme.h>
-#include <linux/t10-pi.h>
 #include <asm/unaligned.h>
 
 #include <rdma/ib_verbs.h>
@@ -169,7 +166,6 @@ MODULE_PARM_DESC(register_always,
 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
                struct rdma_cm_event *event);
 static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
-static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl);
 
 /* XXX: really should move to a generic header sooner or later.. */
 static inline void put_unaligned_le24(u32 val, u8 *p)
@@ -687,11 +683,6 @@ static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
        list_del(&ctrl->list);
        mutex_unlock(&nvme_rdma_ctrl_mutex);
 
-       if (ctrl->ctrl.tagset) {
-               blk_cleanup_queue(ctrl->ctrl.connect_q);
-               blk_mq_free_tag_set(&ctrl->tag_set);
-               nvme_rdma_dev_put(ctrl->device);
-       }
        kfree(ctrl->queues);
        nvmf_free_options(nctrl->opts);
 free_ctrl:
@@ -748,8 +739,11 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
        WARN_ON_ONCE(!changed);
 
-       if (ctrl->queue_count > 1)
+       if (ctrl->queue_count > 1) {
                nvme_start_queues(&ctrl->ctrl);
+               nvme_queue_scan(&ctrl->ctrl);
+               nvme_queue_async_events(&ctrl->ctrl);
+       }
 
        dev_info(ctrl->ctrl.device, "Successfully reconnected\n");
@@ -1269,7 +1263,7 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
 {
        struct nvme_rdma_ctrl *ctrl = queue->ctrl;
        struct rdma_conn_param param = { };
-       struct nvme_rdma_cm_req priv;
+       struct nvme_rdma_cm_req priv = { };
        int ret;
 
        param.qp_num = queue->qp->qp_num;
@@ -1318,37 +1312,39 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
  * that caught the event. Since we hold the callout until the controller
  * deletion is completed, we'll deadlock if the controller deletion will
  * call rdma_destroy_id on this queue's cm_id. Thus, we claim ownership
- * of destroying this queue before-hand, destroy the queue resources
- * after the controller deletion completed with the exception of destroying
- * the cm_id implicitely by returning a non-zero rc to the callout.
+ * of destroying this queue before-hand, destroy the queue resources,
+ * then queue the controller deletion which won't destroy this queue and
+ * we destroy the cm_id implicitely by returning a non-zero rc to the callout.
  */
 static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue)
 {
        struct nvme_rdma_ctrl *ctrl = queue->ctrl;
-       int ret, ctrl_deleted = 0;
+       int ret = 0;
 
-       /* First disable the queue so ctrl delete won't free it */
-       if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags))
-               goto out;
-
-       /* delete the controller */
-       ret = __nvme_rdma_del_ctrl(ctrl);
-       if (!ret) {
-               dev_warn(ctrl->ctrl.device,
-                       "Got rdma device removal event, deleting ctrl\n");
-               flush_work(&ctrl->delete_work);
+       /* Own the controller deletion */
+       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
+               return 0;
 
-               /* Return non-zero so the cm_id will destroy implicitly */
-               ctrl_deleted = 1;
+       dev_warn(ctrl->ctrl.device,
+               "Got rdma device removal event, deleting ctrl\n");
 
-               /* Free this queue ourselves */
-               rdma_disconnect(queue->cm_id);
-               ib_drain_qp(queue->qp);
-               nvme_rdma_destroy_queue_ib(queue);
+       /* Get rid of reconnect work if its running */
+       cancel_delayed_work_sync(&ctrl->reconnect_work);
+
+       /* Disable the queue so ctrl delete won't free it */
+       if (test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) {
+               /* Free this queue ourselves */
+               nvme_rdma_stop_queue(queue);
+               nvme_rdma_destroy_queue_ib(queue);
+
+               /* Return non-zero so the cm_id will destroy implicitly */
+               ret = 1;
        }
 
-out:
-       return ctrl_deleted;
+       /* Queue controller deletion */
+       queue_work(nvme_rdma_wq, &ctrl->delete_work);
+       flush_work(&ctrl->delete_work);
+       return ret;
 }
 
 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
@@ -1648,7 +1644,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
                nvme_rdma_free_io_queues(ctrl);
        }
 
-       if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+       if (test_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[0].flags))
                nvme_shutdown_ctrl(&ctrl->ctrl);
 
        blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
@@ -1657,15 +1653,27 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
        nvme_rdma_destroy_admin_queue(ctrl);
 }
 
+static void __nvme_rdma_remove_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
+{
+       nvme_uninit_ctrl(&ctrl->ctrl);
+       if (shutdown)
+               nvme_rdma_shutdown_ctrl(ctrl);
+
+       if (ctrl->ctrl.tagset) {
+               blk_cleanup_queue(ctrl->ctrl.connect_q);
+               blk_mq_free_tag_set(&ctrl->tag_set);
+               nvme_rdma_dev_put(ctrl->device);
+       }
+
+       nvme_put_ctrl(&ctrl->ctrl);
+}
+
 static void nvme_rdma_del_ctrl_work(struct work_struct *work)
 {
        struct nvme_rdma_ctrl *ctrl = container_of(work,
                        struct nvme_rdma_ctrl, delete_work);
 
-       nvme_remove_namespaces(&ctrl->ctrl);
-       nvme_rdma_shutdown_ctrl(ctrl);
-       nvme_uninit_ctrl(&ctrl->ctrl);
-       nvme_put_ctrl(&ctrl->ctrl);
+       __nvme_rdma_remove_ctrl(ctrl, true);
 }
 
 static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl)
@@ -1698,9 +1706,7 @@ static void nvme_rdma_remove_ctrl_work(struct work_struct *work)
        struct nvme_rdma_ctrl *ctrl = container_of(work,
                        struct nvme_rdma_ctrl, delete_work);
 
-       nvme_remove_namespaces(&ctrl->ctrl);
-       nvme_uninit_ctrl(&ctrl->ctrl);
-       nvme_put_ctrl(&ctrl->ctrl);
+       __nvme_rdma_remove_ctrl(ctrl, false);
 }
 
 static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
@@ -1739,6 +1745,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
        if (ctrl->queue_count > 1) {
                nvme_start_queues(&ctrl->ctrl);
                nvme_queue_scan(&ctrl->ctrl);
+               nvme_queue_async_events(&ctrl->ctrl);
        }
 
        return;
...
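The "own the controller deletion" step in nvme_rdma_device_unplug() above amounts to winning a single atomic state transition, so teardown runs exactly once no matter how many paths race to delete the controller. A standalone sketch of that idea in plain C11 atomics (not the nvme state machine itself):

#include <stdatomic.h>
#include <stdio.h>

enum ctrl_state { CTRL_LIVE, CTRL_DELETING };

static _Atomic int ctrl_state = CTRL_LIVE;

static int claim_deletion(void)
{
        int expected = CTRL_LIVE;

        /* analogous to nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING) */
        return atomic_compare_exchange_strong(&ctrl_state, &expected,
                                              CTRL_DELETING);
}

int main(void)
{
        /* first caller (e.g. the device-removal path) wins and tears down */
        if (claim_deletion())
                printf("unplug path: owns deletion, tearing down\n");

        /* a racing caller (e.g. a user-requested delete) sees it lost */
        if (!claim_deletion())
                printf("delete path: already deleting, nothing to do\n");

        return 0;
}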
@@ -13,7 +13,6 @@
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/module.h>
-#include <linux/random.h>
 #include <generated/utsrelease.h>
 #include "nvmet.h"
@@ -83,7 +82,6 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 {
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvme_id_ctrl *id;
-       u64 serial;
        u16 status = 0;
 
        id = kzalloc(sizeof(*id), GFP_KERNEL);
@@ -96,10 +94,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
        id->vid = 0;
        id->ssvid = 0;
 
-       /* generate a random serial number as our controllers are ephemeral: */
-       get_random_bytes(&serial, sizeof(serial));
        memset(id->sn, ' ', sizeof(id->sn));
-       snprintf(id->sn, sizeof(id->sn), "%llx", serial);
+       snprintf(id->sn, sizeof(id->sn), "%llx", ctrl->serial);
 
        memset(id->mn, ' ', sizeof(id->mn));
        strncpy((char *)id->mn, "Linux", sizeof(id->mn));
...
@@ -13,6 +13,7 @@
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/module.h>
+#include <linux/random.h>
 #include "nvmet.h"
 
 static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
@@ -728,6 +729,9 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
        memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
        memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
 
+       /* generate a random serial number as our controllers are ephemeral: */
+       get_random_bytes(&ctrl->serial, sizeof(ctrl->serial));
+
        kref_init(&ctrl->ref);
        ctrl->subsys = subsys;
...
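The serial-number fix above generates the random value once, when the controller is allocated, and only formats it in the Identify handler, so repeated Identify Controller commands now report the same serial. A self-contained model of the fixed behaviour, with rand() standing in for get_random_bytes():

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>

struct toy_ctrl {
        uint64_t serial;
};

static void toy_alloc_ctrl(struct toy_ctrl *ctrl)
{
        /* stand-in for get_random_bytes(&ctrl->serial, sizeof(ctrl->serial)) */
        ctrl->serial = ((uint64_t)rand() << 32) | (uint64_t)rand();
}

static void toy_identify(const struct toy_ctrl *ctrl, char *sn, size_t len)
{
        /* formatting only; no fresh randomness per command */
        snprintf(sn, len, "%llx", (unsigned long long)ctrl->serial);
}

int main(void)
{
        struct toy_ctrl ctrl;
        char sn1[21], sn2[21];

        srand((unsigned)time(NULL));
        toy_alloc_ctrl(&ctrl);

        toy_identify(&ctrl, sn1, sizeof(sn1));
        toy_identify(&ctrl, sn2, sizeof(sn2));

        /* both calls report the same serial */
        printf("%s\n%s\n", sn1, sn2);
        return 0;
}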
@@ -414,9 +414,8 @@ static void nvme_loop_del_ctrl_work(struct work_struct *work)
        struct nvme_loop_ctrl *ctrl = container_of(work,
                                struct nvme_loop_ctrl, delete_work);
 
-       nvme_remove_namespaces(&ctrl->ctrl);
-       nvme_loop_shutdown_ctrl(ctrl);
        nvme_uninit_ctrl(&ctrl->ctrl);
+       nvme_loop_shutdown_ctrl(ctrl);
        nvme_put_ctrl(&ctrl->ctrl);
 }
@@ -501,7 +500,6 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
        nvme_loop_destroy_admin_queue(ctrl);
 out_disable:
        dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
-       nvme_remove_namespaces(&ctrl->ctrl);
        nvme_uninit_ctrl(&ctrl->ctrl);
        nvme_put_ctrl(&ctrl->ctrl);
 }
...
@@ -113,6 +113,7 @@ struct nvmet_ctrl {
        struct mutex            lock;
        u64                     cap;
+       u64                     serial;
        u32                     cc;
        u32                     csts;
...
@@ -77,6 +77,7 @@ enum nvmet_rdma_queue_state {
        NVMET_RDMA_Q_CONNECTING,
        NVMET_RDMA_Q_LIVE,
        NVMET_RDMA_Q_DISCONNECTING,
+       NVMET_RDMA_IN_DEVICE_REMOVAL,
 };
 
 struct nvmet_rdma_queue {
@@ -615,15 +616,10 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
        if (!len)
                return 0;
 
-       /* use the already allocated data buffer if possible */
-       if (len <= NVMET_RDMA_INLINE_DATA_SIZE && rsp->queue->host_qid) {
-               nvmet_rdma_use_inline_sg(rsp, len, 0);
-       } else {
-               status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
-                               len);
-               if (status)
-                       return status;
-       }
+       status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
+                       len);
+       if (status)
+               return status;
 
        ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
                        rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
@@ -984,7 +980,10 @@ static void nvmet_rdma_release_queue_work(struct work_struct *w)
        struct nvmet_rdma_device *dev = queue->dev;
 
        nvmet_rdma_free_queue(queue);
-       rdma_destroy_id(cm_id);
+
+       if (queue->state != NVMET_RDMA_IN_DEVICE_REMOVAL)
+               rdma_destroy_id(cm_id);
+
        kref_put(&dev->ref, nvmet_rdma_free_dev);
 }
@@ -1233,8 +1232,9 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
        switch (queue->state) {
        case NVMET_RDMA_Q_CONNECTING:
        case NVMET_RDMA_Q_LIVE:
-               disconnect = true;
                queue->state = NVMET_RDMA_Q_DISCONNECTING;
+       case NVMET_RDMA_IN_DEVICE_REMOVAL:
+               disconnect = true;
                break;
        case NVMET_RDMA_Q_DISCONNECTING:
                break;
@@ -1272,6 +1272,62 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
        schedule_work(&queue->release_work);
 }
 
+/**
+ * nvme_rdma_device_removal() - Handle RDMA device removal
+ * @queue:      nvmet rdma queue (cm id qp_context)
+ * @addr:       nvmet address (cm_id context)
+ *
+ * DEVICE_REMOVAL event notifies us that the RDMA device is about
+ * to unplug so we should take care of destroying our RDMA resources.
+ * This event will be generated for each allocated cm_id.
+ *
+ * Note that this event can be generated on a normal queue cm_id
+ * and/or a device bound listener cm_id (where in this case
+ * queue will be null).
+ *
+ * we claim ownership on destroying the cm_id. For queues we move
+ * the queue state to NVMET_RDMA_IN_DEVICE_REMOVAL and for port
+ * we nullify the priv to prevent double cm_id destruction and destroying
+ * the cm_id implicitely by returning a non-zero rc to the callout.
+ */
+static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
+               struct nvmet_rdma_queue *queue)
+{
+       unsigned long flags;
+
+       if (!queue) {
+               struct nvmet_port *port = cm_id->context;
+
+               /*
+                * This is a listener cm_id. Make sure that
+                * future remove_port won't invoke a double
+                * cm_id destroy. use atomic xchg to make sure
+                * we don't compete with remove_port.
+                */
+               if (xchg(&port->priv, NULL) != cm_id)
+                       return 0;
+       } else {
+               /*
+                * This is a queue cm_id. Make sure that
+                * release queue will not destroy the cm_id
+                * and schedule all ctrl queues removal (only
+                * if the queue is not disconnecting already).
+                */
+               spin_lock_irqsave(&queue->state_lock, flags);
+               if (queue->state != NVMET_RDMA_Q_DISCONNECTING)
+                       queue->state = NVMET_RDMA_IN_DEVICE_REMOVAL;
+               spin_unlock_irqrestore(&queue->state_lock, flags);
+               nvmet_rdma_queue_disconnect(queue);
+               flush_scheduled_work();
+       }
+
+       /*
+        * We need to return 1 so that the core will destroy
+        * it's own ID. What a great API design..
+        */
+       return 1;
+}
+
 static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
                struct rdma_cm_event *event)
 {
@@ -1294,20 +1350,11 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
                break;
        case RDMA_CM_EVENT_ADDR_CHANGE:
        case RDMA_CM_EVENT_DISCONNECTED:
-       case RDMA_CM_EVENT_DEVICE_REMOVAL:
        case RDMA_CM_EVENT_TIMEWAIT_EXIT:
-               /*
-                * We can get the device removal callback even for a
-                * CM ID that we aren't actually using. In that case
-                * the context pointer is NULL, so we shouldn't try
-                * to disconnect a non-existing queue. But we also
-                * need to return 1 so that the core will destroy
-                * it's own ID. What a great API design..
-                */
-               if (queue)
-                       nvmet_rdma_queue_disconnect(queue);
-               else
-                       ret = 1;
+               nvmet_rdma_queue_disconnect(queue);
+               break;
+       case RDMA_CM_EVENT_DEVICE_REMOVAL:
+               ret = nvmet_rdma_device_removal(cm_id, queue);
                break;
        case RDMA_CM_EVENT_REJECTED:
        case RDMA_CM_EVENT_UNREACHABLE:
@@ -1396,8 +1443,9 @@ static int nvmet_rdma_add_port(struct nvmet_port *port)
 
 static void nvmet_rdma_remove_port(struct nvmet_port *port)
 {
-       struct rdma_cm_id *cm_id = port->priv;
+       struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);
 
-       rdma_destroy_id(cm_id);
+       if (cm_id)
+               rdma_destroy_id(cm_id);
 }
...
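The xchg(&port->priv, NULL) used in nvmet_rdma_remove_port() and nvmet_rdma_device_removal() above is a "whoever swaps the pointer out frees it" pattern: both paths atomically exchange the pointer with NULL, and only the one that gets the old value back destroys the cm_id. A standalone C11 sketch of the same idea (hypothetical names, not the nvmet structures):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_cm_id { int dummy; };

static _Atomic(struct toy_cm_id *) port_priv;

static void destroy_once(const char *who)
{
        /* analogous to xchg(&port->priv, NULL) */
        struct toy_cm_id *cm_id = atomic_exchange(&port_priv, NULL);

        if (cm_id) {
                printf("%s destroys the cm_id\n", who);
                free(cm_id);
        } else {
                printf("%s: already gone, skipping\n", who);
        }
}

int main(void)
{
        atomic_store(&port_priv, malloc(sizeof(struct toy_cm_id)));

        destroy_once("device removal");   /* wins the exchange */
        destroy_once("remove_port");      /* sees NULL, does nothing */
        return 0;
}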
@@ -1949,6 +1949,12 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
 {
        struct backing_dev_info *bdi;
 
+       /*
+        * If we are expecting writeback progress we must submit plugged IO.
+        */
+       if (blk_needs_flush_plug(current))
+               blk_schedule_flush_plug(current);
+
        if (!nr_pages)
                nr_pages = get_nr_dirty_pages();
...
@@ -74,7 +74,8 @@ static inline void bvec_iter_advance(const struct bio_vec *bv,
                "Attempted to advance past end of bvec iter\n");
 
        while (bytes) {
-               unsigned len = min(bytes, bvec_iter_len(bv, *iter));
+               unsigned iter_len = bvec_iter_len(bv, *iter);
+               unsigned len = min(bytes, iter_len);
 
                bytes -= len;
                iter->bi_size -= len;
...
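For context on the warning itself: min() in the kernel is a statement-expression macro that declares local temporaries, and bvec_iter_len() uses min() internally, so min(bytes, bvec_iter_len(...)) nests one expansion inside the other and the inner temporaries shadow the outer ones. A minimal standalone reproduction with simplified macros (not the kernel's), plus the same hoisting fix used in the patch:

#include <stdio.h>

/* simplified stand-in for the kernel's min(): declares _a and _b locally */
#define min(x, y) ({                    \
        typeof(x) _a = (x);             \
        typeof(y) _b = (y);             \
        _a < _b ? _a : _b; })

/* stand-in for bvec_iter_len(), which also expands min() internally */
#define seg_len(total, done)    min((total), (total) - (done))

int main(void)
{
        unsigned bytes = 100, total = 64, done = 16;

        /* nested expansion: the inner min()'s _a/_b shadow the outer ones,
         * so building with -Wshadow warns here */
        unsigned len_warn = min(bytes, seg_len(total, done));

        /* the fix: evaluate the inner expression first, then take one min() */
        unsigned seg = seg_len(total, done);
        unsigned len_ok = min(bytes, seg);

        printf("%u %u\n", len_warn, len_ok);
        return 0;
}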