Commit 894e2164 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A small collection of fixes that should go into this cycle.

   - a pull request from Christoph for NVMe, which ended up being
     manually applied to avoid pulling in newer bits in master. Mostly
     fibre channel fixes from James, but also a few fixes from Jon and
     Vijay

   - a pull request from Konrad, with just a single fix for xen-blkback
     from Gustavo.

   - a fuseblk bdi fix from Jan, fixing a regression in this series with
     the dynamic backing devices.

   - a blktrace fix from Shaohua, replacing sscanf() with kstrtoull().

   - a request leak fix for drbd from Lars, fixing a regression in the
     last series with the kref changes. This will go to stable as well"

* 'for-linus' of git://git.kernel.dk/linux-block:
  nvmet: release the sq ref on rdma read errors
  nvmet-fc: remove target cpu scheduling flag
  nvme-fc: stop queues on error detection
  nvme-fc: require target or discovery role for fc-nvme targets
  nvme-fc: correct port role bits
  nvme: unmap CMB and remove sysfs file in reset path
  blktrace: fix integer parse
  fuseblk: Fix warning in super_setup_bdi_name()
  block: xen-blkback: add null check to avoid null pointer dereference
  drbd: fix request leak introduced by locking/atomic, kref: Kill kref_sub()
parents ef82f1ad 549f01ae
@@ -315,24 +315,32 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
 }
 
 /* still holds resource->req_lock */
-static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
+static void drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
 {
 	struct drbd_device *device = req->device;
 	D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));
 
+	if (!put)
+		return;
+
 	if (!atomic_sub_and_test(put, &req->completion_ref))
-		return 0;
+		return;
 
 	drbd_req_complete(req, m);
 
+	/* local completion may still come in later,
+	 * we need to keep the req object around. */
+	if (req->rq_state & RQ_LOCAL_ABORTED)
+		return;
+
 	if (req->rq_state & RQ_POSTPONED) {
 		/* don't destroy the req object just yet,
 		 * but queue it for retry */
 		drbd_restart_request(req);
-		return 0;
+		return;
 	}
 
-	return 1;
+	kref_put(&req->kref, drbd_req_destroy);
 }
 
 static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
@@ -519,12 +527,8 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
 	if (req->i.waiting)
 		wake_up(&device->misc_wait);
 
-	if (c_put) {
-		if (drbd_req_put_completion_ref(req, m, c_put))
-			kref_put(&req->kref, drbd_req_destroy);
-	} else {
-		kref_put(&req->kref, drbd_req_destroy);
-	}
+	drbd_req_put_completion_ref(req, m, c_put);
+	kref_put(&req->kref, drbd_req_destroy);
 }
 
 static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
@@ -1366,8 +1370,7 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
 	}
 
 out:
-	if (drbd_req_put_completion_ref(req, &m, 1))
-		kref_put(&req->kref, drbd_req_destroy);
+	drbd_req_put_completion_ref(req, &m, 1);
 	spin_unlock_irq(&resource->req_lock);
 
 	/* Even though above is a kref_put(), this is safe.
...
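The drbd hunks above change the ownership contract of the put helper: drbd_req_put_completion_ref() used to return 1 when the caller had to drop the request's kref, and both call sites had to branch on that return value; after the fix the helper performs the final kref_put() itself (and tolerates put == 0), so the callers shrink to a single unconditional call. Below is a minimal userspace sketch, not drbd code, contrasting the two conventions; struct obj, obj_put_check() and obj_put() are made-up names for illustration.

/* Minimal userspace sketch (not drbd code): two refcount "put" conventions. */
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int refs;
};

static void obj_destroy(struct obj *o)
{
	free(o);
}

/* Convention A: return nonzero when the caller must free (the old contract). */
static int obj_put_check(struct obj *o)
{
	return atomic_fetch_sub(&o->refs, 1) == 1;
}

/* Convention B: the helper itself frees on the last put (the new contract). */
static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refs, 1) == 1)
		obj_destroy(o);
}

int main(void)
{
	struct obj *a = malloc(sizeof(*a));
	atomic_init(&a->refs, 1);
	if (obj_put_check(a))	/* every caller must remember this branch ... */
		obj_destroy(a);	/* ... or the object leaks */

	struct obj *b = malloc(sizeof(*b));
	atomic_init(&b->refs, 1);
	obj_put(b);		/* the last put always frees; nothing to forget */
	return 0;
}

Convention B is harder to misuse because no caller can forget, or double-apply, the final free; a forgotten branch is exactly the failure mode a return-value contract invites.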
@@ -504,11 +504,13 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
 
 	dev_set_drvdata(&dev->dev, NULL);
 
-	if (be->blkif)
+	if (be->blkif) {
 		xen_blkif_disconnect(be->blkif);
 
-	/* Put the reference we set in xen_blkif_alloc(). */
-	xen_blkif_put(be->blkif);
+		/* Put the reference we set in xen_blkif_alloc(). */
+		xen_blkif_put(be->blkif);
+	}
+
 	kfree(be->mode);
 	kfree(be);
 	return 0;
...
@@ -1754,6 +1754,10 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
 	dev_info(ctrl->ctrl.device,
 		"NVME-FC{%d}: resetting controller\n", ctrl->cnum);
 
+	/* stop the queues on error, cleanup is in reset thread */
+	if (ctrl->queue_count > 1)
+		nvme_stop_queues(&ctrl->ctrl);
+
 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
 		dev_err(ctrl->ctrl.device,
 			"NVME-FC{%d}: error_recovery: Couldn't change state "
@@ -2720,6 +2724,12 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	unsigned long flags;
 	int ret, idx;
 
+	if (!(rport->remoteport.port_role &
+	      (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
+		ret = -EBADR;
+		goto out_fail;
+	}
+
 	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
 	if (!ctrl) {
 		ret = -ENOMEM;
...
@@ -1506,6 +1506,11 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
 	if (dev->cmb) {
 		iounmap(dev->cmb);
 		dev->cmb = NULL;
+		if (dev->cmbsz) {
+			sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
+						     &dev_attr_cmb.attr, NULL);
+			dev->cmbsz = 0;
+		}
 	}
 }
@@ -1779,6 +1784,7 @@ static void nvme_pci_disable(struct nvme_dev *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
+	nvme_release_cmb(dev);
 	pci_free_irq_vectors(pdev);
 
 	if (pci_is_enabled(pdev)) {
@@ -2184,7 +2190,6 @@ static void nvme_remove(struct pci_dev *pdev)
 	nvme_dev_disable(dev, true);
 	nvme_dev_remove_admin(dev);
 	nvme_free_queues(dev, 0);
-	nvme_release_cmb(dev);
 	nvme_release_prp_pools(dev);
 	nvme_dev_unmap(dev);
 	nvme_put_ctrl(&dev->ctrl);
...
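The PCIe hunks above move CMB teardown from remove time into the disable path: nvme_release_cmb() now also removes the cmb sysfs attribute, guarded by dev->cmbsz (which it clears so a second call is a no-op), and nvme_pci_disable() calls it so the mapping and the attribute go away on every reset rather than only at nvme_remove(). A rough userspace sketch of that "make teardown idempotent so it can run on every disable" pattern; struct dev_state and release_resources() are made-up names, assuming nothing about the real driver beyond what the diff shows.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct dev_state {
	void *mapping;		/* stands in for the ioremap()ed CMB */
	bool  attr_visible;	/* stands in for the cmb sysfs attribute */
};

static void release_resources(struct dev_state *d)
{
	if (d->mapping) {
		free(d->mapping);
		d->mapping = NULL;
	}
	/* Undo the attribute exactly once, like the dev->cmbsz guard above. */
	if (d->attr_visible) {
		printf("attribute removed\n");
		d->attr_visible = false;
	}
}

int main(void)
{
	struct dev_state d = { .mapping = malloc(16), .attr_visible = true };

	release_resources(&d);	/* reset/disable path */
	release_resources(&d);	/* final remove: already released, harmless no-op */
	return 0;
}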
@@ -529,6 +529,12 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 }
 EXPORT_SYMBOL_GPL(nvmet_req_init);
 
+void nvmet_req_uninit(struct nvmet_req *req)
+{
+	percpu_ref_put(&req->sq->ref);
+}
+EXPORT_SYMBOL_GPL(nvmet_req_uninit);
+
 static inline bool nvmet_cc_en(u32 cc)
 {
 	return cc & 0x1;
...
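nvmet_req_uninit() is the counterpart to the submission-queue reference taken in nvmet_req_init(): a transport that has to throw a request away before completing it must still drop that reference, otherwise the queue's percpu ref can never drain at teardown (the RDMA hunk further down calls it when an RDMA READ fails). A tiny userspace sketch of the pairing, not nvmet code, with made-up names (struct queue, req_init/req_complete/req_uninit) and a plain atomic counter standing in for the percpu ref.

#include <stdatomic.h>
#include <stdio.h>

struct queue {
	atomic_int inflight;	/* stands in for sq->ref */
};

static void req_init(struct queue *q)     { atomic_fetch_add(&q->inflight, 1); }
static void req_complete(struct queue *q) { atomic_fetch_sub(&q->inflight, 1); }
static void req_uninit(struct queue *q)   { atomic_fetch_sub(&q->inflight, 1); }

int main(void)
{
	struct queue q;

	atomic_init(&q.inflight, 0);

	req_init(&q);
	req_complete(&q);	/* normal path releases the reference */

	req_init(&q);
	req_uninit(&q);		/* error before completion: release it here too */

	/* teardown waits for this to reach zero; skipping req_uninit() would hang it */
	printf("inflight at teardown: %d\n", atomic_load(&q.inflight));
	return 0;
}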
@@ -517,9 +517,7 @@ nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
 {
 	int cpu, idx, cnt;
 
-	if (!(tgtport->ops->target_features &
-			NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED) ||
-	    tgtport->ops->max_hw_queues == 1)
+	if (tgtport->ops->max_hw_queues == 1)
 		return WORK_CPU_UNBOUND;
 
 	/* Simple cpu selection based on qid modulo active cpu count */
...
@@ -698,7 +698,6 @@ static struct nvmet_fc_target_template tgttemplate = {
 	.dma_boundary		= FCLOOP_DMABOUND_4G,
 	/* optional features */
 	.target_features	= NVMET_FCTGTFEAT_CMD_IN_ISR |
-				  NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED |
 				  NVMET_FCTGTFEAT_OPDONE_IN_ISR,
 	/* sizes of additional private data for data structures */
 	.target_priv_sz		= sizeof(struct fcloop_tport),
...
@@ -261,6 +261,7 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);
 
 bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 		struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops);
+void nvmet_req_uninit(struct nvmet_req *req);
 void nvmet_req_complete(struct nvmet_req *req, u16 status);
 
 void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
...
@@ -567,6 +567,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
 	rsp->n_rdma = 0;
 
 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+		nvmet_req_uninit(&rsp->req);
 		nvmet_rdma_release_rsp(rsp);
 		if (wc->status != IB_WC_WR_FLUSH_ERR) {
 			pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
...
@@ -764,7 +764,6 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
 	lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
 	lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
 	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
-					   NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED |
 					   NVMET_FCTGTFEAT_CMD_IN_ISR |
 					   NVMET_FCTGTFEAT_OPDONE_IN_ISR;
...
@@ -975,8 +975,15 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
 	int err;
 	char *suffix = "";
 
-	if (sb->s_bdev)
+	if (sb->s_bdev) {
 		suffix = "-fuseblk";
+		/*
+		 * sb->s_bdi points to blkdev's bdi however we want to redirect
+		 * it to our private bdi...
+		 */
+		bdi_put(sb->s_bdi);
+		sb->s_bdi = &noop_backing_dev_info;
+	}
 	err = super_setup_bdi_name(sb, "%u:%u%s", MAJOR(fc->dev),
 				   MINOR(fc->dev), suffix);
 	if (err)
...
@@ -27,8 +27,8 @@
 
 /* FC Port role bitmask - can merge with FC Port Roles in fc transport */
 #define FC_PORT_ROLE_NVME_INITIATOR	0x10
-#define FC_PORT_ROLE_NVME_TARGET	0x11
-#define FC_PORT_ROLE_NVME_DISCOVERY	0x12
+#define FC_PORT_ROLE_NVME_TARGET	0x20
+#define FC_PORT_ROLE_NVME_DISCOVERY	0x40
 
 
 /**
@@ -642,15 +642,7 @@ enum {
 	 * sequence in one LLDD operation. Errors during Data
	 * sequence transmit must not allow RSP sequence to be sent.
 	 */
-	NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED = (1 << 1),
-		/* Bit 1: When 0, the LLDD will deliver FCP CMD
-		 * on the CPU it should be affinitized to. Thus work will
-		 * be scheduled on the cpu received on. When 1, the LLDD
-		 * may not deliver the CMD on the CPU it should be worked
-		 * on. The transport should pick a cpu to schedule the work
-		 * on.
-		 */
-	NVMET_FCTGTFEAT_CMD_IN_ISR = (1 << 2),
+	NVMET_FCTGTFEAT_CMD_IN_ISR = (1 << 1),
 		/* Bit 2: When 0, the LLDD is calling the cmd rcv handler
 		 * in a non-isr context, allowing the transport to finish
 		 * op completion in the calling context. When 1, the LLDD
@@ -658,7 +650,7 @@ enum {
 		 * requiring the transport to transition to a workqueue
 		 * for op completion.
 		 */
-	NVMET_FCTGTFEAT_OPDONE_IN_ISR = (1 << 3),
+	NVMET_FCTGTFEAT_OPDONE_IN_ISR = (1 << 2),
 		/* Bit 3: When 0, the LLDD is calling the op done handler
 		 * in a non-isr context, allowing the transport to finish
 		 * op completion in the calling context. When 1, the LLDD
...
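The role #defines above are the point of the "correct port role bits" patch: 0x11 and 0x12 are not single bits, and both contain the 0x10 initiator bit, so a mask test like the new check in nvme_fc_init_ctrl() (require target or discovery role) would also match an initiator-only remote port. The standalone check below is not kernel code; it just reuses the constant values to show the difference.

/* Demonstrates why non-power-of-two role values break bitmask tests. */
#include <stdio.h>

#define OLD_INITIATOR	0x10
#define OLD_TARGET	0x11
#define OLD_DISCOVERY	0x12

#define NEW_INITIATOR	0x10
#define NEW_TARGET	0x20
#define NEW_DISCOVERY	0x40

int main(void)
{
	unsigned int port_role = OLD_INITIATOR;	/* initiator-only port */

	/* With the old values this test wrongly succeeds (0x10 & 0x13 != 0). */
	printf("old check: %s\n",
	       (port_role & (OLD_DISCOVERY | OLD_TARGET)) ? "passes" : "fails");

	/* With single-bit values the same test behaves as intended. */
	port_role = NEW_INITIATOR;
	printf("new check: %s\n",
	       (port_role & (NEW_DISCOVERY | NEW_TARGET)) ? "passes" : "fails");
	return 0;
}

The second and third hunks of that header simply renumber the remaining NVMET_FCTGTFEAT_* flags so they stay at consecutive bit positions once the CPU-scheduling flag is dropped.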
@@ -1662,14 +1662,14 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
 		goto out;
 
 	if (attr == &dev_attr_act_mask) {
-		if (sscanf(buf, "%llx", &value) != 1) {
+		if (kstrtoull(buf, 0, &value)) {
 			/* Assume it is a list of trace category names */
 			ret = blk_trace_str2mask(buf);
 			if (ret < 0)
 				goto out;
 			value = ret;
 		}
-	} else if (sscanf(buf, "%llu", &value) != 1)
+	} else if (kstrtoull(buf, 0, &value))
 		goto out;
 
 	ret = -ENXIO;
...
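The blktrace change works because kstrtoull() only succeeds when the whole string is a valid number (base 0 lets a 0x or 0 prefix select hex or octal), whereas sscanf() is satisfied by a partial match: with the old "%llx" format an input meant as a category name, such as "discard", scans as the hex digit d and returns 1, so the blk_trace_str2mask() fallback above never runs. A userspace demonstration, not kernel code, with strtoull() plus an end-pointer check standing in for kstrtoull().

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Strict parse: the entire string must be a valid number. */
static int strict_parse(const char *s, unsigned long long *val)
{
	char *end;

	errno = 0;
	*val = strtoull(s, &end, 0);
	if (errno || end == s || *end != '\0')
		return -EINVAL;	/* not a complete, valid number */
	return 0;
}

int main(void)
{
	const char *input = "discard";	/* meant as a trace category name */
	unsigned long long value;

	if (sscanf(input, "%llx", &value) == 1)
		printf("sscanf: parsed \"%s\" as 0x%llx\n", input, value);

	if (strict_parse(input, &value))
		printf("strict parse: rejected \"%s\", fall back to category names\n",
		       input);
	return 0;
}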