Commit 91a26209 authored by Linus Torvalds

Merge tag 'for-linus-20180309' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - a xen-blkfront fix from Bhavesh, fixing multiqueue setup when a
   virtual device is detached and re-attached

 - a few important NVMe fixes, including a revert for a sysfs fix that
   caused some user space confusion

 - two bcache fixes by way of Michael Lyle

 - a loop regression fix for lost writes on DAX

* tag 'for-linus-20180309' of git://git.kernel.dk/linux-block:
  loop: Fix lost writes caused by missing flag
  nvme_fc: rework sqsize handling
  nvme-fabrics: Ignore nr_io_queues option for discovery controllers
  xen-blkfront: move negotiate_mq to cover all cases of new VBDs
  Revert "nvme: create 'slaves' and 'holders' entries for hidden controllers"
  bcache: don't attach backing with duplicate UUID
  bcache: fix crashes in duplicate cache device register
  nvme: pci: pass max vectors as num_possible_cpus() to pci_alloc_irq_vectors
  nvme-pci: Fix EEH failure on ppc
parents b3b25b1d 1d037577
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -266,7 +266,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
 	struct iov_iter i;
 	ssize_t bw;
 
-	iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);
+	iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len);
 
 	file_start_write(file);
 	bw = vfs_iter_write(file, &i, ppos, 0);
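
The loop fix hinges on the fact that an iov_iter carries its own data direction: consumers such as the DAX write path ask the iterator which way data flows instead of being told by the caller, so an iterator built without WRITE made the copy step behave as a read and the write was silently dropped. A minimal userspace sketch of that failure mode, with illustrative names rather than the kernel's real types:

	/* Sketch only: illustrative stand-ins, not the kernel iov_iter API. */
	#include <stdio.h>
	#include <stddef.h>
	#include <sys/types.h>

	#define ITER_BVEC 0x4   /* illustrative flag values */
	#define WRITE     0x1

	struct iter { unsigned int type; size_t len; };

	static void iter_init(struct iter *i, unsigned int type, size_t len)
	{
		i->type = type;  /* the direction travels with the iterator */
		i->len = len;
	}

	/* A consumer that, like the DAX path, trusts the iterator's direction. */
	static ssize_t do_write(const struct iter *i)
	{
		if (!(i->type & WRITE))
			return 0;  /* direction says "read": nothing is copied */
		return (ssize_t)i->len;
	}

	int main(void)
	{
		struct iter i;

		iter_init(&i, ITER_BVEC, 512);          /* buggy: no WRITE flag */
		printf("without WRITE: %zd bytes written\n", do_write(&i));

		iter_init(&i, ITER_BVEC | WRITE, 512);  /* fixed */
		printf("with WRITE:    %zd bytes written\n", do_write(&i));
		return 0;
	}
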
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -262,6 +262,7 @@ static DEFINE_SPINLOCK(minor_lock);
 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
 static void blkfront_gather_backend_features(struct blkfront_info *info);
+static int negotiate_mq(struct blkfront_info *info);
 
 static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
 {
@@ -1774,11 +1775,18 @@ static int talk_to_blkback(struct xenbus_device *dev,
 	unsigned int i, max_page_order;
 	unsigned int ring_page_order;
 
+	if (!info)
+		return -ENODEV;
+
 	max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
 					      "max-ring-page-order", 0);
 	ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
 	info->nr_ring_pages = 1 << ring_page_order;
 
+	err = negotiate_mq(info);
+	if (err)
+		goto destroy_blkring;
+
 	for (i = 0; i < info->nr_rings; i++) {
 		struct blkfront_ring_info *rinfo = &info->rinfo[i];
@@ -1978,11 +1986,6 @@ static int blkfront_probe(struct xenbus_device *dev,
 	}
 
 	info->xbdev = dev;
-	err = negotiate_mq(info);
-	if (err) {
-		kfree(info);
-		return err;
-	}
 
 	mutex_init(&info->mutex);
 	info->vdevice = vdevice;
@@ -2099,10 +2102,6 @@ static int blkfront_resume(struct xenbus_device *dev)
 	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
 
-	err = negotiate_mq(info);
-	if (err)
-		return err;
-
 	err = talk_to_blkback(dev, info);
 	if (!err)
 		blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
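
Moving negotiate_mq() out of probe/resume and into talk_to_blkback() puts the queue negotiation on the one path that every new backend connection takes, whether the VBD appears at probe, at resume, or via a later detach/re-attach. A simplified userspace model of the resulting flow (the function names come from the driver, the bodies are stubs):

	/* Sketch of the reordered xen-blkfront control flow, not driver code. */
	#include <stdio.h>

	static int negotiate_mq(void)
	{
		puts("negotiate_mq: agree on the number of hardware queues");
		return 0;
	}

	static int talk_to_blkback(const char *why)
	{
		int err = negotiate_mq();  /* the fix: negotiate here, always */
		if (err)
			return err;
		printf("talk_to_blkback (%s): set up rings for the negotiated queues\n",
		       why);
		return 0;
	}

	int main(void)
	{
		talk_to_blkback("probe");      /* boot-time VBD */
		talk_to_blkback("resume");     /* after suspend/resume */
		talk_to_blkback("re-attach");  /* runtime re-attach: the case the
		                                * old probe-only call missed */
		return 0;
	}
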
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -963,6 +963,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
 	uint32_t rtime = cpu_to_le32(get_seconds());
 	struct uuid_entry *u;
 	char buf[BDEVNAME_SIZE];
+	struct cached_dev *exist_dc, *t;
 
 	bdevname(dc->bdev, buf);
@@ -987,6 +988,16 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
 		return -EINVAL;
 	}
 
+	/* Check whether already attached */
+	list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
+		if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
+			pr_err("Tried to attach %s but duplicate UUID already attached",
+				buf);
+
+			return -EINVAL;
+		}
+	}
+
 	u = uuid_find(c, dc->sb.uuid);
 
 	if (u &&
@@ -1204,7 +1215,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
 	return;
 err:
-	pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+	pr_notice("error %s: %s", bdevname(bdev, name), err);
 	bcache_device_stop(&dc->disk);
 }
@@ -1883,6 +1894,8 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
 	const char *err = NULL; /* must be set for any error case */
 	int ret = 0;
 
+	bdevname(bdev, name);
+
 	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
 	ca->bdev = bdev;
 	ca->bdev->bd_holder = ca;
@@ -1891,11 +1904,12 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
 	bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page;
 	get_page(sb_page);
 
-	if (blk_queue_discard(bdev_get_queue(ca->bdev)))
+	if (blk_queue_discard(bdev_get_queue(bdev)))
 		ca->discard = CACHE_DISCARD(&ca->sb);
 
 	ret = cache_alloc(ca);
 	if (ret != 0) {
+		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 		if (ret == -ENOMEM)
 			err = "cache_alloc(): -ENOMEM";
 		else
@@ -1918,14 +1932,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
 		goto out;
 	}
 
-	pr_info("registered cache device %s", bdevname(bdev, name));
+	pr_info("registered cache device %s", name);
 
 out:
 	kobject_put(&ca->kobj);
 
 err:
 	if (err)
-		pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+		pr_notice("error %s: %s", name, err);
 
 	return ret;
 }
@@ -2014,6 +2028,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
 	if (err)
 		goto err_close;
 
+	err = "failed to register device";
 	if (SB_IS_BDEV(sb)) {
 		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
 		if (!dc)
@@ -2028,7 +2043,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
 			goto err_close;
 
 		if (register_cache(sb, sb_page, bdev, ca) != 0)
-			goto err_close;
+			goto err;
 	}
 out:
 	if (sb_page)
@@ -2041,7 +2056,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
 err_close:
 	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 err:
-	pr_info("error opening %s: %s", path, err);
+	pr_info("error %s: %s", path, err);
 	ret = -EINVAL;
 	goto out;
 }
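
The first of the two bcache fixes is a straight list walk: a backing device is refused at attach time if any already-attached device carries the same 16-byte superblock UUID. A standalone sketch of the same check, using hypothetical types rather than the bcache structs:

	/* Sketch only: struct dev stands in for struct cached_dev. */
	#include <stdio.h>
	#include <string.h>

	struct dev { unsigned char uuid[16]; };

	static int attach_allowed(const struct dev *candidate,
				  const struct dev *attached, size_t n)
	{
		for (size_t i = 0; i < n; i++) {
			/* same rule as the fix: byte-compare the 16-byte UUIDs */
			if (!memcmp(candidate->uuid, attached[i].uuid, 16))
				return 0;  /* duplicate: refuse, as bcache
					    * refuses with -EINVAL */
		}
		return 1;
	}

	int main(void)
	{
		struct dev attached[1] = { { { 0xaa, 0xbb } } };
		struct dev dup   = { { 0xaa, 0xbb } };
		struct dev fresh = { { 0x01, 0x02 } };

		printf("duplicate attach allowed? %d\n",
		       attach_allowed(&dup, attached, 1));
		printf("fresh attach allowed?     %d\n",
		       attach_allowed(&fresh, attached, 1));
		return 0;
	}
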
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3033,7 +3033,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 			ns->disk->disk_name);
 
 	nvme_mpath_add_disk(ns->head);
-	nvme_mpath_add_disk_links(ns);
 	return;
  out_unlink_ns:
 	mutex_lock(&ctrl->subsys->lock);
@@ -3053,7 +3052,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 		return;
 
 	if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
-		nvme_mpath_remove_disk_links(ns);
 		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
 					&nvme_ns_id_attr_group);
 		if (ns->ndev)
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -650,6 +650,11 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 				ret = -EINVAL;
 				goto out;
 			}
+			if (opts->discovery_nqn) {
+				pr_debug("Ignoring nr_io_queues value for discovery controller\n");
+				break;
+			}
+
 			opts->nr_io_queues = min_t(unsigned int,
 					num_online_cpus(), token);
 			break;
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1206,7 +1206,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
 			sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
 
 	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
-	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
+	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
 
 	/* Linux supports only Dynamic controllers */
 	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
 	uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
@@ -1321,7 +1321,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 			sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
 
 	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
 	conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
-	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize);
+	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);
 
 	lsop->queue = queue;
 	lsreq->rqstaddr = conn_rqst;
@@ -2481,11 +2481,11 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 		goto out_free_tag_set;
 	}
 
-	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
 		goto out_cleanup_blk_queue;
 
-	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
 		goto out_delete_hw_queues;
@@ -2532,11 +2532,11 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
 	if (ret)
 		goto out_free_io_queues;
 
-	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
 		goto out_free_io_queues;
 
-	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
 		goto out_delete_hw_queues;
@@ -2632,13 +2632,12 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	nvme_fc_init_queue(ctrl, 0);
 
 	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
-				NVME_AQ_BLK_MQ_DEPTH);
+				NVME_AQ_DEPTH);
 	if (ret)
 		goto out_free_queue;
 
 	ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
-				NVME_AQ_BLK_MQ_DEPTH,
-				(NVME_AQ_BLK_MQ_DEPTH / 4));
+				NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
 	if (ret)
 		goto out_delete_hw_queue;
@@ -2666,7 +2665,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	}
 
 	ctrl->ctrl.sqsize =
-		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap) + 1, ctrl->ctrl.sqsize);
+		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);
 
 	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
 	if (ret)
@@ -2699,6 +2698,14 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 		opts->queue_size = ctrl->ctrl.maxcmd;
 	}
 
+	if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
+		/* warn if sqsize is lower than queue_size */
+		dev_warn(ctrl->ctrl.device,
+			"queue_size %zu > ctrl sqsize %u, clamping down\n",
+			opts->queue_size, ctrl->ctrl.sqsize + 1);
+		opts->queue_size = ctrl->ctrl.sqsize + 1;
+	}
+
 	ret = nvme_fc_init_aen_ops(ctrl);
 	if (ret)
 		goto out_term_aen_ops;
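
The thread running through these fc.c hunks is that sqsize is a zero's-based field in NVMe: it encodes the queue depth minus one. Connect commands therefore carry qsize - 1 on the wire, and every place that needs an element count converts back with sqsize + 1. A tiny standalone sketch of the convention (the helper names are illustrative, not driver API):

	/* sqsize is zero's-based per the NVMe spec: depth 32 encodes as 31. */
	#include <assert.h>
	#include <stdint.h>

	static uint16_t depth_to_sqsize(uint16_t depth)  { return depth - 1; }
	static uint16_t sqsize_to_depth(uint16_t sqsize) { return sqsize + 1; }

	int main(void)
	{
		uint16_t queue_size = 128;  /* user-requested queue depth */
		uint16_t sqsize = depth_to_sqsize(queue_size);

		assert(sqsize == 127);      /* what the connect command carries */
		assert(sqsize_to_depth(sqsize) == queue_size);
						/* what create/connect paths use */
		return 0;
	}

Mixing the two encodings is exactly the off-by-one the rework removes: the old code passed the one's-based opts->queue_size where a depth derived from the zero's-based sqsize belonged.
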
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -210,25 +210,6 @@ void nvme_mpath_add_disk(struct nvme_ns_head *head)
 	mutex_unlock(&head->subsys->lock);
 }
 
-void nvme_mpath_add_disk_links(struct nvme_ns *ns)
-{
-	struct kobject *slave_disk_kobj, *holder_disk_kobj;
-
-	if (!ns->head->disk)
-		return;
-
-	slave_disk_kobj = &disk_to_dev(ns->disk)->kobj;
-	if (sysfs_create_link(ns->head->disk->slave_dir, slave_disk_kobj,
-			kobject_name(slave_disk_kobj)))
-		return;
-
-	holder_disk_kobj = &disk_to_dev(ns->head->disk)->kobj;
-	if (sysfs_create_link(ns->disk->part0.holder_dir, holder_disk_kobj,
-			kobject_name(holder_disk_kobj)))
-		sysfs_remove_link(ns->head->disk->slave_dir,
-			kobject_name(slave_disk_kobj));
-}
-
 void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 {
 	if (!head->disk)
@@ -243,14 +224,3 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 	blk_cleanup_queue(head->disk->queue);
 	put_disk(head->disk);
 }
-
-void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
-{
-	if (!ns->head->disk)
-		return;
-
-	sysfs_remove_link(ns->disk->part0.holder_dir,
-			kobject_name(&disk_to_dev(ns->head->disk)->kobj));
-	sysfs_remove_link(ns->head->disk->slave_dir,
-			kobject_name(&disk_to_dev(ns->disk)->kobj));
-}
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -410,9 +410,7 @@ bool nvme_req_needs_failover(struct request *req, blk_status_t error);
 void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
 int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
 void nvme_mpath_add_disk(struct nvme_ns_head *head);
-void nvme_mpath_add_disk_links(struct nvme_ns *ns);
 void nvme_mpath_remove_disk(struct nvme_ns_head *head);
-void nvme_mpath_remove_disk_links(struct nvme_ns *ns);
 static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
 {
@@ -454,12 +452,6 @@ static inline void nvme_mpath_add_disk(struct nvme_ns_head *head)
 static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 {
 }
-static inline void nvme_mpath_add_disk_links(struct nvme_ns *ns)
-{
-}
-static inline void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
-{
-}
 static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
 {
 }
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1153,12 +1153,6 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
 	if (!(csts & NVME_CSTS_CFS) && !nssro)
 		return false;
 
-	/* If PCI error recovery process is happening, we cannot reset or
-	 * the recovery mechanism will surely fail.
-	 */
-	if (pci_channel_offline(to_pci_dev(dev->dev)))
-		return false;
-
 	return true;
 }
@@ -1189,6 +1183,13 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	struct nvme_command cmd;
 	u32 csts = readl(dev->bar + NVME_REG_CSTS);
 
+	/* If PCI error recovery process is happening, we cannot reset or
+	 * the recovery mechanism will surely fail.
+	 */
+	mb();
+	if (pci_channel_offline(to_pci_dev(dev->dev)))
+		return BLK_EH_RESET_TIMER;
+
 	/*
 	 * Reset immediately if the controller is failed
 	 */
@@ -1913,7 +1914,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	int result, nr_io_queues;
 	unsigned long size;
 
-	nr_io_queues = num_present_cpus();
+	nr_io_queues = num_possible_cpus();
 	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
 	if (result < 0)
 		return result;
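
Switching nvme_setup_io_queues() from num_present_cpus() to num_possible_cpus() (and, per the commit title, sizing the vectors passed to pci_alloc_irq_vectors() the same way) reserves queues and interrupt vectors for every CPU that could ever come online, not just those present at probe time, so a hot-added CPU still lands on a valid queue. A trivial model of the difference, with hypothetical CPU counts:

	/* Sketch only: the counts below are invented for illustration. */
	#include <stdio.h>

	int main(void)
	{
		int present_cpus  = 4;   /* CPUs installed at boot */
		int possible_cpus = 16;  /* hotplug ceiling on this box */

		/* old sizing: queues only for CPUs present at probe time */
		printf("present-based queues:  %d\n", present_cpus);

		/* new sizing: one per possible CPU, covering later hot-adds */
		printf("possible-based queues: %d\n", possible_cpus);
		return 0;
	}
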