Commit 8467b0ed authored by Linus Torvalds

Merge tag 'for-5.18/drivers-2022-04-01' of git://git.kernel.dk/linux-block

Pull block driver fixes from Jens Axboe:
 "Followup block driver updates and fixes for the 5.18-rc1 merge window.
  In detail:

   - NVMe pull request
       - fix multipath hang when disk goes live over reconnect (Anton
         Eidelman)
       - fix RCU hole that allowed for endless looping in multipath
         round robin (Chris Leech)
       - remove redundant assignment after left shift (Colin Ian King)
       - add quirks for Samsung X5 SSDs (Monish Kumar R)
       - fix the read-only state for zoned namespaces with unsupported
         features (Pankaj Raghav)
       - use a private workqueue instead of the system workqueue in
         nvmet (Sagi Grimberg)
       - allow duplicate NSIDs for private namespaces (Sungup Moon)
       - expose use_threaded_interrupts read-only in sysfs (Xin Hao)

   - nbd minor allocation fix (Zhang)

   - drbd fixes and maintainer addition (Lars, Jakob, Christoph)

   - n64cart build fix (Jackie)

   - loop compat ioctl fix (Carlos)

   - misc fixes (Colin, Dongli)"

* tag 'for-5.18/drivers-2022-04-01' of git://git.kernel.dk/linux-block:
  drbd: remove check of list iterator against head past the loop body
  drbd: remove usage of list iterator variable after loop
  nbd: fix possible overflow on 'first_minor' in nbd_dev_add()
  MAINTAINERS: add drbd co-maintainer
  drbd: fix potential silent data corruption
  loop: fix ioctl calls using compat_loop_info
  nvme-multipath: fix hang when disk goes live over reconnect
  nvme: fix RCU hole that allowed for endless looping in multipath round robin
  nvme: allow duplicate NSIDs for private namespaces
  nvmet: remove redundant assignment after left shift
  nvmet: use a private workqueue instead of the system workqueue
  nvme-pci: add quirks for Samsung X5 SSDs
  nvme-pci: expose use_threaded_interrupts read-only in sysfs
  nvme: fix the read-only state for zoned namespaces with unsupported features
  n64cart: convert bi_disk to bi_bdev->bd_disk fix build
  xen/blkfront: fix comment for need_copy
  xen-blkback: remove redundant assignment to variable i
parents d589ae0d 2651ee5a
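
Several of the drbd hunks below follow the kernel-wide pattern of never using the list iterator variable once the loop has ended: a separate pointer records the match and stays NULL when nothing was found. A minimal sketch of that pattern, with made-up struct and field names rather than the drbd ones:

#include <linux/list.h>

struct item {
    struct list_head node;
    int key;
};

/* Find the first item matching @key; the iterator itself is never
 * dereferenced after the loop finishes, so a failed search cannot
 * leave a bogus pointer behind. */
static struct item *find_item(struct list_head *head, int key)
{
    struct item *iter, *found = NULL;

    list_for_each_entry(iter, head, node) {
        if (iter->key == key) {
            found = iter;
            break;
        }
    }

    return found;   /* NULL when the loop ran to completion */
}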
@@ -6052,6 +6052,7 @@ F: drivers/scsi/dpt/
 DRBD DRIVER
 M: Philipp Reisner <philipp.reisner@linbit.com>
 M: Lars Ellenberg <lars.ellenberg@linbit.com>
+M: Christoph Böhmwalder <christoph.boehmwalder@linbit.com>
 L: drbd-dev@lists.linbit.com
 S: Supported
 W: http://www.drbd.org
......
@@ -171,7 +171,7 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
         unsigned int set_size)
 {
     struct drbd_request *r;
-    struct drbd_request *req = NULL;
+    struct drbd_request *req = NULL, *tmp = NULL;
     int expect_epoch = 0;
     int expect_size = 0;
@@ -225,8 +225,11 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
      * to catch requests being barrier-acked "unexpectedly".
      * It usually should find the same req again, or some READ preceding it. */
     list_for_each_entry(req, &connection->transfer_log, tl_requests)
-        if (req->epoch == expect_epoch)
+        if (req->epoch == expect_epoch) {
+            tmp = req;
             break;
+        }
+    req = list_prepare_entry(tmp, &connection->transfer_log, tl_requests);
     list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
         if (req->epoch != expect_epoch)
             break;
......
@@ -180,7 +180,8 @@ void start_new_tl_epoch(struct drbd_connection *connection)
 void complete_master_bio(struct drbd_device *device,
         struct bio_and_error *m)
 {
-    m->bio->bi_status = errno_to_blk_status(m->error);
+    if (unlikely(m->error))
+        m->bio->bi_status = errno_to_blk_status(m->error);
     bio_endio(m->bio);
     dec_ap_bio(device);
 }
@@ -332,17 +333,21 @@ static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct dr
 static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
 {
     struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
+    struct drbd_request *iter = req;
     if (!connection)
         return;
     if (connection->req_next != req)
         return;
-    list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
-        const unsigned s = req->rq_state;
-        if (s & RQ_NET_QUEUED)
+
+    req = NULL;
+    list_for_each_entry_continue(iter, &connection->transfer_log, tl_requests) {
+        const unsigned int s = iter->rq_state;
+        if (s & RQ_NET_QUEUED) {
+            req = iter;
             break;
+        }
     }
-    if (&req->tl_requests == &connection->transfer_log)
-        req = NULL;
     connection->req_next = req;
 }
@@ -358,17 +363,21 @@ static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, st
 static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
 {
     struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
+    struct drbd_request *iter = req;
     if (!connection)
         return;
     if (connection->req_ack_pending != req)
         return;
-    list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
-        const unsigned s = req->rq_state;
-        if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING))
+
+    req = NULL;
+    list_for_each_entry_continue(iter, &connection->transfer_log, tl_requests) {
+        const unsigned int s = iter->rq_state;
+        if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING)) {
+            req = iter;
             break;
+        }
     }
-    if (&req->tl_requests == &connection->transfer_log)
-        req = NULL;
     connection->req_ack_pending = req;
 }
@@ -384,17 +393,21 @@ static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, s
 static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
 {
     struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
+    struct drbd_request *iter = req;
     if (!connection)
         return;
     if (connection->req_not_net_done != req)
         return;
-    list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
-        const unsigned s = req->rq_state;
-        if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE))
+
+    req = NULL;
+    list_for_each_entry_continue(iter, &connection->transfer_log, tl_requests) {
+        const unsigned int s = iter->rq_state;
+        if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE)) {
+            req = iter;
             break;
+        }
     }
-    if (&req->tl_requests == &connection->transfer_log)
-        req = NULL;
     connection->req_not_net_done = req;
 }
......
@@ -1591,6 +1591,7 @@ struct compat_loop_info {
     compat_ulong_t  lo_inode;               /* ioctl r/o */
     compat_dev_t    lo_rdevice;             /* ioctl r/o */
     compat_int_t    lo_offset;
+    compat_int_t    lo_encrypt_type;        /* obsolete, ignored */
     compat_int_t    lo_encrypt_key_size;    /* ioctl w/o */
     compat_int_t    lo_flags;               /* ioctl r/o */
     char            lo_name[LO_NAME_SIZE];
......
@@ -88,7 +88,7 @@ static void n64cart_submit_bio(struct bio *bio)
 {
     struct bio_vec bvec;
     struct bvec_iter iter;
-    struct device *dev = bio->bi_disk->private_data;
+    struct device *dev = bio->bi_bdev->bd_disk->private_data;
     u32 pos = bio->bi_iter.bi_sector << SECTOR_SHIFT;
     bio_for_each_segment(bvec, bio, iter) {
......
@@ -1800,17 +1800,6 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
     refcount_set(&nbd->refs, 0);
     INIT_LIST_HEAD(&nbd->list);
     disk->major = NBD_MAJOR;
-    /* Too big first_minor can cause duplicate creation of
-     * sysfs files/links, since index << part_shift might overflow, or
-     * MKDEV() expect that the max bits of first_minor is 20.
-     */
-    disk->first_minor = index << part_shift;
-    if (disk->first_minor < index || disk->first_minor > MINORMASK) {
-        err = -EINVAL;
-        goto out_free_work;
-    }
     disk->minors = 1 << part_shift;
     disk->fops = &nbd_fops;
     disk->private_data = nbd;
@@ -1915,8 +1904,19 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
     if (!netlink_capable(skb, CAP_SYS_ADMIN))
         return -EPERM;
-    if (info->attrs[NBD_ATTR_INDEX])
+    if (info->attrs[NBD_ATTR_INDEX]) {
         index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
+
+        /*
+         * Too big first_minor can cause duplicate creation of
+         * sysfs files/links, since index << part_shift might overflow, or
+         * MKDEV() expect that the max bits of first_minor is 20.
+         */
+        if (index < 0 || index > MINORMASK >> part_shift) {
+            printk(KERN_ERR "nbd: illegal input index %d\n", index);
+            return -EINVAL;
+        }
+    }
     if (!info->attrs[NBD_ATTR_SOCKETS]) {
         printk(KERN_ERR "nbd: must specify at least one socket\n");
         return -EINVAL;
......
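
The nbd hunks above move the index validation into nbd_genl_connect() and phrase it as index > MINORMASK >> part_shift, which rejects an oversized index before index << part_shift can wrap or leave the 20-bit minor range. A rough user-space illustration of that bound, with an assumed part_shift value (not taken from the driver):

#include <stdio.h>

#define MINORBITS  20
#define MINORMASK  ((1U << MINORBITS) - 1)

int main(void)
{
    unsigned int part_shift = 5;    /* assumed example value */
    unsigned int max_index = MINORMASK >> part_shift;

    /* Any index up to max_index shifts back into the minor range
     * without overflow; anything larger must be rejected first. */
    printf("max index = %u, largest first_minor = %u\n",
           max_index, max_index << part_shift);
    return 0;
}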
@@ -931,7 +931,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
     if (rc)
         goto unmap;
-    for (n = 0, i = 0; n < nseg; n++) {
+    for (n = 0; n < nseg; n++) {
         uint8_t first_sect, last_sect;
         if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
......
@@ -576,7 +576,7 @@ struct setup_rw_req {
     struct blkif_request *ring_req;
     grant_ref_t gref_head;
     unsigned int id;
-    /* Only used when persistent grant is used and it's a read request */
+    /* Only used when persistent grant is used and it's a write request */
     bool need_copy;
     unsigned int bvec_off;
     char *bvec_data;
......
@@ -1830,9 +1830,6 @@ static void nvme_update_disk_info(struct gendisk *disk,
     nvme_config_discard(disk, ns);
     blk_queue_max_write_zeroes_sectors(disk->queue,
                                        ns->ctrl->max_zeroes_sectors);
-
-    set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) ||
-        test_bit(NVME_NS_FORCE_RO, &ns->flags));
 }
 static inline bool nvme_first_scan(struct gendisk *disk)
@@ -1891,6 +1888,8 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
         goto out_unfreeze;
     }
+    set_disk_ro(ns->disk, (id->nsattr & NVME_NS_ATTR_RO) ||
+        test_bit(NVME_NS_FORCE_RO, &ns->flags));
     set_bit(NVME_NS_READY, &ns->flags);
     blk_mq_unfreeze_queue(ns->disk->queue);
@@ -1903,6 +1902,9 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
     if (nvme_ns_head_multipath(ns->head)) {
         blk_mq_freeze_queue(ns->head->disk->queue);
         nvme_update_disk_info(ns->head->disk, ns, id);
+        set_disk_ro(ns->head->disk,
+                    (id->nsattr & NVME_NS_ATTR_RO) ||
+                            test_bit(NVME_NS_FORCE_RO, &ns->flags));
         nvme_mpath_revalidate_paths(ns);
         blk_stack_limits(&ns->head->disk->queue->limits,
                          &ns->queue->limits, 0);
@@ -3589,15 +3591,20 @@ static const struct attribute_group *nvme_dev_attr_groups[] = {
     NULL,
 };
-static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
+static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl,
         unsigned nsid)
 {
     struct nvme_ns_head *h;
-    lockdep_assert_held(&subsys->lock);
+    lockdep_assert_held(&ctrl->subsys->lock);
-    list_for_each_entry(h, &subsys->nsheads, entry) {
-        if (h->ns_id != nsid)
+    list_for_each_entry(h, &ctrl->subsys->nsheads, entry) {
+        /*
+         * Private namespaces can share NSIDs under some conditions.
+         * In that case we can't use the same ns_head for namespaces
+         * with the same NSID.
+         */
+        if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h))
             continue;
         if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
             return h;
@@ -3791,7 +3798,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
     }
     mutex_lock(&ctrl->subsys->lock);
-    head = nvme_find_ns_head(ctrl->subsys, nsid);
+    head = nvme_find_ns_head(ctrl, nsid);
     if (!head) {
         ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, ids);
         if (ret) {
@@ -3988,6 +3995,16 @@ static void nvme_ns_remove(struct nvme_ns *ns)
     set_capacity(ns->disk, 0);
     nvme_fault_inject_fini(&ns->fault_inject);
+    /*
+     * Ensure that !NVME_NS_READY is seen by other threads to prevent
+     * this ns going back into current_path.
+     */
+    synchronize_srcu(&ns->head->srcu);
+
+    /* wait for concurrent submissions */
+    if (nvme_mpath_clear_current_path(ns))
+        synchronize_srcu(&ns->head->srcu);
+
     mutex_lock(&ns->ctrl->subsys->lock);
     list_del_rcu(&ns->siblings);
     if (list_empty(&ns->head->list)) {
@@ -3999,10 +4016,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
     /* guarantee not available in head->list */
     synchronize_rcu();
-    /* wait for concurrent submissions */
-    if (nvme_mpath_clear_current_path(ns))
-        synchronize_srcu(&ns->head->srcu);
-
     if (!nvme_ns_head_multipath(ns->head))
         nvme_cdev_del(&ns->cdev, &ns->cdev_device);
     del_gendisk(ns->disk);
@@ -4480,6 +4493,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
     if (ctrl->queue_count > 1) {
         nvme_queue_scan(ctrl);
         nvme_start_queues(ctrl);
+        nvme_mpath_update(ctrl);
     }
     nvme_change_uevent(ctrl, "NVME_EVENT=connected");
......
@@ -482,10 +482,11 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
     /*
      * Add a multipath node if the subsystems supports multiple controllers.
-     * We also do this for private namespaces as the namespace sharing data could
-     * change after a rescan.
+     * We also do this for private namespaces as the namespace sharing flag
+     * could change after a rescan.
      */
-    if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || !multipath)
+    if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
+        !nvme_is_unique_nsid(ctrl, head) || !multipath)
         return 0;
     head->disk = blk_alloc_disk(ctrl->numa_node);
@@ -612,8 +613,17 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
     ns->ana_grpid = le32_to_cpu(desc->grpid);
     ns->ana_state = desc->state;
     clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
-    if (nvme_state_is_live(ns->ana_state))
+    /*
+     * nvme_mpath_set_live() will trigger I/O to the multipath path device
+     * and in turn to this path device. However we cannot accept this I/O
+     * if the controller is not live. This may deadlock if called from
+     * nvme_mpath_init_identify() and the ctrl will never complete
+     * initialization, preventing I/O from completing. For this case we
+     * will reprocess the ANA log page in nvme_mpath_update() once the
+     * controller is ready.
+     */
+    if (nvme_state_is_live(ns->ana_state) &&
+        ns->ctrl->state == NVME_CTRL_LIVE)
         nvme_mpath_set_live(ns);
 }
@@ -700,6 +710,18 @@ static void nvme_ana_work(struct work_struct *work)
     nvme_read_ana_log(ctrl);
 }
+void nvme_mpath_update(struct nvme_ctrl *ctrl)
+{
+    u32 nr_change_groups = 0;
+
+    if (!ctrl->ana_log_buf)
+        return;
+
+    mutex_lock(&ctrl->ana_lock);
+    nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state);
+    mutex_unlock(&ctrl->ana_lock);
+}
+
 static void nvme_anatt_timeout(struct timer_list *t)
 {
     struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);
......
@@ -723,6 +723,25 @@ static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
         return queue_live;
     return __nvme_check_ready(ctrl, rq, queue_live);
 }
+
+/*
+ * NSID shall be unique for all shared namespaces, or if at least one of the
+ * following conditions is met:
+ *   1. Namespace Management is supported by the controller
+ *   2. ANA is supported by the controller
+ *   3. NVM Set are supported by the controller
+ *
+ * In other case, private namespace are not required to report a unique NSID.
+ */
+static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
+        struct nvme_ns_head *head)
+{
+    return head->shared ||
+        (ctrl->oacs & NVME_CTRL_OACS_NS_MNGT_SUPP) ||
+        (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA) ||
+        (ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
+}
+
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
         void *buf, unsigned bufflen);
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
@@ -782,6 +801,7 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
 void nvme_mpath_remove_disk(struct nvme_ns_head *head);
 int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
 void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
+void nvme_mpath_update(struct nvme_ctrl *ctrl);
 void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
 void nvme_mpath_stop(struct nvme_ctrl *ctrl);
 bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
@@ -853,6 +873,9 @@ static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
         "Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
     return 0;
 }
+static inline void nvme_mpath_update(struct nvme_ctrl *ctrl)
+{
+}
 static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
 {
 }
......
@@ -45,7 +45,7 @@
 #define NVME_MAX_SEGS  127
 static int use_threaded_interrupts;
-module_param(use_threaded_interrupts, int, 0);
+module_param(use_threaded_interrupts, int, 0444);
 static bool use_cmb_sqes = true;
 module_param(use_cmb_sqes, bool, 0444);
@@ -3467,7 +3467,10 @@ static const struct pci_device_id nvme_id_table[] = {
                 NVME_QUIRK_128_BYTES_SQES |
                 NVME_QUIRK_SHARED_TAGS |
                 NVME_QUIRK_SKIP_CID_GEN },
+    { PCI_DEVICE(0x144d, 0xa808),   /* Samsung X5 */
+        .driver_data =  NVME_QUIRK_DELAY_BEFORE_CHK_RDY|
+                NVME_QUIRK_NO_DEEPEST_PS |
+                NVME_QUIRK_IGNORE_DEV_SUBNQN, },
     { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
     { 0, }
 };
......
@@ -988,7 +988,7 @@ void nvmet_execute_async_event(struct nvmet_req *req)
     ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
     mutex_unlock(&ctrl->lock);
-    schedule_work(&ctrl->async_event_work);
+    queue_work(nvmet_wq, &ctrl->async_event_work);
 }
 void nvmet_execute_keep_alive(struct nvmet_req *req)
......
@@ -1555,7 +1555,7 @@ static void nvmet_port_release(struct config_item *item)
     struct nvmet_port *port = to_nvmet_port(item);
     /* Let inflight controllers teardown complete */
-    flush_scheduled_work();
+    flush_workqueue(nvmet_wq);
     list_del(&port->global_entry);
     kfree(port->ana_state);
......
@@ -20,6 +20,9 @@ struct workqueue_struct *zbd_wq;
 static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
 static DEFINE_IDA(cntlid_ida);
+struct workqueue_struct *nvmet_wq;
+EXPORT_SYMBOL_GPL(nvmet_wq);
+
 /*
  * This read/write semaphore is used to synchronize access to configuration
  * information on a target system that will result in discovery log page
@@ -205,7 +208,7 @@ void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
     list_add_tail(&aen->entry, &ctrl->async_events);
     mutex_unlock(&ctrl->lock);
-    schedule_work(&ctrl->async_event_work);
+    queue_work(nvmet_wq, &ctrl->async_event_work);
 }
 static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
@@ -385,7 +388,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
     if (reset_tbkas) {
         pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
                  ctrl->cntlid);
-        schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+        queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
         return;
     }
@@ -403,7 +406,7 @@ void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
     pr_debug("ctrl %d start keep-alive timer for %d secs\n",
              ctrl->cntlid, ctrl->kato);
-    schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+    queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
 }
 void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
@@ -1120,7 +1123,7 @@ static inline u8 nvmet_cc_iocqes(u32 cc)
 static inline bool nvmet_css_supported(u8 cc_css)
 {
-    switch (cc_css <<= NVME_CC_CSS_SHIFT) {
+    switch (cc_css << NVME_CC_CSS_SHIFT) {
     case NVME_CC_CSS_NVM:
     case NVME_CC_CSS_CSI:
         return true;
@@ -1478,7 +1481,7 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
     mutex_lock(&ctrl->lock);
     if (!(ctrl->csts & NVME_CSTS_CFS)) {
         ctrl->csts |= NVME_CSTS_CFS;
-        schedule_work(&ctrl->fatal_err_work);
+        queue_work(nvmet_wq, &ctrl->fatal_err_work);
     }
     mutex_unlock(&ctrl->lock);
 }
@@ -1619,9 +1622,15 @@ static int __init nvmet_init(void)
         goto out_free_zbd_work_queue;
     }
+    nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
+    if (!nvmet_wq) {
+        error = -ENOMEM;
+        goto out_free_buffered_work_queue;
+    }
+
     error = nvmet_init_discovery();
     if (error)
-        goto out_free_work_queue;
+        goto out_free_nvmet_work_queue;
     error = nvmet_init_configfs();
     if (error)
@@ -1630,7 +1639,9 @@ static int __init nvmet_init(void)
 out_exit_discovery:
     nvmet_exit_discovery();
-out_free_work_queue:
+out_free_nvmet_work_queue:
+    destroy_workqueue(nvmet_wq);
+out_free_buffered_work_queue:
     destroy_workqueue(buffered_io_wq);
 out_free_zbd_work_queue:
     destroy_workqueue(zbd_wq);
@@ -1642,6 +1653,7 @@ static void __exit nvmet_exit(void)
     nvmet_exit_configfs();
     nvmet_exit_discovery();
     ida_destroy(&cntlid_ida);
+    destroy_workqueue(nvmet_wq);
     destroy_workqueue(buffered_io_wq);
     destroy_workqueue(zbd_wq);
......
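
The nvmet hunks in this series replace schedule_work()/flush_scheduled_work() on the system workqueue with the driver-owned nvmet_wq allocated above, so a flush waits only for nvmet's own work items and the queue can be created with WQ_MEM_RECLAIM. A generic sketch of that pattern for an arbitrary module (hypothetical names, not the nvmet code):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;   /* hypothetical driver queue */
static struct work_struct example_work;

static void example_work_fn(struct work_struct *work)
{
    /* driver work runs here, off the system workqueue */
}

static int __init example_init(void)
{
    example_wq = alloc_workqueue("example-wq", WQ_MEM_RECLAIM, 0);
    if (!example_wq)
        return -ENOMEM;

    INIT_WORK(&example_work, example_work_fn);
    queue_work(example_wq, &example_work);    /* instead of schedule_work() */
    return 0;
}

static void __exit example_exit(void)
{
    flush_workqueue(example_wq);    /* waits only for this driver's work */
    destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");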
@@ -1491,7 +1491,7 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
     list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
         if (!nvmet_fc_tgt_a_get(assoc))
             continue;
-        if (!schedule_work(&assoc->del_work))
+        if (!queue_work(nvmet_wq, &assoc->del_work))
             /* already deleting - release local reference */
             nvmet_fc_tgt_a_put(assoc);
     }
@@ -1546,7 +1546,7 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
             continue;
         assoc->hostport->invalid = 1;
         noassoc = false;
-        if (!schedule_work(&assoc->del_work))
+        if (!queue_work(nvmet_wq, &assoc->del_work))
             /* already deleting - release local reference */
             nvmet_fc_tgt_a_put(assoc);
     }
@@ -1592,7 +1592,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
         nvmet_fc_tgtport_put(tgtport);
         if (found_ctrl) {
-            if (!schedule_work(&assoc->del_work))
+            if (!queue_work(nvmet_wq, &assoc->del_work))
                 /* already deleting - release local reference */
                 nvmet_fc_tgt_a_put(assoc);
             return;
@@ -2060,7 +2060,7 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
     iod->rqstdatalen = lsreqbuf_len;
     iod->hosthandle = hosthandle;
-    schedule_work(&iod->work);
+    queue_work(nvmet_wq, &iod->work);
     return 0;
 }
......
@@ -360,7 +360,7 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
         spin_lock(&rport->lock);
         list_add_tail(&rport->ls_list, &tls_req->ls_list);
         spin_unlock(&rport->lock);
-        schedule_work(&rport->ls_work);
+        queue_work(nvmet_wq, &rport->ls_work);
     return ret;
 }
@@ -393,7 +393,7 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
         spin_lock(&rport->lock);
         list_add_tail(&rport->ls_list, &tls_req->ls_list);
         spin_unlock(&rport->lock);
-        schedule_work(&rport->ls_work);
+        queue_work(nvmet_wq, &rport->ls_work);
     }
     return 0;
@@ -448,7 +448,7 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
         spin_lock(&tport->lock);
         list_add_tail(&tport->ls_list, &tls_req->ls_list);
         spin_unlock(&tport->lock);
-        schedule_work(&tport->ls_work);
+        queue_work(nvmet_wq, &tport->ls_work);
     return ret;
 }
@@ -480,7 +480,7 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
         spin_lock(&tport->lock);
         list_add_tail(&tport->ls_list, &tls_req->ls_list);
         spin_unlock(&tport->lock);
-        schedule_work(&tport->ls_work);
+        queue_work(nvmet_wq, &tport->ls_work);
     }
     return 0;
@@ -520,7 +520,7 @@ fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
     tgt_rscn->tport = tgtport->private;
     INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);
-    schedule_work(&tgt_rscn->work);
+    queue_work(nvmet_wq, &tgt_rscn->work);
 }
 static void
@@ -739,7 +739,7 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
     INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
     kref_init(&tfcp_req->ref);
-    schedule_work(&tfcp_req->fcp_rcv_work);
+    queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);
     return 0;
 }
@@ -921,7 +921,7 @@ fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
 {
     struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
-    schedule_work(&tfcp_req->tio_done_work);
+    queue_work(nvmet_wq, &tfcp_req->tio_done_work);
 }
 static void
@@ -976,7 +976,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
     if (abortio)
         /* leave the reference while the work item is scheduled */
-        WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
+        WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
     else {
         /*
          * as the io has already had the done callback made,
......
@@ -283,7 +283,7 @@ static void nvmet_file_execute_flush(struct nvmet_req *req)
     if (!nvmet_check_transfer_len(req, 0))
         return;
     INIT_WORK(&req->f.work, nvmet_file_flush_work);
-    schedule_work(&req->f.work);
+    queue_work(nvmet_wq, &req->f.work);
 }
 static void nvmet_file_execute_discard(struct nvmet_req *req)
@@ -343,7 +343,7 @@ static void nvmet_file_execute_dsm(struct nvmet_req *req)
     if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
         return;
     INIT_WORK(&req->f.work, nvmet_file_dsm_work);
-    schedule_work(&req->f.work);
+    queue_work(nvmet_wq, &req->f.work);
 }
 static void nvmet_file_write_zeroes_work(struct work_struct *w)
@@ -373,7 +373,7 @@ static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
     if (!nvmet_check_transfer_len(req, 0))
         return;
     INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
-    schedule_work(&req->f.work);
+    queue_work(nvmet_wq, &req->f.work);
 }
 u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
......
@@ -166,7 +166,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
         iod->req.transfer_len = blk_rq_payload_bytes(req);
     }
-    schedule_work(&iod->work);
+    queue_work(nvmet_wq, &iod->work);
     return BLK_STS_OK;
 }
@@ -187,7 +187,7 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
         return;
     }
-    schedule_work(&iod->work);
+    queue_work(nvmet_wq, &iod->work);
 }
 static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
......
@@ -366,6 +366,7 @@ struct nvmet_req {
 extern struct workqueue_struct *buffered_io_wq;
 extern struct workqueue_struct *zbd_wq;
+extern struct workqueue_struct *nvmet_wq;
 static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
 {
......
@@ -283,7 +283,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
     if (req->p.use_workqueue || effects) {
         INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
         req->p.rq = rq;
-        schedule_work(&req->p.work);
+        queue_work(nvmet_wq, &req->p.work);
     } else {
         rq->end_io_data = req;
         blk_execute_rq_nowait(rq, false, nvmet_passthru_req_done);
......
@@ -1584,7 +1584,7 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
     if (queue->host_qid == 0) {
         /* Let inflight controller teardown complete */
-        flush_scheduled_work();
+        flush_workqueue(nvmet_wq);
     }
     ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
@@ -1669,7 +1669,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
     if (disconnect) {
         rdma_disconnect(queue->cm_id);
-        schedule_work(&queue->release_work);
+        queue_work(nvmet_wq, &queue->release_work);
     }
 }
@@ -1699,7 +1699,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
     mutex_unlock(&nvmet_rdma_queue_mutex);
     pr_err("failed to connect queue %d\n", queue->idx);
-    schedule_work(&queue->release_work);
+    queue_work(nvmet_wq, &queue->release_work);
 }
 /**
@@ -1773,7 +1773,7 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
         if (!queue) {
             struct nvmet_rdma_port *port = cm_id->context;
-            schedule_delayed_work(&port->repair_work, 0);
+            queue_delayed_work(nvmet_wq, &port->repair_work, 0);
             break;
         }
         fallthrough;
@@ -1903,7 +1903,7 @@ static void nvmet_rdma_repair_port_work(struct work_struct *w)
     nvmet_rdma_disable_port(port);
     ret = nvmet_rdma_enable_port(port);
     if (ret)
-        schedule_delayed_work(&port->repair_work, 5 * HZ);
+        queue_delayed_work(nvmet_wq, &port->repair_work, 5 * HZ);
 }
 static int nvmet_rdma_add_port(struct nvmet_port *nport)
@@ -2053,7 +2053,7 @@ static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data
     }
     mutex_unlock(&nvmet_rdma_queue_mutex);
-    flush_scheduled_work();
+    flush_workqueue(nvmet_wq);
 }
 static struct ib_client nvmet_rdma_ib_client = {
......
@@ -1269,7 +1269,7 @@ static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
     spin_lock(&queue->state_lock);
     if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
         queue->state = NVMET_TCP_Q_DISCONNECTING;
-        schedule_work(&queue->release_work);
+        queue_work(nvmet_wq, &queue->release_work);
     }
     spin_unlock(&queue->state_lock);
 }
@@ -1684,7 +1684,7 @@ static void nvmet_tcp_listen_data_ready(struct sock *sk)
         goto out;
     if (sk->sk_state == TCP_LISTEN)
-        schedule_work(&port->accept_work);
+        queue_work(nvmet_wq, &port->accept_work);
 out:
     read_unlock_bh(&sk->sk_callback_lock);
 }
@@ -1815,7 +1815,7 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
     if (sq->qid == 0) {
         /* Let inflight controller teardown complete */
-        flush_scheduled_work();
+        flush_workqueue(nvmet_wq);
     }
     queue->nr_cmds = sq->size * 2;
@@ -1876,12 +1876,12 @@ static void __exit nvmet_tcp_exit(void)
     nvmet_unregister_transport(&nvmet_tcp_ops);
-    flush_scheduled_work();
+    flush_workqueue(nvmet_wq);
     mutex_lock(&nvmet_tcp_queue_mutex);
     list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
         kernel_sock_shutdown(queue->sock, SHUT_RDWR);
     mutex_unlock(&nvmet_tcp_queue_mutex);
-    flush_scheduled_work();
+    flush_workqueue(nvmet_wq);
     destroy_workqueue(nvmet_tcp_wq);
 }
......
@@ -346,6 +346,7 @@ enum {
     NVME_CTRL_ONCS_TIMESTAMP        = 1 << 6,
     NVME_CTRL_VWC_PRESENT           = 1 << 0,
     NVME_CTRL_OACS_SEC_SUPP         = 1 << 0,
+    NVME_CTRL_OACS_NS_MNGT_SUPP     = 1 << 3,
     NVME_CTRL_OACS_DIRECTIVES       = 1 << 5,
     NVME_CTRL_OACS_DBBUF_SUPP       = 1 << 8,
     NVME_CTRL_LPA_CMD_EFFECTS_LOG   = 1 << 1,
......
@@ -45,7 +45,7 @@ struct loop_info {
     unsigned long      lo_inode;            /* ioctl r/o */
     __kernel_old_dev_t lo_rdevice;          /* ioctl r/o */
     int                lo_offset;
-    int                lo_encrypt_type;
+    int                lo_encrypt_type;     /* obsolete, ignored */
     int                lo_encrypt_key_size; /* ioctl w/o */
     int                lo_flags;
     char               lo_name[LO_NAME_SIZE];
@@ -61,7 +61,7 @@ struct loop_info64 {
     __u64              lo_offset;
     __u64              lo_sizelimit;        /* bytes, 0 == max available */
     __u32              lo_number;           /* ioctl r/o */
-    __u32              lo_encrypt_type;
+    __u32              lo_encrypt_type;     /* obsolete, ignored */
     __u32              lo_encrypt_key_size; /* ioctl w/o */
     __u32              lo_flags;
     __u8               lo_file_name[LO_NAME_SIZE];
......
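
The two loop.h hunks above keep the obsolete lo_encrypt_type field purely so the structures retain the layout that 32-bit userspace expects; the compat ioctl fix in the loop driver depends on the field offsets staying in step. A generic way to pin such a layout invariant at build time (illustrative structures only, not the real loop_info definitions):

#include <stddef.h>

/* Illustrative stand-ins for a native definition and its compat mirror. */
struct example_info {
    int lo_offset;
    int lo_encrypt_type;        /* obsolete, kept only for layout */
    int lo_encrypt_key_size;
};

struct example_compat_info {
    int lo_offset;
    int lo_encrypt_type;        /* must stay so the offsets line up */
    int lo_encrypt_key_size;
};

/* Compilation fails if the two layouts ever drift apart. */
_Static_assert(offsetof(struct example_info, lo_encrypt_key_size) ==
               offsetof(struct example_compat_info, lo_encrypt_key_size),
               "compat layout mismatch");
_Static_assert(sizeof(struct example_info) == sizeof(struct example_compat_info),
               "compat size mismatch");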