Commit e63c8eb1 authored by Jens Axboe

Merge tag 'nvme-5.13-2021-04-15' of git://git.infradead.org/nvme into for-5.13/drivers

Pull NVMe updates from Christoph:

"nvme updates for Linux 5.13

 - refactor the ioctl code
 - fix a segmentation fault on an I/O parsing error in nvmet-tcp
   (Elad Grupi)
 - fix a NULL dereference in nvme_ctrl_fast_io_fail_tmo_show/store
   (Gopal Tiwari)
 - properly respect the sgl_threshold flag in nvme-pci (Niklas Cassel)
 - misc cleanups (Niklas Cassel, Amit Engel, Minwoo Im, Colin Ian King)"

* tag 'nvme-5.13-2021-04-15' of git://git.infradead.org/nvme:
  nvme: fix NULL dereference in nvme_ctrl_fast_io_fail_tmo_show/store
  nvme: let namespace probing continue for unsupported features
  nvme: factor out nvme_ns_open and nvme_ns_release helpers
  nvme: move nvme_ns_head_ops to multipath.c
  nvme: factor out a nvme_tryget_ns_head helper
  nvme: move the ioctl code to a separate file
  nvme: don't bother to look up a namespace for controller ioctls
  nvme: simplify block device ioctl handling for the !multipath case
  nvme: simplify the compat ioctl handling
  nvme: factor out a nvme_ns_ioctl helper
  nvme: pass a user pointer to nvme_nvm_ioctl
  nvme: cleanup setting the disk name
  nvme: add a nvme_ns_head_multipath helper
  nvme: remove single trailing whitespace
  nvme-multipath: remove single trailing whitespace
  nvme-pci: remove single trailing whitespace
  nvme-pci: don't simply map sgl when sgls are disabled
  nvmet: fix a spelling mistake "nubmer" -> "number"
  nvmet-fc: simplify nvmet_fc_alloc_hostport
  nvmet-tcp: fix a segmentation fault during io parsing error
parents f8ee34a9 d6609084
......@@ -9,7 +9,7 @@ obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
obj-$(CONFIG_NVME_FC) += nvme-fc.o
obj-$(CONFIG_NVME_TCP) += nvme-tcp.o
nvme-core-y := core.o
nvme-core-y := core.o ioctl.o
nvme-core-$(CONFIG_TRACING) += trace.o
nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
nvme-core-$(CONFIG_NVM) += lightnvm.o
......
This diff is collapsed.
This diff is collapsed.
......@@ -930,15 +930,15 @@ static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
return ret;
}
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *argp)
{
switch (cmd) {
case NVME_NVM_IOCTL_ADMIN_VIO:
return nvme_nvm_user_vcmd(ns, 1, (void __user *)arg);
return nvme_nvm_user_vcmd(ns, 1, argp);
case NVME_NVM_IOCTL_IO_VIO:
return nvme_nvm_user_vcmd(ns, 0, (void __user *)arg);
return nvme_nvm_user_vcmd(ns, 0, argp);
case NVME_NVM_IOCTL_SUBMIT_VIO:
return nvme_nvm_submit_vio(ns, (void __user *)arg);
return nvme_nvm_submit_vio(ns, argp);
default:
return -ENOTTY;
}
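/*
 * Illustrative sketch, not shown in this diff (the new ioctl.c is in the
 * collapsed hunks above): the ioctl dispatcher is assumed to convert the raw
 * argument to a user pointer once and pass it down, so nvme_nvm_ioctl() no
 * longer casts it for every command:
 *
 *	void __user *argp = (void __user *)arg;
 *	...
 *	if (ns->ndev)
 *		return nvme_nvm_ioctl(ns, cmd, argp);
 */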
......
......@@ -50,19 +50,19 @@ void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
* and those that have a single controller and use the controller node
* directly.
*/
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
struct nvme_ctrl *ctrl, int *flags)
{
if (!multipath) {
sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
} else if (ns->head->disk) {
sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
ctrl->instance, ns->head->instance);
*flags = GENHD_FL_HIDDEN;
} else {
sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
ns->head->instance);
bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name, int *flags)
{
if (!multipath)
return false;
if (!ns->head->disk) {
sprintf(disk_name, "nvme%dn%d", ns->ctrl->subsys->instance,
ns->head->instance);
return true;
}
sprintf(disk_name, "nvme%dc%dn%d", ns->ctrl->subsys->instance,
ns->ctrl->instance, ns->head->instance);
*flags = GENHD_FL_HIDDEN;
return true;
}
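/*
 * Sketch of the core.c caller (that diff is collapsed above; exact call site
 * assumed): the boolean return lets the non-multipath fallback live in one
 * place instead of inside this helper:
 *
 *	if (!nvme_mpath_set_disk_name(ns, disk_name, &flags))
 *		sprintf(disk_name, "nvme%dn%d", ctrl->instance,
 *			ns->head->instance);
 */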
void nvme_failover_req(struct request *req)
......@@ -294,7 +294,7 @@ static bool nvme_available_path(struct nvme_ns_head *head)
return false;
}
blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
static blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
{
struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data;
struct device *dev = disk_to_dev(head->disk);
......@@ -334,6 +334,29 @@ blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
return ret;
}
static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
{
if (!nvme_tryget_ns_head(bdev->bd_disk->private_data))
return -ENXIO;
return 0;
}
static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
{
nvme_put_ns_head(disk->private_data);
}
const struct block_device_operations nvme_ns_head_ops = {
.owner = THIS_MODULE,
.submit_bio = nvme_ns_head_submit_bio,
.open = nvme_ns_head_open,
.release = nvme_ns_head_release,
.ioctl = nvme_ns_head_ioctl,
.getgeo = nvme_getgeo,
.report_zones = nvme_report_zones,
.pr_ops = &nvme_pr_ops,
};
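/*
 * Minimal sketch of the refcount helpers the new open/release hooks rely on.
 * They are defined in the collapsed core.c diff; the bodies below are assumed:
 */
bool nvme_tryget_ns_head(struct nvme_ns_head *head)
{
	return kref_get_unless_zero(&head->ref);
}

void nvme_put_ns_head(struct nvme_ns_head *head)
{
	kref_put(&head->ref, nvme_free_ns_head);
}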
static void nvme_requeue_work(struct work_struct *work)
{
struct nvme_ns_head *head =
......@@ -674,7 +697,7 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
queue_work(nvme_wq, &ns->ctrl->ana_work);
}
} else {
ns->ana_state = NVME_ANA_OPTIMIZED;
ns->ana_state = NVME_ANA_OPTIMIZED;
nvme_mpath_set_live(ns);
}
......
......@@ -413,8 +413,8 @@ struct nvme_ns_head {
bool shared;
int instance;
struct nvme_effects_log *effects;
#ifdef CONFIG_NVME_MULTIPATH
struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
struct bio_list requeue_list;
spinlock_t requeue_lock;
struct work_struct requeue_work;
......@@ -425,6 +425,11 @@ struct nvme_ns_head {
#endif
};
static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
{
return IS_ENABLED(CONFIG_NVME_MULTIPATH) && head->disk;
}
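/*
 * Illustrative use of the new helper (the call sites are in the collapsed
 * core.c/ioctl.c diffs and are assumed here): instead of open-coding
 *
 *	if (IS_ENABLED(CONFIG_NVME_MULTIPATH) && ns->head->disk)
 *
 * callers can simply test
 *
 *	if (nvme_ns_head_multipath(ns->head))
 */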
enum nvme_ns_features {
NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
......@@ -642,16 +647,28 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
void nvme_queue_scan(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
void *log, size_t size, u64 offset);
struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
struct nvme_ns_head **head, int *srcu_idx);
void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx);
bool nvme_tryget_ns_head(struct nvme_ns_head *head);
void nvme_put_ns_head(struct nvme_ns_head *head);
struct nvme_ctrl *nvme_find_get_live_ctrl(struct nvme_subsystem *subsys);
int nvme_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
long nvme_dev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);
extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct pr_ops nvme_pr_ops;
extern const struct block_device_operations nvme_ns_head_ops;
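/*
 * The implementations behind these prototypes live in the new ioctl.c
 * (collapsed diff above). A rough, assumed shape of the multipath entry
 * point: look up a live path and forward to the per-namespace handler.
 *
 *	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
 *	if (!ns)
 *		return -EWOULDBLOCK;
 *	ret = nvme_ns_ioctl(ns, cmd, (void __user *)arg);
 *	nvme_put_ns_from_disk(head, srcu_idx);
 *	return ret;
 */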
#ifdef CONFIG_NVME_MULTIPATH
......@@ -663,8 +680,7 @@ static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
struct nvme_ctrl *ctrl, int *flags);
bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name, int *flags);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
......@@ -676,7 +692,6 @@ void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
blk_qc_t nvme_ns_head_submit_bio(struct bio *bio);
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
......@@ -703,16 +718,11 @@ static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
return false;
}
/*
* Without the multipath code enabled, multiple controller per subsystems are
* visible as devices and thus we cannot use the subsystem instance.
*/
static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
struct nvme_ctrl *ctrl, int *flags)
static inline bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name,
int *flags)
{
sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
return false;
}
static inline void nvme_failover_req(struct request *req)
{
}
......@@ -800,7 +810,7 @@ static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
extern const struct attribute_group nvme_nvm_attr_group;
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *argp);
#else
static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
int node)
......@@ -810,7 +820,7 @@ static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
unsigned long arg)
void __user *argp)
{
return -ENOTTY;
}
......
......@@ -854,7 +854,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
return nvme_setup_prp_simple(dev, req,
&cmnd->rw, &bv);
if (iod->nvmeq->qid &&
if (iod->nvmeq->qid && sgl_threshold &&
dev->ctrl.sgls & ((1 << 0) | (1 << 1)))
return nvme_setup_sgl_simple(dev, req,
&cmnd->rw, &bv);
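/*
 * Context for the added sgl_threshold check: sgl_threshold is a module
 * parameter and 0 means "never use SGLs". The general mapping path already
 * honours it (shape assumed) in nvme_pci_use_sgls():
 *
 *	if (!sgl_threshold || avg_seg_size < sgl_threshold)
 *		return false;
 *
 * so the single-bvec fast path above must check it as well before calling
 * nvme_setup_sgl_simple().
 */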
......@@ -2172,7 +2172,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (nr_io_queues == 0)
return 0;
clear_bit(NVMEQ_ENABLED, &adminq->flags);
if (dev->cmb_use_sqes) {
......
......@@ -96,7 +96,7 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
dev_warn(ns->ctrl->device,
"zone operations:%x not supported for namespace:%u\n",
le16_to_cpu(id->zoc), ns->head->ns_id);
status = -EINVAL;
status = -ENODEV;
goto free_data;
}
......@@ -105,7 +105,7 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
dev_warn(ns->ctrl->device,
"invalid zone size:%llu for namespace:%u\n",
ns->zsze, ns->head->ns_id);
status = -EINVAL;
status = -ENODEV;
goto free_data;
}
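/*
 * Why -EINVAL becomes -ENODEV: the probing code in core.c (collapsed diff
 * above) is assumed to treat -ENODEV as "feature unsupported, hide the block
 * device but keep probing" rather than failing the whole namespace scan,
 * roughly:
 *
 *	if (ret == -ENODEV) {
 *		ns->disk->flags |= GENHD_FL_HIDDEN;
 *		ret = 0;
 *	}
 */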
......
......@@ -1150,7 +1150,7 @@ static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
return -EINVAL;
if (len > NVMET_MN_MAX_SIZE) {
pr_err("Model nubmer size can not exceed %d Bytes\n",
pr_err("Model number size can not exceed %d Bytes\n",
NVMET_MN_MAX_SIZE);
return -EINVAL;
}
......
......@@ -1020,61 +1020,76 @@ nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport)
nvmet_fc_hostport_put(hostport);
}
static struct nvmet_fc_hostport *
nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
struct nvmet_fc_hostport *host;
lockdep_assert_held(&tgtport->lock);
list_for_each_entry(host, &tgtport->host_list, host_list) {
if (host->hosthandle == hosthandle && !host->invalid) {
if (nvmet_fc_hostport_get(host))
return (host);
}
}
return NULL;
}
static struct nvmet_fc_hostport *
nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
struct nvmet_fc_hostport *newhost, *host, *match = NULL;
struct nvmet_fc_hostport *newhost, *match = NULL;
unsigned long flags;
/* if LLDD not implemented, leave as NULL */
if (!hosthandle)
return NULL;
/* take reference for what will be the newly allocated hostport */
/*
* take reference for what will be the newly allocated hostport if
* we end up using a new allocation
*/
if (!nvmet_fc_tgtport_get(tgtport))
return ERR_PTR(-EINVAL);
spin_lock_irqsave(&tgtport->lock, flags);
match = nvmet_fc_match_hostport(tgtport, hosthandle);
spin_unlock_irqrestore(&tgtport->lock, flags);
if (match) {
/* no new allocation - release reference */
nvmet_fc_tgtport_put(tgtport);
return match;
}
newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
if (!newhost) {
spin_lock_irqsave(&tgtport->lock, flags);
list_for_each_entry(host, &tgtport->host_list, host_list) {
if (host->hosthandle == hosthandle && !host->invalid) {
if (nvmet_fc_hostport_get(host)) {
match = host;
break;
}
}
}
spin_unlock_irqrestore(&tgtport->lock, flags);
/* no allocation - release reference */
/* no new allocation - release reference */
nvmet_fc_tgtport_put(tgtport);
return (match) ? match : ERR_PTR(-ENOMEM);
return ERR_PTR(-ENOMEM);
}
newhost->tgtport = tgtport;
newhost->hosthandle = hosthandle;
INIT_LIST_HEAD(&newhost->host_list);
kref_init(&newhost->ref);
spin_lock_irqsave(&tgtport->lock, flags);
list_for_each_entry(host, &tgtport->host_list, host_list) {
if (host->hosthandle == hosthandle && !host->invalid) {
if (nvmet_fc_hostport_get(host)) {
match = host;
break;
}
}
}
match = nvmet_fc_match_hostport(tgtport, hosthandle);
if (match) {
/* new allocation not needed */
kfree(newhost);
newhost = NULL;
/* releasing allocation - release reference */
newhost = match;
/* no new allocation - release reference */
nvmet_fc_tgtport_put(tgtport);
} else
} else {
newhost->tgtport = tgtport;
newhost->hosthandle = hosthandle;
INIT_LIST_HEAD(&newhost->host_list);
kref_init(&newhost->ref);
list_add_tail(&newhost->host_list, &tgtport->host_list);
}
spin_unlock_irqrestore(&tgtport->lock, flags);
return (match) ? match : newhost;
return newhost;
}
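/*
 * Caller contract, unchanged by this refactor (call site assumed for
 * context): NULL means no hosthandle, an ERR_PTR means failure, otherwise a
 * referenced hostport (freshly allocated or matched) is returned:
 *
 *	assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
 *	if (IS_ERR(assoc->hostport))
 *		goto out_free;
 */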
static void
......
......@@ -537,11 +537,36 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
struct nvmet_tcp_cmd *cmd =
container_of(req, struct nvmet_tcp_cmd, req);
struct nvmet_tcp_queue *queue = cmd->queue;
struct nvme_sgl_desc *sgl;
u32 len;
if (unlikely(cmd == queue->cmd)) {
sgl = &cmd->req.cmd->common.dptr.sgl;
len = le32_to_cpu(sgl->length);
/*
* Wait for inline data before processing the response.
* Avoid using helpers, this might happen before
* nvmet_req_init is completed.
*/
if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
len && len < cmd->req.port->inline_data_size &&
nvme_is_write(cmd->req.cmd))
return;
}
llist_add(&cmd->lentry, &queue->resp_list);
queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
}
static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
{
if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
nvmet_tcp_queue_response(&cmd->req);
else
cmd->req.execute(&cmd->req);
}
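/*
 * Resulting flow for a write command that fails parsing but carries inline
 * data (call chain summarised from the hunks above and below; assumed):
 *
 *	nvmet_tcp_done_recv_pdu()
 *	  nvmet_req_init() fails -> nvmet_tcp_handle_req_failure(), return 0
 *	  (keep receiving the inline payload instead of responding at once)
 *	nvmet_tcp_try_recv_data() / nvmet_tcp_try_recv_ddgst()
 *	  rbytes_done == transfer_len -> nvmet_tcp_execute_request()
 *	    NVMET_TCP_F_INIT_FAILED -> nvmet_tcp_queue_response()
 *
 * so the error response is only queued once the inline data has been
 * drained, avoiding access to a request that was never fully initialised.
 */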
static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
{
u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
......@@ -973,7 +998,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
le32_to_cpu(req->cmd->common.dptr.sgl.length));
nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
return -EAGAIN;
return 0;
}
ret = nvmet_tcp_map_data(queue->cmd);
......@@ -1116,10 +1141,8 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
}
nvmet_tcp_unmap_pdu_iovec(cmd);
if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
cmd->rbytes_done == cmd->req.transfer_len) {
cmd->req.execute(&cmd->req);
}
if (cmd->rbytes_done == cmd->req.transfer_len)
nvmet_tcp_execute_request(cmd);
nvmet_prepare_receive_pdu(queue);
return 0;
......@@ -1156,9 +1179,9 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
goto out;
}
if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
cmd->rbytes_done == cmd->req.transfer_len)
cmd->req.execute(&cmd->req);
if (cmd->rbytes_done == cmd->req.transfer_len)
nvmet_tcp_execute_request(cmd);
ret = 0;
out:
nvmet_prepare_receive_pdu(queue);
......