Commit 47454caf authored by Linus Torvalds

Merge tag 'block-5.12-2021-03-05' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - NVMe fixes:
      - more device quirks (Julian Einwag, Zoltán Böszörményi, Pascal
        Terjan)
      - fix a hwmon error return (Daniel Wagner)
      - fix the keep alive timeout initialization (Martin George)
      - ensure the model_number can't be changed on a used subsystem
        (Max Gurtovoy)

 - rsxx missing -EFAULT on copy_to_user() failure (Dan)

 - rsxx remove unused <linux/version.h> include (Tian)

 - kill unused RQF_SORTED (Jean)

 - updated outdated BFQ comments (Joseph)

 - revert work-around commit for bd_size_lock, since we removed the
   offending user in this merge window (Damien)

* tag 'block-5.12-2021-03-05' of git://git.kernel.dk/linux-block:
  nvmet: model_number must be immutable once set
  nvme-fabrics: fix kato initialization
  nvme-hwmon: Return error code when registration fails
  nvme-pci: add quirks for Lexar 256GB SSD
  nvme-pci: mark Kingston SKC2000 as not supporting the deepest power state
  nvme-pci: mark Seagate Nytro XM1440 as QUIRK_NO_NS_DESC_LIST.
  rsxx: Return -EFAULT if copy_to_user() fails
  block/bfq: update comments and default value in docs for fifo_expire
  rsxx: remove unused including <linux/version.h>
  block: Drop leftover references to RQF_SORTED
  block: revert "block: fix bd_size_lock use"
parents f292e873 a2b658e4
@@ -430,13 +430,13 @@ fifo_expire_async
 -----------------
 
 This parameter is used to set the timeout of asynchronous requests. Default
-value of this is 248ms.
+value of this is 250ms.
 
 fifo_expire_sync
 ----------------
 
 This parameter is used to set the timeout of synchronous requests. Default
-value of this is 124ms. In case to favor synchronous requests over asynchronous
+value of this is 125ms. In case to favor synchronous requests over asynchronous
 one, this value should be decreased relative to fifo_expire_async.
 
 low_latency
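The corrected documentation values follow directly from the bfq_fifo_expire[]
table in the next hunk; a quick arithmetic check (standalone userspace C
sketch, not kernel code):

#include <stdio.h>

#define NSEC_PER_SEC  1000000000ULL
#define NSEC_PER_MSEC 1000000ULL

int main(void)
{
	/* Index 0 is async, index 1 is sync -- see the comment fix below. */
	unsigned long long fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };

	printf("async: %llu ms\n", fifo_expire[0] / NSEC_PER_MSEC); /* 250 */
	printf("sync:  %llu ms\n", fifo_expire[1] / NSEC_PER_MSEC); /* 125 */
	return 0;
}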
@@ -162,7 +162,7 @@ BFQ_BFQQ_FNS(split_coop);
 BFQ_BFQQ_FNS(softrt_update);
 #undef BFQ_BFQQ_FNS						\
 
-/* Expiration time of sync (0) and async (1) requests, in ns. */
+/* Expiration time of async (0) and sync (1) requests, in ns. */
 static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
 
 /* Maximum backwards seek (magic number lifted from CFQ), in KiB. */
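The comment fix matters because the table is indexed by the request's sync
flag. A minimal userspace sketch of that convention (only the array
initializer is from the source; fifo_deadline() is an illustrative stand-in
for how BFQ indexes the table with rq_is_sync()):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static const unsigned long long bfq_fifo_expire[2] = {
	NSEC_PER_SEC / 4,	/* async, index 0 */
	NSEC_PER_SEC / 8,	/* sync, index 1 */
};

static unsigned long long fifo_deadline(unsigned long long now_ns, int is_sync)
{
	return now_ns + bfq_fifo_expire[is_sync];
}

int main(void)
{
	printf("async deadline: now + %llu ns\n", fifo_deadline(0, 0));
	printf("sync deadline:  now + %llu ns\n", fifo_deadline(0, 1));
	return 0;
}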
@@ -292,7 +292,6 @@ static const char *const cmd_flag_name[] = {
 #define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
 static const char *const rqf_name[] = {
-	RQF_NAME(SORTED),
 	RQF_NAME(STARTED),
 	RQF_NAME(SOFTBARRIER),
 	RQF_NAME(FLUSH_SEQ),
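RQF_NAME() turns each single-bit flag into a designated array index via
ilog2(), so dropping RQF_SORTED simply leaves slot 0 empty rather than
renumbering the table. A userspace model of the trick (using
__builtin_ctz() in place of the kernel's ilog2(); flag values illustrative):

#include <stdio.h>

#define RQF_STARTED     (1u << 1)
#define RQF_SOFTBARRIER (1u << 2)

/* For a power of two, count-trailing-zeros equals log2. */
#define RQF_NAME(name) [__builtin_ctz(RQF_##name)] = #name

static const char *const rqf_name[] = {
	RQF_NAME(STARTED),
	RQF_NAME(SOFTBARRIER),
};

int main(void)
{
	/* Slot 0 (formerly SORTED) is now a NULL hole; slot 1 still maps. */
	printf("bit 1 -> %s\n", rqf_name[1]);
	return 0;
}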
@@ -385,7 +385,6 @@ bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
 EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
 
 static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
-				       bool has_sched,
 				       struct request *rq)
 {
 	/*
@@ -402,9 +401,6 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
 	if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
 		return true;
 
-	if (has_sched)
-		rq->rq_flags |= RQF_SORTED;
-
 	return false;
 }
@@ -418,7 +414,7 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 	WARN_ON(e && (rq->tag != BLK_MQ_NO_TAG));
 
-	if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
+	if (blk_mq_sched_bypass_insert(hctx, rq)) {
 		/*
 		 * Firstly normal IO request is inserted to scheduler queue or
 		 * sw queue, meantime we add flush request to dispatch queue(
@@ -45,11 +45,10 @@ static void disk_release_events(struct gendisk *disk);
 void set_capacity(struct gendisk *disk, sector_t sectors)
 {
 	struct block_device *bdev = disk->part0;
-	unsigned long flags;
 
-	spin_lock_irqsave(&bdev->bd_size_lock, flags);
+	spin_lock(&bdev->bd_size_lock);
 	i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
-	spin_unlock_irqrestore(&bdev->bd_size_lock, flags);
+	spin_unlock(&bdev->bd_size_lock);
 }
 EXPORT_SYMBOL(set_capacity);
@@ -88,11 +88,9 @@ static int (*check_part[])(struct parsed_partitions *) = {
 static void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&bdev->bd_size_lock, flags);
+	spin_lock(&bdev->bd_size_lock);
 	i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
-	spin_unlock_irqrestore(&bdev->bd_size_lock, flags);
+	spin_unlock(&bdev->bd_size_lock);
 }
 
 static struct parsed_partitions *allocate_partitions(struct gendisk *hd)
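Both hunks revert the same work-around: the _irqsave variants are only
required when some path can take the lock from hard-IRQ context, and the
interrupt-context user of bd_size_lock was removed earlier in this merge
window. A kernel-style sketch of the two forms (illustrative lock and
function names, not a real kernel path):

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(example_lock);

/* Sufficient when example_lock is only ever taken in process context. */
static void update_size(loff_t *size, loff_t new_size)
{
	spin_lock(&example_lock);
	*size = new_size;
	spin_unlock(&example_lock);
}

/* Required as soon as an interrupt handler may also take the lock;
 * otherwise an IRQ arriving while the lock is held can deadlock. */
static void update_size_irq_safe(loff_t *size, loff_t new_size)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	*size = new_size;
	spin_unlock_irqrestore(&example_lock, flags);
}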
@@ -165,15 +165,17 @@ static ssize_t rsxx_cram_read(struct file *fp, char __user *ubuf,
 {
 	struct rsxx_cardinfo *card = file_inode(fp)->i_private;
 	char *buf;
-	ssize_t st;
+	int st;
 
 	buf = kzalloc(cnt, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
 
 	st = rsxx_creg_read(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1);
-	if (!st)
-		st = copy_to_user(ubuf, buf, cnt);
+	if (!st) {
+		if (copy_to_user(ubuf, buf, cnt))
+			st = -EFAULT;
+	}
 	kfree(buf);
 
 	if (st)
 		return st;
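The pattern behind this fix: copy_to_user() returns the number of bytes it
could not copy (0 on success), not a negative errno, so its result must be
converted before being handed back to userspace. A userspace model of the
bug class (sketch; fake_copy_to_user() is an illustrative stand-in):

#include <errno.h>
#include <stdio.h>
#include <string.h>

static unsigned long fake_copy_to_user(void *dst, const void *src,
				       unsigned long n)
{
	memcpy(dst, src, n);	/* pretend the whole copy succeeds */
	return 0;		/* bytes NOT copied */
}

int main(void)
{
	char dst[4];
	int st = 0;	/* mirrors the ssize_t -> int change above */

	if (fake_copy_to_user(dst, "abc", sizeof(dst)))
		st = -EFAULT;	/* the fix: convert to a real errno */

	printf("st = %d\n", st);	/* 0 on success, -14 on fault */
	return 0;
}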
@@ -11,7 +11,6 @@
 #ifndef __RSXX_PRIV_H__
 #define __RSXX_PRIV_H__
 
-#include <linux/version.h>
 #include <linux/semaphore.h>
 
 #include <linux/fs.h>
@@ -630,7 +630,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 	opts->queue_size = NVMF_DEF_QUEUE_SIZE;
 	opts->nr_io_queues = num_online_cpus();
 	opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
-	opts->kato = NVME_DEFAULT_KATO;
+	opts->kato = 0;
 	opts->duplicate_connect = false;
 	opts->fast_io_fail_tmo = NVMF_DEF_FAIL_FAST_TMO;
 	opts->hdr_digest = false;
@@ -893,6 +893,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 		opts->nr_write_queues = 0;
 		opts->nr_poll_queues = 0;
 		opts->duplicate_connect = true;
+	} else {
+		if (!opts->kato)
+			opts->kato = NVME_DEFAULT_KATO;
 	}
 
 	if (ctrl_loss_tmo < 0) {
 		opts->max_reconnects = -1;
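The net effect of the two hunks: kato now starts at 0, and only
non-discovery controllers fall back to the default when the user did not
set one, so discovery connections keep kato == 0 unless explicitly made
persistent. A condensed userspace model (sketch; effective_kato() is an
illustrative helper, and NVME_DEFAULT_KATO is 5 seconds in current kernels):

#include <stdbool.h>
#include <stdio.h>

#define NVME_DEFAULT_KATO 5	/* seconds */

static unsigned int effective_kato(bool discovery, unsigned int user_kato)
{
	unsigned int kato = user_kato;	/* parsing now starts from 0 */

	if (!discovery && !kato)
		kato = NVME_DEFAULT_KATO;
	return kato;
}

int main(void)
{
	printf("discovery controller: kato=%u\n", effective_kato(true, 0));
	printf("regular controller:   kato=%u\n", effective_kato(false, 0));
	return 0;
}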
@@ -248,6 +248,7 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl)
 	if (IS_ERR(hwmon)) {
 		dev_warn(dev, "Failed to instantiate hwmon device\n");
 		kfree(data);
+		return PTR_ERR(hwmon);
 	}
 	ctrl->hwmon_device = hwmon;
 	return 0;
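The one-line fix restores the kernel's error-pointer contract: a function
returning a pointer encodes -errno in the pointer itself, and callers must
unpack it with IS_ERR()/PTR_ERR() rather than warn and fall through. A
userspace model of that convention (sketch; register_device() is
illustrative):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for a registration call that can fail. */
static void *register_device(int fail)
{
	return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000;
}

int main(void)
{
	void *dev = register_device(1);

	if (IS_ERR(dev)) {
		/* The fix: propagate PTR_ERR() instead of returning 0. */
		printf("registration failed: %ld\n", PTR_ERR(dev));
		return 1;
	}
	return 0;
}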
@@ -3234,7 +3234,8 @@ static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_DEVICE(0x126f, 0x2263),	/* Silicon Motion unidentified */
 		.driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
 	{ PCI_DEVICE(0x1bb1, 0x0100),	/* Seagate Nytro Flash Storage */
-		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+				NVME_QUIRK_NO_NS_DESC_LIST, },
 	{ PCI_DEVICE(0x1c58, 0x0003),	/* HGST adapter */
 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 	{ PCI_DEVICE(0x1c58, 0x0023),	/* WDC SN200 adapter */
@@ -3248,6 +3249,9 @@ static const struct pci_device_id nvme_id_table[] = {
 		NVME_QUIRK_IGNORE_DEV_SUBNQN, },
 	{ PCI_DEVICE(0x1987, 0x5016),	/* Phison E16 */
 		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+	{ PCI_DEVICE(0x1b4b, 0x1092),	/* Lexar 256 GB SSD */
+		.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
+				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
 	{ PCI_DEVICE(0x1d1d, 0x1f1f),	/* LighNVM qemu device */
 		.driver_data = NVME_QUIRK_LIGHTNVM, },
 	{ PCI_DEVICE(0x1d1d, 0x2807),	/* CNEX WL */
@@ -3265,6 +3269,8 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
 	{ PCI_DEVICE(0x1d97, 0x2263),	/* SPCC */
 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+	{ PCI_DEVICE(0x2646, 0x2262),	/* KINGSTON SKC2000 NVMe SSD */
+		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
 	{ PCI_DEVICE(0x2646, 0x2263),	/* KINGSTON A2000 NVMe SSD */
 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
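All three quirk additions follow the same pattern: the PCI core matches the
vendor/device pair and hands the driver the driver_data bitmask, so
independent work-arounds compose with '|', as in the new Seagate entry. A
userspace sketch of how such an entry is consumed (struct id_entry and the
bit values are illustrative, not the kernel's definitions):

#include <stdio.h>

#define NVME_QUIRK_DELAY_BEFORE_CHK_RDY	(1u << 3)
#define NVME_QUIRK_NO_NS_DESC_LIST	(1u << 6)

struct id_entry {
	unsigned short vendor, device;
	unsigned long driver_data;
};

static const struct id_entry table[] = {
	{ 0x1bb1, 0x0100,	/* Seagate Nytro Flash Storage */
	  NVME_QUIRK_DELAY_BEFORE_CHK_RDY | NVME_QUIRK_NO_NS_DESC_LIST },
};

int main(void)
{
	unsigned long quirks = table[0].driver_data;

	/* The driver tests individual bits wherever behavior must differ. */
	if (quirks & NVME_QUIRK_NO_NS_DESC_LIST)
		printf("skipping namespace identification descriptor list\n");
	return 0;
}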
@@ -313,27 +313,40 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
 	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
 }
 
-static void nvmet_id_set_model_number(struct nvme_id_ctrl *id,
-				      struct nvmet_subsys *subsys)
+static u16 nvmet_set_model_number(struct nvmet_subsys *subsys)
 {
-	const char *model = NVMET_DEFAULT_CTRL_MODEL;
-	struct nvmet_subsys_model *subsys_model;
+	u16 status = 0;
 
-	rcu_read_lock();
-	subsys_model = rcu_dereference(subsys->model);
-	if (subsys_model)
-		model = subsys_model->number;
-	memcpy_and_pad(id->mn, sizeof(id->mn), model, strlen(model), ' ');
-	rcu_read_unlock();
+	mutex_lock(&subsys->lock);
+	if (!subsys->model_number) {
+		subsys->model_number =
+			kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL);
+		if (!subsys->model_number)
+			status = NVME_SC_INTERNAL;
+	}
+	mutex_unlock(&subsys->lock);
+
+	return status;
 }
 
 static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 {
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	struct nvmet_subsys *subsys = ctrl->subsys;
 	struct nvme_id_ctrl *id;
 	u32 cmd_capsule_size;
 	u16 status = 0;
 
+	/*
+	 * If there is no model number yet, set it now.  It will then remain
+	 * stable for the life time of the subsystem.
+	 */
+	if (!subsys->model_number) {
+		status = nvmet_set_model_number(subsys);
+		if (status)
+			goto out;
+	}
+
 	id = kzalloc(sizeof(*id), GFP_KERNEL);
 	if (!id) {
 		status = NVME_SC_INTERNAL;
@@ -347,7 +360,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 	memset(id->sn, ' ', sizeof(id->sn));
 	bin2hex(id->sn, &ctrl->subsys->serial,
 		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
-	nvmet_id_set_model_number(id, ctrl->subsys);
+	memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
+		       strlen(subsys->model_number), ' ');
 	memcpy_and_pad(id->fr, sizeof(id->fr),
 		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');
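With the model number now guaranteed to exist, the identify path pads it
straight into the fixed-width id->mn field. A userspace model of the
memcpy_and_pad() helper it relies on (sketch of the behavior; the kernel
helper takes the same arguments):

#include <stdio.h>
#include <string.h>

/* Copy up to dst_len bytes, fill the remainder with the pad character;
 * no NUL terminator -- NVMe identify strings are fixed-width. */
static void memcpy_and_pad(void *dst, size_t dst_len,
			   const void *src, size_t src_len, int pad)
{
	if (src_len > dst_len)
		src_len = dst_len;
	memcpy(dst, src, src_len);
	memset((char *)dst + src_len, pad, dst_len - src_len);
}

int main(void)
{
	char mn[40];	/* id->mn is a 40-byte, space-padded field */

	memcpy_and_pad(mn, sizeof(mn), "Linux", strlen("Linux"), ' ');
	printf("[%.*s]\n", (int)sizeof(mn), mn);
	return 0;
}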
@@ -1118,16 +1118,12 @@ static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
 					    char *page)
 {
 	struct nvmet_subsys *subsys = to_subsys(item);
-	struct nvmet_subsys_model *subsys_model;
-	char *model = NVMET_DEFAULT_CTRL_MODEL;
 	int ret;
 
-	rcu_read_lock();
-	subsys_model = rcu_dereference(subsys->model);
-	if (subsys_model)
-		model = subsys_model->number;
-	ret = snprintf(page, PAGE_SIZE, "%s\n", model);
-	rcu_read_unlock();
+	mutex_lock(&subsys->lock);
+	ret = snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number ?
+			subsys->model_number : NVMET_DEFAULT_CTRL_MODEL);
+	mutex_unlock(&subsys->lock);
 
 	return ret;
 }
@@ -1138,14 +1134,17 @@ static bool nvmet_is_ascii(const char c)
 	return c >= 0x20 && c <= 0x7e;
 }
 
-static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
-					     const char *page, size_t count)
+static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
+		const char *page, size_t count)
 {
-	struct nvmet_subsys *subsys = to_subsys(item);
-	struct nvmet_subsys_model *new_model;
-	char *new_model_number;
 	int pos = 0, len;
 
+	if (subsys->model_number) {
+		pr_err("Can't set model number. %s is already assigned\n",
+		       subsys->model_number);
+		return -EINVAL;
+	}
+
 	len = strcspn(page, "\n");
 	if (!len)
 		return -EINVAL;
@@ -1155,28 +1154,25 @@ static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
 		return -EINVAL;
 	}
 
-	new_model_number = kmemdup_nul(page, len, GFP_KERNEL);
-	if (!new_model_number)
+	subsys->model_number = kmemdup_nul(page, len, GFP_KERNEL);
+	if (!subsys->model_number)
 		return -ENOMEM;
 
-	new_model = kzalloc(sizeof(*new_model) + len + 1, GFP_KERNEL);
-	if (!new_model) {
-		kfree(new_model_number);
-		return -ENOMEM;
-	}
-	memcpy(new_model->number, new_model_number, len);
+	return count;
+}
+
+static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
+					     const char *page, size_t count)
+{
+	struct nvmet_subsys *subsys = to_subsys(item);
+	ssize_t ret;
 
 	down_write(&nvmet_config_sem);
 	mutex_lock(&subsys->lock);
-	new_model = rcu_replace_pointer(subsys->model, new_model,
-					mutex_is_locked(&subsys->lock));
+	ret = nvmet_subsys_attr_model_store_locked(subsys, page, count);
 	mutex_unlock(&subsys->lock);
 	up_write(&nvmet_config_sem);
 
-	kfree_rcu(new_model, rcuhead);
-	kfree(new_model_number);
-
-	return count;
+	return ret;
 }
 CONFIGFS_ATTR(nvmet_subsys_, attr_model);
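The configfs rework enforces write-once semantics: once model_number is set,
the locked store helper rejects further writes instead of racing an RCU
pointer swap. A userspace model of that discipline (sketch; a pthread mutex
stands in for subsys->lock, and store_model() is illustrative):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static char *model_number;

static int store_model(const char *new_model)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (model_number)
		ret = -EINVAL;		/* already assigned, immutable */
	else if (!(model_number = strdup(new_model)))
		ret = -ENOMEM;
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	printf("first store:  %d\n", store_model("example-model"));	/* 0 */
	printf("second store: %d\n", store_model("other-model"));	/* -EINVAL */
	free(model_number);
	return 0;
}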
@@ -1532,7 +1532,7 @@ static void nvmet_subsys_free(struct kref *ref)
 
 	nvmet_passthru_subsys_free(subsys);
 	kfree(subsys->subsysnqn);
-	kfree_rcu(subsys->model, rcuhead);
+	kfree(subsys->model_number);
 	kfree(subsys);
 }
@@ -208,11 +208,6 @@ struct nvmet_ctrl {
 	bool			pi_support;
 };
 
-struct nvmet_subsys_model {
-	struct rcu_head		rcuhead;
-	char			number[];
-};
-
 struct nvmet_subsys {
 	enum nvme_subsys_type	type;
@@ -242,7 +237,7 @@ struct nvmet_subsys {
 	struct config_group	namespaces_group;
 	struct config_group	allowed_hosts_group;
 
-	struct nvmet_subsys_model	__rcu *model;
+	char			*model_number;
 
 #ifdef CONFIG_NVME_TARGET_PASSTHRU
 	struct nvme_ctrl	*passthru_ctrl;
@@ -65,8 +65,6 @@ typedef void (rq_end_io_fn)(struct request *, blk_status_t);
  * request flags */
 typedef __u32 __bitwise req_flags_t;
 
-/* elevator knows about this request */
-#define RQF_SORTED		((__force req_flags_t)(1 << 0))
 /* drive already may have started this one */
 #define RQF_STARTED		((__force req_flags_t)(1 << 1))
 /* may not be passed by ioscheduler */