Commit 462abc9d authored by Linus Torvalds

Merge tag 'block-5.19-2022-06-16' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - NVMe pull request from Christoph
      - Quirks, quirks, quirks to work around buggy consumer grade
        devices (Keith Busch, Ning Wang, Stefan Reiter, Rasheed Hsueh)
      - Better kernel messages for devices that need quirking (Keith
        Busch)
      - Make a kernel message more useful (Thomas Weißschuh)

 - MD pull request from Song, with a few fixes

 - blk-mq sysfs locking fixes (Ming)

 - BFQ stats fix (Bart)

 - blk-mq offline queue fix (Bart)

 - blk-mq flush request tag fix (Ming)

* tag 'block-5.19-2022-06-16' of git://git.kernel.dk/linux-block:
  block/bfq: Enable I/O statistics
  blk-mq: don't clear flush_rq from tags->rqs[]
  blk-mq: avoid to touch q->elevator without any protection
  blk-mq: protect q->elevator by ->sysfs_lock in blk_mq_elv_switch_none
  block: Fix handling of offline queues in blk_mq_alloc_request_hctx()
  md/raid5-ppl: Fix argument order in bio_alloc_bioset()
  Revert "md: don't unregister sync_thread with reconfig_mutex held"
  nvme-pci: disable write zeroes support on UMIC and Samsung SSDs
  nvme-pci: avoid the deepest sleep state on ZHITAI TiPro7000 SSDs
  nvme-pci: sk hynix p31 has bogus namespace ids
  nvme-pci: smi has bogus namespace ids
  nvme-pci: phison e12 has bogus namespace ids
  nvme-pci: add NVME_QUIRK_BOGUS_NID for ADATA XPG GAMMIX S50
  nvme-pci: add trouble shooting steps for timeouts
  nvme: add bug report info for global duplicate id
  nvme: add device name to warning in uuid_show()
parents f8e174c3 b96f3cab
@@ -7046,6 +7046,7 @@ static void bfq_exit_queue(struct elevator_queue *e)
 	spin_unlock_irq(&bfqd->lock);
 #endif
+	blk_stat_disable_accounting(bfqd->queue);
 	wbt_enable_default(bfqd->queue);
 	kfree(bfqd);
@@ -7188,7 +7189,12 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
 	bfq_init_root_group(bfqd->root_group, bfqd);
 	bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
+	/* We dispatch from request queue wide instead of hw queue */
+	blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
 	wbt_disable_default(q);
+	blk_stat_enable_accounting(q);
 	return 0;
 out_free:
...
@@ -564,6 +564,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 	int ret;
 	if (!e) {
+		blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
 		q->elevator = NULL;
 		q->nr_requests = q->tag_set->queue_depth;
 		return 0;
...
@@ -579,6 +579,8 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	if (!blk_mq_hw_queue_mapped(data.hctx))
 		goto out_queue_exit;
 	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
+	if (cpu >= nr_cpu_ids)
+		goto out_queue_exit;
 	data.ctx = __blk_mq_get_ctx(q, cpu);
 	if (!q->elevator)
@@ -2140,20 +2142,6 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 }
 EXPORT_SYMBOL(blk_mq_run_hw_queue);
-/*
- * Is the request queue handled by an IO scheduler that does not respect
- * hardware queues when dispatching?
- */
-static bool blk_mq_has_sqsched(struct request_queue *q)
-{
-	struct elevator_queue *e = q->elevator;
-	if (e && e->type->ops.dispatch_request &&
-	    !(e->type->elevator_features & ELEVATOR_F_MQ_AWARE))
-		return true;
-	return false;
-}
 /*
  * Return prefered queue to dispatch from (if any) for non-mq aware IO
  * scheduler.
@@ -2186,7 +2174,7 @@ void blk_mq_run_hw_queues(struct request_queue *q, bool async)
 	unsigned long i;
 	sq_hctx = NULL;
-	if (blk_mq_has_sqsched(q))
+	if (blk_queue_sq_sched(q))
 		sq_hctx = blk_mq_get_sq_hctx(q);
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (blk_mq_hctx_stopped(hctx))
@@ -2214,7 +2202,7 @@ void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
 	unsigned long i;
 	sq_hctx = NULL;
-	if (blk_mq_has_sqsched(q))
+	if (blk_queue_sq_sched(q))
 		sq_hctx = blk_mq_get_sq_hctx(q);
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (blk_mq_hctx_stopped(hctx))
@@ -3443,8 +3431,9 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 	if (blk_mq_hw_queue_mapped(hctx))
 		blk_mq_tag_idle(hctx);
-	blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
-			set->queue_depth, flush_rq);
+	if (blk_queue_init_done(q))
+		blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
+				set->queue_depth, flush_rq);
 	if (set->ops->exit_request)
 		set->ops->exit_request(set, flush_rq, hctx_idx);
@@ -4438,12 +4427,14 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
 	if (!qe)
 		return false;
+	/* q->elevator needs protection from ->sysfs_lock */
+	mutex_lock(&q->sysfs_lock);
 	INIT_LIST_HEAD(&qe->node);
 	qe->q = q;
 	qe->type = q->elevator->type;
 	list_add(&qe->node, head);
-	mutex_lock(&q->sysfs_lock);
 	/*
 	 * After elevator_switch_mq, the previous elevator_queue will be
 	 * released by elevator_release. The reference of the io scheduler
...
@@ -421,6 +421,8 @@ static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
 	blk_stat_enable_accounting(q);
+	blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
 	eq->elevator_data = kqd;
 	q->elevator = eq;
@@ -1033,7 +1035,6 @@ static struct elevator_type kyber_sched = {
 #endif
 	.elevator_attrs = kyber_sched_attrs,
 	.elevator_name = "kyber",
-	.elevator_features = ELEVATOR_F_MQ_AWARE,
 	.elevator_owner = THIS_MODULE,
 };
...
@@ -642,6 +642,9 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
 	spin_lock_init(&dd->lock);
 	spin_lock_init(&dd->zone_lock);
+	/* We dispatch from request queue wide instead of hw queue */
+	blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
 	q->elevator = eq;
 	return 0;
...
@@ -3725,7 +3725,7 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
 	if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
 		if (mddev->sync_thread) {
 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-			md_reap_sync_thread(mddev, false);
+			md_reap_sync_thread(mddev);
 		}
 	} else if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
 		return -EBUSY;
...
@@ -4831,7 +4831,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
 		flush_workqueue(md_misc_wq);
 		if (mddev->sync_thread) {
 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-			md_reap_sync_thread(mddev, true);
+			md_reap_sync_thread(mddev);
 		}
 		mddev_unlock(mddev);
 	}
@@ -6197,7 +6197,7 @@ static void __md_stop_writes(struct mddev *mddev)
 	flush_workqueue(md_misc_wq);
 	if (mddev->sync_thread) {
 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-		md_reap_sync_thread(mddev, true);
+		md_reap_sync_thread(mddev);
 	}
 	del_timer_sync(&mddev->safemode_timer);
@@ -9303,7 +9303,7 @@ void md_check_recovery(struct mddev *mddev)
 			 * ->spare_active and clear saved_raid_disk
 			 */
 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-			md_reap_sync_thread(mddev, true);
+			md_reap_sync_thread(mddev);
 			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
 			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
@@ -9338,7 +9338,7 @@ void md_check_recovery(struct mddev *mddev)
 			goto unlock;
 		}
 		if (mddev->sync_thread) {
-			md_reap_sync_thread(mddev, true);
+			md_reap_sync_thread(mddev);
 			goto unlock;
 		}
 		/* Set RUNNING before clearing NEEDED to avoid
@@ -9411,18 +9411,14 @@ void md_check_recovery(struct mddev *mddev)
 }
 EXPORT_SYMBOL(md_check_recovery);
-void md_reap_sync_thread(struct mddev *mddev, bool reconfig_mutex_held)
+void md_reap_sync_thread(struct mddev *mddev)
 {
 	struct md_rdev *rdev;
 	sector_t old_dev_sectors = mddev->dev_sectors;
 	bool is_reshaped = false;
-	if (reconfig_mutex_held)
-		mddev_unlock(mddev);
 	/* resync has finished, collect result */
 	md_unregister_thread(&mddev->sync_thread);
-	if (reconfig_mutex_held)
-		mddev_lock_nointr(mddev);
 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
 	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
 	    mddev->degraded != mddev->raid_disks) {
...
@@ -719,7 +719,7 @@ extern struct md_thread *md_register_thread(
 extern void md_unregister_thread(struct md_thread **threadp);
 extern void md_wakeup_thread(struct md_thread *thread);
 extern void md_check_recovery(struct mddev *mddev);
-extern void md_reap_sync_thread(struct mddev *mddev, bool reconfig_mutex_held);
+extern void md_reap_sync_thread(struct mddev *mddev);
 extern int mddev_init_writes_pending(struct mddev *mddev);
 extern bool md_write_start(struct mddev *mddev, struct bio *bi);
 extern void md_write_inc(struct mddev *mddev, struct bio *bi);
...
@@ -629,9 +629,9 @@ static void ppl_do_flush(struct ppl_io_unit *io)
 		if (bdev) {
 			struct bio *bio;
-			bio = bio_alloc_bioset(bdev, 0, GFP_NOIO,
-					       REQ_OP_WRITE | REQ_PREFLUSH,
-					       &ppl_conf->flush_bs);
+			bio = bio_alloc_bioset(bdev, 0,
+					       REQ_OP_WRITE | REQ_PREFLUSH,
+					       GFP_NOIO, &ppl_conf->flush_bs);
 			bio->bi_private = io;
 			bio->bi_end_io = ppl_flush_endio;
...
@@ -3285,8 +3285,8 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
 	 * we have no UUID set
 	 */
 	if (uuid_is_null(&ids->uuid)) {
-		printk_ratelimited(KERN_WARNING
-				   "No UUID available providing old NGUID\n");
+		dev_warn_ratelimited(dev,
+				     "No UUID available providing old NGUID\n");
 		return sysfs_emit(buf, "%pU\n", ids->nguid);
 	}
 	return sysfs_emit(buf, "%pU\n", &ids->uuid);
@@ -3863,6 +3863,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
 	if (ret) {
 		dev_err(ctrl->device,
			"globally duplicate IDs for nsid %d\n", nsid);
+		nvme_print_device_info(ctrl);
 		return ret;
 	}
...
@@ -503,6 +503,7 @@ struct nvme_ctrl_ops {
 	void (*submit_async_event)(struct nvme_ctrl *ctrl);
 	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
 	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
+	void (*print_device_info)(struct nvme_ctrl *ctrl);
 };
@@ -548,6 +549,33 @@ static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
 	return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id));
 }
+/*
+ * Return the length of the string without the space padding
+ */
+static inline int nvme_strlen(char *s, int len)
+{
+	while (s[len - 1] == ' ')
+		len--;
+	return len;
+}
+static inline void nvme_print_device_info(struct nvme_ctrl *ctrl)
+{
+	struct nvme_subsystem *subsys = ctrl->subsys;
+	if (ctrl->ops->print_device_info) {
+		ctrl->ops->print_device_info(ctrl);
+		return;
+	}
+	dev_err(ctrl->device,
+		"VID:%04x model:%.*s firmware:%.*s\n", subsys->vendor_id,
+		nvme_strlen(subsys->model, sizeof(subsys->model)),
+		subsys->model, nvme_strlen(subsys->firmware_rev,
+					   sizeof(subsys->firmware_rev)),
+		subsys->firmware_rev);
+}
 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
 void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
			    const char *dev_name);
...
@@ -1334,6 +1334,14 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
 		dev_warn(dev->ctrl.device,
			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
			 csts, result);
+	if (csts != ~0)
+		return;
+	dev_warn(dev->ctrl.device,
+		 "Does your device have a faulty power saving mode enabled?\n");
+	dev_warn(dev->ctrl.device,
+		 "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n");
 }
@@ -2976,6 +2984,21 @@ static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
 	return snprintf(buf, size, "%s\n", dev_name(&pdev->dev));
 }
+static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl)
+{
+	struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
+	struct nvme_subsystem *subsys = ctrl->subsys;
+	dev_err(ctrl->device,
+		"VID:DID %04x:%04x model:%.*s firmware:%.*s\n",
+		pdev->vendor, pdev->device,
+		nvme_strlen(subsys->model, sizeof(subsys->model)),
+		subsys->model, nvme_strlen(subsys->firmware_rev,
+					   sizeof(subsys->firmware_rev)),
+		subsys->firmware_rev);
+}
 static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
 	.name = "pcie",
 	.module = THIS_MODULE,
@@ -2987,6 +3010,7 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
 	.free_ctrl = nvme_pci_free_ctrl,
 	.submit_async_event = nvme_pci_submit_async_event,
 	.get_address = nvme_pci_get_address,
+	.print_device_info = nvme_pci_print_device_info,
 };
 static int nvme_dev_map(struct nvme_dev *dev)
@@ -3421,7 +3445,8 @@ static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
-		.driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
+		.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
+				NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
				NVME_QUIRK_NO_NS_DESC_LIST, },
@@ -3437,6 +3462,8 @@ static const struct pci_device_id nvme_id_table[] = {
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
				NVME_QUIRK_DISABLE_WRITE_ZEROES|
				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+	{ PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */
+		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
 	{ PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */
@@ -3449,10 +3476,20 @@ static const struct pci_device_id nvme_id_table[] = {
				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
 	{ PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+	{ PCI_DEVICE(0x1c5c, 0x174a), /* SK Hynix P31 SSD */
+		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
 	{ PCI_DEVICE(0x1d97, 0x2263), /* SPCC */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+	{ PCI_DEVICE(0x144d, 0xa80b), /* Samsung PM9B1 256G and 512G */
+		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+	{ PCI_DEVICE(0x144d, 0xa809), /* Samsung MZALQ256HBJD 256G */
+		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+	{ PCI_DEVICE(0x1cc4, 0x6303), /* UMIS RPJTJ512MGE1QDY 512G */
+		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+	{ PCI_DEVICE(0x1cc4, 0x6302), /* UMIS RPJTJ256MGE1QDY 256G */
+		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
 	{ PCI_DEVICE(0x2646, 0x2262), /* KINGSTON SKC2000 NVMe SSD */
		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
 	{ PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
@@ -3463,6 +3500,10 @@ static const struct pci_device_id nvme_id_table[] = {
		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
+	{ PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */
+		.driver_data = NVME_QUIRK_BOGUS_NID, },
+	{ PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */
+		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
...
@@ -575,6 +575,7 @@ struct request_queue {
 #define QUEUE_FLAG_RQ_ALLOC_TIME 27	/* record rq->alloc_time_ns */
 #define QUEUE_FLAG_HCTX_ACTIVE	28	/* at least one blk-mq hctx is active */
 #define QUEUE_FLAG_NOWAIT	29	/* device supports NOWAIT */
+#define QUEUE_FLAG_SQ_SCHED	30	/* single queue style io dispatch */
 #define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_SAME_COMP) |		\
@@ -616,6 +617,7 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
 #define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
 #define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
 #define blk_queue_nowait(q)	test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags)
+#define blk_queue_sq_sched(q)	test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
 extern void blk_set_pm_only(struct request_queue *q);
 extern void blk_clear_pm_only(struct request_queue *q);
@@ -1006,8 +1008,6 @@ void disk_set_independent_access_ranges(struct gendisk *disk,
  */
 /* Supports zoned block devices sequential write constraint */
 #define ELEVATOR_F_ZBD_SEQ_WRITE	(1U << 0)
-/* Supports scheduling on multiple hardware queues */
-#define ELEVATOR_F_MQ_AWARE		(1U << 1)
 extern void blk_queue_required_elevator_features(struct request_queue *q,
				unsigned int features);
...