Commit fb649bda authored by Linus Torvalds

Merge tag 'block-5.18-2022-04-15' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - Moving of lower_48_bits() to the block layer and a fix for the
   unaligned_be48 added with that originally (Alexander, Keith)

 - Fix a bad WARN_ON() for trim size checking (Ming)

 - A polled IO timeout fix for null_blk (Ming)

 - Silence IO error printing for dead disks (Christoph)

 - Compat mode range fix (Khazhismel)

 - NVMe pull request via Christoph:
     - Tone down the error logging added this merge window a bit
       (Chaitanya Kulkarni)
     - Quirk devices with non-unique unique identifiers (Christoph)

* tag 'block-5.18-2022-04-15' of git://git.kernel.dk/linux-block:
  block: don't print I/O error warning for dead disks
  block/compat_ioctl: fix range check in BLKGETSIZE
  nvme-pci: disable namespace identifiers for Qemu controllers
  nvme-pci: disable namespace identifiers for the MAXIO MAP1002/1202
  nvme: add a quirk to disable namespace identifiers
  nvme: don't print verbose errors for internal passthrough requests
  block: null_blk: end timed out poll request
  block: fix offset/size check in bio_trim()
  asm-generic: fix __get_unaligned_be48() on 32 bit platforms
  block: move lower_48_bits() to block
parents 0647b9cc 89a2ee91
@@ -1598,7 +1598,7 @@ EXPORT_SYMBOL(bio_split);
 void bio_trim(struct bio *bio, sector_t offset, sector_t size)
 {
 	if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS ||
-			 offset + size > bio->bi_iter.bi_size))
+			 offset + size > bio_sectors(bio)))
 		return;
 
 	size <<= 9;
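The change above is a units fix: offset and size are sector counts, while bio->bi_iter.bi_size is a byte count, so the old bound compared sectors against bytes and could let an out-of-range trim slip past the WARN_ON_ONCE(). A minimal standalone sketch of the mismatch, with illustrative values rather than kernel code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t bi_size = 8 << 9;	/* bio payload: 8 sectors = 4096 bytes */
	uint64_t offset = 4, size = 8;	/* requested trim, in 512-byte sectors */

	/* old check: sector counts compared against a byte count, never trips here */
	printf("old check trips: %d\n", offset + size > bi_size);
	/* new check: compared against bio_sectors(), i.e. bi_size >> 9 */
	printf("new check trips: %d\n", offset + size > (bi_size >> 9));
	return 0;
}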
...
@@ -794,7 +794,8 @@ bool blk_update_request(struct request *req, blk_status_t error,
 #endif
 
 	if (unlikely(error && !blk_rq_is_passthrough(req) &&
-		     !(req->rq_flags & RQF_QUIET))) {
+		     !(req->rq_flags & RQF_QUIET)) &&
+	    !test_bit(GD_DEAD, &req->q->disk->state)) {
 		blk_print_req_error(req, error);
 		trace_block_rq_error(req, error, nr_bytes);
 	}
...
@@ -629,7 +629,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 		return compat_put_long(argp,
 			(bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
 	case BLKGETSIZE:
-		if (bdev_nr_sectors(bdev) > ~0UL)
+		if (bdev_nr_sectors(bdev) > ~(compat_ulong_t)0)
 			return -EFBIG;
 		return compat_put_ulong(argp, bdev_nr_sectors(bdev));
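Some context on the bound used above: in the compat path the userspace "unsigned long" is 32 bits, but on a 64-bit kernel ~0UL is 2^64 - 1, so the old check could never fire and an oversized sector count was silently truncated instead of returning -EFBIG. A hedged sketch of the arithmetic, with compat_ulong_t stubbed as uint32_t and assuming a 64-bit host:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t compat_ulong_t;	/* stand-in for the 32-bit userspace long */

int main(void)
{
	uint64_t nr_sectors = 1ULL << 33;		/* e.g. a 4 TiB disk with 512-byte sectors */
	compat_ulong_t compat_max = ~(compat_ulong_t)0;	/* 2^32 - 1 */

	/* old bound: ~0UL is 2^64 - 1 here, so the overflow check never trips */
	printf("old check trips: %d\n", nr_sectors > ~0UL);
	/* new bound: anything above 2^32 - 1 is rejected with -EFBIG */
	printf("new check trips: %d\n", nr_sectors > compat_max);
	/* what a 32-bit caller would otherwise have received */
	printf("truncated value: %u\n", (compat_ulong_t)nr_sectors);
	return 0;
}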
...
@@ -1600,7 +1600,7 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
 	 * Only fake timeouts need to execute blk_mq_complete_request() here.
 	 */
 	cmd->error = BLK_STS_TIMEOUT;
-	if (cmd->fake_timeout)
+	if (cmd->fake_timeout || hctx->type == HCTX_TYPE_POLL)
 		blk_mq_complete_request(rq);
 	return BLK_EH_DONE;
 }
...
@@ -366,7 +366,7 @@ static inline void nvme_end_req(struct request *req)
 {
 	blk_status_t status = nvme_error_status(nvme_req(req)->status);
 
-	if (unlikely(nvme_req(req)->status != NVME_SC_SUCCESS))
+	if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET)))
 		nvme_log_error(req);
 	nvme_end_req_zoned(req);
 	nvme_trace_bio_complete(req);
@@ -1015,6 +1015,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		goto out;
 	}
 
+	req->rq_flags |= RQF_QUIET;
 	ret = nvme_execute_rq(req, at_head);
 	if (result && ret >= 0)
 		*result = nvme_req(req)->result;
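Taken together, the two nvme core hunks above form a submit/complete pair: internal passthrough commands mark themselves RQF_QUIET at submission, and the completion path skips nvme_log_error() for anything carrying that flag. A simplified, self-contained model of the gating (flag value and struct layout are illustrative, not the kernel's):

#include <stdio.h>

#define RQF_QUIET	(1u << 0)	/* illustrative bit, not the kernel's value */

struct req {
	unsigned int rq_flags;
	int status;		/* non-zero means the command failed */
};

/* mirrors the nvme_end_req() change: only noisy, failed requests get logged */
static void end_req(const struct req *req)
{
	if (req->status && !(req->rq_flags & RQF_QUIET))
		printf("I/O error, status 0x%x\n", req->status);
}

int main(void)
{
	struct req user_io  = { 0, 0x4002 };		/* logged */
	struct req internal = { RQF_QUIET, 0x4002 };	/* silenced */

	end_req(&user_io);
	end_req(&internal);
	return 0;
}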
@@ -1287,6 +1288,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
 				 warn_str, cur->nidl);
 			return -1;
 		}
+		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+			return NVME_NIDT_EUI64_LEN;
 		memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
 		return NVME_NIDT_EUI64_LEN;
 	case NVME_NIDT_NGUID:
@@ -1295,6 +1298,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
 				 warn_str, cur->nidl);
 			return -1;
 		}
+		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+			return NVME_NIDT_NGUID_LEN;
 		memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
 		return NVME_NIDT_NGUID_LEN;
 	case NVME_NIDT_UUID:
@@ -1303,6 +1308,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
 				 warn_str, cur->nidl);
 			return -1;
 		}
+		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+			return NVME_NIDT_UUID_LEN;
 		uuid_copy(&ids->uuid, data + sizeof(*cur));
 		return NVME_NIDT_UUID_LEN;
 	case NVME_NIDT_CSI:
@@ -1399,12 +1406,18 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
 	if ((*id)->ncap == 0) /* namespace not allocated or attached */
 		goto out_free_id;
 
-	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
-	    !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
-		memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
-	if (ctrl->vs >= NVME_VS(1, 2, 0) &&
-	    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
-		memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
+	if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
+		dev_info(ctrl->device,
+			 "Ignoring bogus Namespace Identifiers\n");
+	} else {
+		if (ctrl->vs >= NVME_VS(1, 1, 0) &&
+		    !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
+			memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
+		if (ctrl->vs >= NVME_VS(1, 2, 0) &&
+		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
+			memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
+	}
 
 	return 0;
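The !memchr_inv(..., 0, ...) tests kept in the else branch above read as "this identifier is still all zeroes, i.e. was not filled in from the descriptor list", in which case the value from Identify Namespace is used instead. A small userspace sketch of that idiom, with a stand-in for the kernel's memchr_inv() (which returns a pointer to the first byte differing from the given value, or NULL if none does):

#include <stddef.h>
#include <stdio.h>

/* userspace stand-in for memchr_inv() */
static const void *memchr_inv_sketch(const void *p, int c, size_t n)
{
	const unsigned char *s = p;

	for (size_t i = 0; i < n; i++)
		if (s[i] != (unsigned char)c)
			return s + i;
	return NULL;	/* every byte equals c */
}

int main(void)
{
	unsigned char unset_eui64[8] = { 0 };
	unsigned char set_eui64[8]   = { 0, 0, 0, 0, 0, 0, 0, 1 };

	/* !memchr_inv(buf, 0, len) means "buf is still all zeroes, fall back" */
	printf("fall back for unset id: %d\n", !memchr_inv_sketch(unset_eui64, 0, sizeof(unset_eui64)));
	printf("fall back for set id:   %d\n", !memchr_inv_sketch(set_eui64, 0, sizeof(set_eui64)));
	return 0;
}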
...
@@ -144,6 +144,11 @@ enum nvme_quirks {
 	 * encoding the generation sequence number.
 	 */
 	NVME_QUIRK_SKIP_CID_GEN		= (1 << 17),
+
+	/*
+	 * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
+	 */
+	NVME_QUIRK_BOGUS_NID		= (1 << 18),
 };
 
 /*
...
@@ -3409,7 +3409,10 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
 	{ PCI_VDEVICE(INTEL, 0x5845),	/* Qemu emulated controller */
 		.driver_data = NVME_QUIRK_IDENTIFY_CNS |
-				NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+				NVME_QUIRK_DISABLE_WRITE_ZEROES |
+				NVME_QUIRK_BOGUS_NID, },
+	{ PCI_VDEVICE(REDHAT, 0x0010),	/* Qemu emulated controller */
+		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x126f, 0x2263),	/* Silicon Motion unidentified */
 		.driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
 	{ PCI_DEVICE(0x1bb1, 0x0100),	/* Seagate Nytro Flash Storage */
@@ -3447,6 +3450,10 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
 	{ PCI_DEVICE(0x2646, 0x2263),	/* KINGSTON A2000 NVMe SSD */
 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+	{ PCI_DEVICE(0x1e4B, 0x1002),	/* MAXIO MAP1002 */
+		.driver_data = NVME_QUIRK_BOGUS_NID, },
+	{ PCI_DEVICE(0x1e4B, 0x1202),	/* MAXIO MAP1202 */
+		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
 		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
...
@@ -143,7 +143,7 @@ static inline void put_unaligned_be48(const u64 val, void *p)
 static inline u64 __get_unaligned_be48(const u8 *p)
 {
-	return (u64)p[0] << 40 | (u64)p[1] << 32 | p[2] << 24 |
+	return (u64)p[0] << 40 | (u64)p[1] << 32 | (u64)p[2] << 24 |
 		p[3] << 16 | p[4] << 8 | p[5];
 }
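The added cast above is about C's integer promotions: p[2] is widened to a 32-bit int before the shift, so for byte values of 0x80 and up the p[2] << 24 term lands in the sign bit and, in practice, sign-extends when it is OR'ed into the 64-bit result, corrupting bits 32-47 (the shift itself is also formally undefined at that point). A small standalone sketch of the effect, using stdint types in place of the kernel's u8/u64:

#include <stdint.h>
#include <stdio.h>

/* old expression: the p[2] term is evaluated in 32-bit signed arithmetic */
static uint64_t be48_old(const uint8_t *p)
{
	return (uint64_t)p[0] << 40 | (uint64_t)p[1] << 32 | p[2] << 24 |
		p[3] << 16 | p[4] << 8 | p[5];
}

/* fixed expression: the cast keeps the whole term in 64-bit arithmetic */
static uint64_t be48_new(const uint8_t *p)
{
	return (uint64_t)p[0] << 40 | (uint64_t)p[1] << 32 | (uint64_t)p[2] << 24 |
		p[3] << 16 | p[4] << 8 | p[5];
}

int main(void)
{
	const uint8_t p[6] = { 0x01, 0x02, 0x80, 0x00, 0x00, 0x00 };

	printf("old: %llx\n", (unsigned long long)be48_old(p));	/* typically ffffffff80000000 */
	printf("new: %llx\n", (unsigned long long)be48_new(p));	/* 10280000000 */
	return 0;
}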
...
@@ -63,15 +63,6 @@
 	} \
 )
 
-/**
- * lower_48_bits() - return bits 0-47 of a number
- * @n: the number we're accessing
- */
-static inline u64 lower_48_bits(u64 n)
-{
-	return n & ((1ull << 48) - 1);
-}
-
 /**
  * upper_32_bits - return bits 32-63 of a number
  * @n: the number we're accessing
...
@@ -59,6 +59,15 @@ struct crc64_pi_tuple {
 	__u8 ref_tag[6];
 };
 
+/**
+ * lower_48_bits() - return bits 0-47 of a number
+ * @n: the number we're accessing
+ */
+static inline u64 lower_48_bits(u64 n)
+{
+	return n & ((1ull << 48) - 1);
+}
+
 static inline u64 ext_pi_ref_tag(struct request *rq)
 {
 	unsigned int shift = ilog2(queue_logical_block_size(rq->q));
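The moved helper is a plain mask: it narrows a 64-bit value to its low 48 bits, presumably so a reference tag can fit the 6-byte ref_tag field declared just above. A quick standalone illustration, not kernel code:

#include <stdint.h>
#include <stdio.h>

static inline uint64_t lower_48_bits(uint64_t n)
{
	return n & ((1ULL << 48) - 1);	/* keep bits 0-47, drop bits 48-63 */
}

int main(void)
{
	uint64_t ref = 0x0123456789abcdefULL;

	/* prints 456789abcdef: the top 16 bits (0x0123) are masked away */
	printf("%llx\n", (unsigned long long)lower_48_bits(ref));
	return 0;
}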
...