Commit 7fb019c4 authored by Martin K. Petersen

scsi: sd: Switch to using scsi_device VPD pages

Use the VPD pages already provided by the SCSI midlayer. No need to request
them individually in the SCSI disk driver.

Link: https://lore.kernel.org/r/20220302053559.32147-8-martin.petersen@oracle.com
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent e38d9e83
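
For context, the pattern this commit adopts: instead of kmalloc()ing a buffer and fetching each page with scsi_get_vpd_page(), the driver dereferences the copy the midlayer already caches on the scsi_device (vpd_pgb0/vpd_pgb1/vpd_pgb2) inside an RCU read-side critical section. A minimal sketch of the consumer side, using the fields and accessors that appear in the diff below (the helper name and its placement are illustrative, not part of the commit):

	/* Illustrative only: read the OPTIMAL TRANSFER LENGTH field from the
	 * cached Block Limits VPD page (0xb0). The cached page may be absent
	 * (NULL) or shorter than the field we want, so both are checked.
	 */
	static unsigned int example_opt_xfer_blocks(struct scsi_device *sdev)
	{
		struct scsi_vpd *vpd;
		unsigned int opt_xfer = 0;

		rcu_read_lock();
		vpd = rcu_dereference(sdev->vpd_pgb0);
		if (vpd && vpd->len >= 16)
			opt_xfer = get_unaligned_be32(&vpd->data[12]);
		rcu_read_unlock();	/* vpd must not be touched after this */

		return opt_xfer;
	}

This is also why the cleanup paths in the diff switch from kfree(buffer) to rcu_read_unlock(): the page is owned by the midlayer and freed via RCU when it is refreshed, so a consumer only pins it for the duration of the read-side section.
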
@@ -2844,39 +2844,39 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
 static void sd_read_block_limits(struct scsi_disk *sdkp)
 {
 	unsigned int sector_sz = sdkp->device->sector_size;
-	const int vpd_len = 64;
-	unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
+	struct scsi_vpd *vpd;
 
-	if (!buffer ||
-	    /* Block Limits VPD */
-	    scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
+	rcu_read_lock();
+
+	vpd = rcu_dereference(sdkp->device->vpd_pgb0);
+	if (!vpd || vpd->len < 16)
 		goto out;
 
 	blk_queue_io_min(sdkp->disk->queue,
-			 get_unaligned_be16(&buffer[6]) * sector_sz);
+			 get_unaligned_be16(&vpd->data[6]) * sector_sz);
 
-	sdkp->max_xfer_blocks = get_unaligned_be32(&buffer[8]);
-	sdkp->opt_xfer_blocks = get_unaligned_be32(&buffer[12]);
+	sdkp->max_xfer_blocks = get_unaligned_be32(&vpd->data[8]);
+	sdkp->opt_xfer_blocks = get_unaligned_be32(&vpd->data[12]);
 
-	if (buffer[3] == 0x3c) {
+	if (vpd->len >= 64) {
 		unsigned int lba_count, desc_count;
 
-		sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]);
+		sdkp->max_ws_blocks = (u32)get_unaligned_be64(&vpd->data[36]);
 
 		if (!sdkp->lbpme)
 			goto out;
 
-		lba_count = get_unaligned_be32(&buffer[20]);
-		desc_count = get_unaligned_be32(&buffer[24]);
+		lba_count = get_unaligned_be32(&vpd->data[20]);
+		desc_count = get_unaligned_be32(&vpd->data[24]);
 
 		if (lba_count && desc_count)
 			sdkp->max_unmap_blocks = lba_count;
 
-		sdkp->unmap_granularity = get_unaligned_be32(&buffer[28]);
+		sdkp->unmap_granularity = get_unaligned_be32(&vpd->data[28]);
 
-		if (buffer[32] & 0x80)
+		if (vpd->data[32] & 0x80)
 			sdkp->unmap_alignment =
-				get_unaligned_be32(&buffer[32]) & ~(1 << 31);
+				get_unaligned_be32(&vpd->data[32]) & ~(1 << 31);
 
 		if (!sdkp->lbpvpd) { /* LBP VPD page not provided */
@@ -2898,7 +2898,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
 	}
 
  out:
-	kfree(buffer);
+	rcu_read_unlock();
 }
 
 /**
@@ -2908,18 +2908,21 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
 static void sd_read_block_characteristics(struct scsi_disk *sdkp)
 {
 	struct request_queue *q = sdkp->disk->queue;
-	unsigned char *buffer;
+	struct scsi_vpd *vpd;
 	u16 rot;
-	const int vpd_len = 64;
+	u8 zoned;
 
-	buffer = kmalloc(vpd_len, GFP_KERNEL);
+	rcu_read_lock();
+	vpd = rcu_dereference(sdkp->device->vpd_pgb1);
 
-	if (!buffer ||
-	    /* Block Device Characteristics VPD */
-	    scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len))
-		goto out;
+	if (!vpd || vpd->len < 8) {
+		rcu_read_unlock();
+		return;
+	}
 
-	rot = get_unaligned_be16(&buffer[4]);
+	rot = get_unaligned_be16(&vpd->data[4]);
+	zoned = (vpd->data[8] >> 4) & 3;
+	rcu_read_unlock();
 
 	if (rot == 1) {
 		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
@@ -2930,7 +2933,7 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
 		/* Host-managed */
 		blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HM);
 	} else {
-		sdkp->zoned = (buffer[8] >> 4) & 3;
+		sdkp->zoned = zoned;
 		if (sdkp->zoned == 1) {
 			/* Host-aware */
 			blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HA);
@@ -2941,7 +2944,7 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
 	}
 
 	if (!sdkp->first_scan)
-		goto out;
+		return;
 
 	if (blk_queue_is_zoned(q)) {
 		sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
@@ -2954,9 +2957,6 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
 			sd_printk(KERN_NOTICE, sdkp,
 				  "Drive-managed SMR disk\n");
 	}
-
- out:
-	kfree(buffer);
 }
 
 /**
@@ -2965,24 +2965,24 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
  */
 static void sd_read_block_provisioning(struct scsi_disk *sdkp)
 {
-	unsigned char *buffer;
-	const int vpd_len = 8;
+	struct scsi_vpd *vpd;
 
 	if (sdkp->lbpme == 0)
 		return;
 
-	buffer = kmalloc(vpd_len, GFP_KERNEL);
+	rcu_read_lock();
+	vpd = rcu_dereference(sdkp->device->vpd_pgb2);
 
-	if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len))
-		goto out;
+	if (!vpd || vpd->len < 8) {
+		rcu_read_unlock();
+		return;
+	}
 
 	sdkp->lbpvpd	= 1;
-	sdkp->lbpu	= (buffer[5] >> 7) & 1;	/* UNMAP */
-	sdkp->lbpws	= (buffer[5] >> 6) & 1;	/* WRITE SAME(16) with UNMAP */
-	sdkp->lbpws10	= (buffer[5] >> 5) & 1;	/* WRITE SAME(10) with UNMAP */
-
- out:
-	kfree(buffer);
+	sdkp->lbpu	= (vpd->data[5] >> 7) & 1;	/* UNMAP */
+	sdkp->lbpws	= (vpd->data[5] >> 6) & 1;	/* WRITE SAME(16) w/ UNMAP */
+	sdkp->lbpws10	= (vpd->data[5] >> 5) & 1;	/* WRITE SAME(10) w/ UNMAP */
+	rcu_read_unlock();
 }
 
 static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)