Commit da0af3c5 authored by Linus Torvalds

Merge tag 'block-6.3-2023-04-06' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - Ensure that ublk always reads the whole sqe upfront (me)

 - Fix for a block size probing issue with ublk (Ming)

 - Fix for the bio based polling (Keith)

 - NVMe pull request via Christoph:
      - fix discard support without oncs (Keith Busch)

 - Partition scan error handling regression fix (Yu)

* tag 'block-6.3-2023-04-06' of git://git.kernel.dk/linux:
  block: don't set GD_NEED_PART_SCAN if scan partition failed
  block: ublk: make sure that block size is set correctly
  ublk: read any SQE values upfront
  nvme: fix discard support without oncs
  blk-mq: directly poll requests
parents d3f05a4c 3723091e
@@ -1359,8 +1359,6 @@ bool blk_rq_is_poll(struct request *rq)
                 return false;
         if (rq->mq_hctx->type != HCTX_TYPE_POLL)
                 return false;
-        if (WARN_ON_ONCE(!rq->bio))
-                return false;
         return true;
 }
 EXPORT_SYMBOL_GPL(blk_rq_is_poll);
@@ -1368,7 +1366,7 @@ EXPORT_SYMBOL_GPL(blk_rq_is_poll);
 static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
 {
         do {
-                bio_poll(rq->bio, NULL, 0);
+                blk_mq_poll(rq->q, blk_rq_to_qc(rq), NULL, 0);
                 cond_resched();
         } while (!completion_done(wait));
 }
......
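The blk-mq change makes synchronous polled requests poll by request cookie (blk_mq_poll()) instead of through rq->bio, so passthrough requests that carry no bio can still complete via polling. As a rough userspace analogue of that completion loop, here is a minimal sketch; poll_once() is a hypothetical stand-in for the driver poll hook, not a kernel API:

/*
 * Userspace sketch (illustrative only) of the blk_rq_poll_completion()
 * pattern: keep driving completions and yielding the CPU until the
 * waiter is signalled.
 */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool done;

static void poll_once(void)
{
        /* in the kernel this would be blk_mq_poll() on the request's cookie */
}

static void *completer(void *arg)
{
        (void)arg;
        usleep(1000);                   /* simulate the device finishing the I/O */
        atomic_store(&done, true);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, completer, NULL);
        do {
                poll_once();
                sched_yield();          /* plays the role of cond_resched() */
        } while (!atomic_load(&done));
        pthread_join(t, NULL);
        puts("request completed");
        return 0;
}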
@@ -368,7 +368,6 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
         if (disk->open_partitions)
                 return -EBUSY;
 
-        set_bit(GD_NEED_PART_SCAN, &disk->state);
         /*
          * If the device is opened exclusively by current thread already, it's
          * safe to scan partitions, otherwise, use bd_prepare_to_claim() to
@@ -381,12 +380,19 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
                         return ret;
         }
 
+        set_bit(GD_NEED_PART_SCAN, &disk->state);
         bdev = blkdev_get_by_dev(disk_devt(disk), mode & ~FMODE_EXCL, NULL);
         if (IS_ERR(bdev))
                 ret = PTR_ERR(bdev);
         else
                 blkdev_put(bdev, mode & ~FMODE_EXCL);
 
+        /*
+         * If blkdev_get_by_dev() failed early, GD_NEED_PART_SCAN would still
+         * be set, and a later re-assembly of a partitioned raid device would
+         * wrongly create partitions for the underlying disk.
+         */
+        clear_bit(GD_NEED_PART_SCAN, &disk->state);
         if (!(mode & FMODE_EXCL))
                 bd_abort_claiming(disk->part0, disk_scan_partitions);
         return ret;
......
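The genhd change narrows the lifetime of GD_NEED_PART_SCAN: the flag is set immediately before the blkdev_get_by_dev() call that consumes it and cleared unconditionally afterwards, so a failed open can no longer leave it behind for the next opener (for example when a partitioned raid device is re-assembled on top of the disk). A minimal userspace sketch of that set-then-always-clear pattern, with hypothetical device_open()/device_close() helpers standing in for the block-layer calls:

/*
 * Sketch of the pattern the fix enforces: a one-shot flag is set right
 * before the operation that consumes it and cleared on every exit path,
 * so an earlier failure can never leave it dangling for the next caller.
 */
#include <stdbool.h>
#include <stdio.h>

static bool need_part_scan;             /* stands in for GD_NEED_PART_SCAN */

static int device_open(void)  { return -1; /* pretend the open failed */ }
static void device_close(void) { }

static int scan_device(void)
{
        int ret;

        need_part_scan = true;          /* set right before the consumer */
        ret = device_open();            /* the only code that reads the flag */
        if (ret == 0)
                device_close();

        /* clear unconditionally: success or failure, the flag is one-shot */
        need_part_scan = false;
        return ret;
}

int main(void)
{
        int ret = scan_device();

        printf("scan_device() = %d, flag still set = %d\n", ret, need_part_scan);
        return 0;
}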
@@ -246,7 +246,7 @@ static int ublk_validate_params(const struct ublk_device *ub)
         if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
                 const struct ublk_param_basic *p = &ub->params.basic;
 
-                if (p->logical_bs_shift > PAGE_SHIFT)
+                if (p->logical_bs_shift > PAGE_SHIFT || p->logical_bs_shift < 9)
                         return -EINVAL;
 
                 if (p->logical_bs_shift > p->physical_bs_shift)
@@ -1261,9 +1261,10 @@ static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
         ublk_queue_cmd(ubq, req);
 }
 
-static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
+                               unsigned int issue_flags,
+                               struct ublksrv_io_cmd *ub_cmd)
 {
-        struct ublksrv_io_cmd *ub_cmd = (struct ublksrv_io_cmd *)cmd->cmd;
         struct ublk_device *ub = cmd->file->private_data;
         struct ublk_queue *ubq;
         struct ublk_io *io;
@@ -1362,6 +1363,23 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
         return -EIOCBQUEUED;
 }
 
+static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+        struct ublksrv_io_cmd *ub_src = (struct ublksrv_io_cmd *) cmd->cmd;
+        struct ublksrv_io_cmd ub_cmd;
+
+        /*
+         * Not necessary for async retry, but let's keep it simple and always
+         * copy the values to avoid any potential reuse.
+         */
+        ub_cmd.q_id = READ_ONCE(ub_src->q_id);
+        ub_cmd.tag = READ_ONCE(ub_src->tag);
+        ub_cmd.result = READ_ONCE(ub_src->result);
+        ub_cmd.addr = READ_ONCE(ub_src->addr);
+
+        return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
+}
+
 static const struct file_operations ublk_ch_fops = {
         .owner = THIS_MODULE,
         .open = ublk_ch_open,
@@ -1952,6 +1970,8 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
                 /* clear all we don't support yet */
                 ub->params.types &= UBLK_PARAM_TYPE_ALL;
                 ret = ublk_validate_params(ub);
+                if (ret)
+                        ub->params.types = 0;
         }
         mutex_unlock(&ub->mutex);
......
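Besides tightening the logical block size check (the shift must now lie between 9, i.e. 512 bytes, and PAGE_SHIFT) and dropping params that fail validation, the ublk change splits the uring_cmd handler so that every SQE field is copied once with READ_ONCE() into a local struct before any validation or use; an async retry then works on the snapshot instead of re-reading memory the submitter can still modify. A userspace sketch of that snapshot-then-use pattern, assuming a simplified READ_ONCE() and an illustrative io_cmd layout (not the real ublksrv_io_cmd):

/*
 * Sketch of "snapshot shared fields once, then only use the snapshot".
 * READ_ONCE() here is a simplified volatile-based version of the kernel
 * macro; struct io_cmd and its fields are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct io_cmd {
        uint16_t q_id;
        uint16_t tag;
        int32_t  result;
        uint64_t addr;
};

static int handle_cmd(const struct io_cmd *cmd)
{
        /* works only on the stable local copy */
        printf("q_id=%u tag=%u result=%d addr=0x%llx\n",
               cmd->q_id, cmd->tag, cmd->result,
               (unsigned long long)cmd->addr);
        return 0;
}

static int dispatch(struct io_cmd *shared)
{
        struct io_cmd snap;

        /* read each shared field exactly once, before any validation or use */
        snap.q_id   = READ_ONCE(shared->q_id);
        snap.tag    = READ_ONCE(shared->tag);
        snap.result = READ_ONCE(shared->result);
        snap.addr   = READ_ONCE(shared->addr);

        return handle_cmd(&snap);
}

int main(void)
{
        struct io_cmd shared = { .q_id = 0, .tag = 7, .result = 0, .addr = 0x1000 };

        return dispatch(&shared);
}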
@@ -1674,6 +1674,9 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
         struct request_queue *queue = disk->queue;
         u32 size = queue_logical_block_size(queue);
 
+        if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
+                ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
+
         if (ctrl->max_discard_sectors == 0) {
                 blk_queue_max_discard_sectors(queue, 0);
                 return;
@@ -1688,9 +1691,6 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
         if (queue->limits.max_discard_sectors)
                 return;
 
-        if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
-                ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
-
         blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
         blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);
......
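The NVMe change applies the DMRSL limit before the max_discard_sectors == 0 check, so a controller that reports DMRSL but does not advertise DSM in ONCS still gets the limit applied, and discard is correctly disabled when the limit ends up zero. As a worked example of the nvme_lba_to_sect()-style unit conversion involved, here is a small sketch, assuming a 4096-byte LBA format (lba_shift = 12); the values are illustrative only:

/*
 * A discard limit expressed in logical blocks (DMRSL) becomes a limit in
 * 512-byte sectors by shifting with (lba_shift - SECTOR_SHIFT).
 */
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

static uint64_t lba_to_sect(uint64_t lba, unsigned int lba_shift)
{
        return lba << (lba_shift - SECTOR_SHIFT);
}

int main(void)
{
        uint32_t dmrsl = 0x10000;       /* illustrative DMRSL value, in LBAs */
        unsigned int lba_shift = 12;    /* assumed 4 KiB logical blocks */

        /* 0x10000 4 KiB blocks -> 0x80000 sectors (512-byte units) */
        printf("max_discard_sectors = %llu\n",
               (unsigned long long)lba_to_sect(dmrsl, lba_shift));
        return 0;
}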