Commit a8ce5f52 authored by Ming Lei, committed by Jens Axboe

ublk_drv: cancel device even though disk isn't up

Each ublk queue is started before the disk is added, so we have to cancel the
queues in ublk_stop_dev() even when the disk isn't up; otherwise the ubq
daemons cannot exit and the DEL_DEV command may hang forever.

Also avoid canceling the queues twice by checking whether the queue is ready;
otherwise a use-after-free on io_uring may be triggered, because
ublk_stop_dev() is also called from ublk_remove().

Fixes: 71f28f31 ("ublk_drv: add io_uring based userspace block driver")
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20220730092750.1118167-2-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent e97424fd
@@ -788,16 +788,27 @@ static void ublk_daemon_monitor_work(struct work_struct *work)
 			UBLK_DAEMON_MONITOR_PERIOD);
 }
 
+static inline bool ublk_queue_ready(struct ublk_queue *ubq)
+{
+	return ubq->nr_io_ready == ubq->q_depth;
+}
+
 static void ublk_cancel_queue(struct ublk_queue *ubq)
 {
 	int i;
 
+	if (!ublk_queue_ready(ubq))
+		return;
+
 	for (i = 0; i < ubq->q_depth; i++) {
 		struct ublk_io *io = &ubq->ios[i];
 
 		if (io->flags & UBLK_IO_FLAG_ACTIVE)
 			io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0);
 	}
+
+	/* all io commands are canceled */
+	ubq->nr_io_ready = 0;
 }
 
 /* Cancel all pending commands, must be called after del_gendisk() returns */
@@ -818,19 +829,14 @@ static void ublk_stop_dev(struct ublk_device *ub)
 	del_gendisk(ub->ub_disk);
 	ub->dev_info.state = UBLK_S_DEV_DEAD;
 	ub->dev_info.ublksrv_pid = -1;
-	ublk_cancel_dev(ub);
 	put_disk(ub->ub_disk);
 	ub->ub_disk = NULL;
 unlock:
+	ublk_cancel_dev(ub);
 	mutex_unlock(&ub->mutex);
 	cancel_delayed_work_sync(&ub->monitor_work);
 }
 
-static inline bool ublk_queue_ready(struct ublk_queue *ubq)
-{
-	return ubq->nr_io_ready == ubq->q_depth;
-}
-
 /* device can only be started after all IOs are ready */
 static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
 {
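For context, a rough sketch of the caller, ublk_cancel_dev() (paraphrased from the driver around this change, not part of the hunks above); it illustrates why the new readiness check turns a repeated ublk_stop_dev() call, e.g. from ublk_remove() after a STOP_DEV command, into a no-op instead of a double completion:

static void ublk_cancel_dev(struct ublk_device *ub)
{
	int i;

	/*
	 * Cancel every queue. With the change above, ublk_cancel_queue()
	 * returns early unless the queue is still marked ready, and it
	 * clears nr_io_ready once the commands have been completed, so
	 * running this a second time no longer completes the same
	 * io_uring commands twice.
	 */
	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
		ublk_cancel_queue(ublk_get_queue(ub, i));
}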