Commit 818680d1 authored by Linus Torvalds

Merge tag 'block-6.5-2023-07-28' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:
 "A few fixes that should go into the current kernel release, mainly:

   - Set of fixes for dasd (Stefan)

   - Handle interruptible waits returning because of a signal for ublk
     (Ming)"

* tag 'block-6.5-2023-07-28' of git://git.kernel.dk/linux:
  ublk: return -EINTR if breaking from waiting for existed users in DEL_DEV
  ublk: fail to recover device if queue setup is interrupted
  ublk: fail to start device if queue setup is interrupted
  block: Fix a source code comment in include/uapi/linux/blkzoned.h
  s390/dasd: print copy pair message only for the correct error
  s390/dasd: fix hanging device after request requeue
  s390/dasd: use correct number of retries for ERP requests
  s390/dasd: fix hanging device after quiesce/resume
parents 9c655058 3e9dce80
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -1847,7 +1847,8 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
 	if (ublksrv_pid <= 0)
 		return -EINVAL;
 
-	wait_for_completion_interruptible(&ub->completion);
+	if (wait_for_completion_interruptible(&ub->completion) != 0)
+		return -EINTR;
 
 	schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
@@ -2125,8 +2126,8 @@ static int ublk_ctrl_del_dev(struct ublk_device **p_ub)
 	 * - the device number is freed already, we will not find this
 	 *   device via ublk_get_device_from_id()
 	 */
-	wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx));
-
+	if (wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx)))
+		return -EINTR;
 	return 0;
 }
@@ -2323,7 +2324,9 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
 	pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
 			__func__, ub->dev_info.nr_hw_queues, header->dev_id);
 	/* wait until new ubq_daemon sending all FETCH_REQ */
-	wait_for_completion_interruptible(&ub->completion);
+	if (wait_for_completion_interruptible(&ub->completion))
+		return -EINTR;
+
 	pr_devel("%s: All new ubq_daemons(nr: %d) are ready, dev id %d\n",
 			__func__, ub->dev_info.nr_hw_queues, header->dev_id);
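All three ublk hunks address the same bug class: wait_for_completion_interruptible() and wait_event_interruptible() return nonzero (-ERESTARTSYS) when a pending signal breaks the wait, and the old code continued as if the awaited event had happened. Below is a minimal sketch of the corrected pattern; the example_* name is hypothetical, not taken from the driver.

/*
 * Sketch only (kernel context assumed): the interruptible wait returns
 * 0 when the completion actually fired and -ERESTARTSYS when a signal
 * interrupted the wait, so the result must be checked before touching
 * state that only the completing side sets up.
 */
static int example_wait_ready(struct completion *done)
{
	if (wait_for_completion_interruptible(done))
		return -EINTR;	/* report the interruption to userspace */

	return 0;		/* the event really happened; safe to proceed */
}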
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2943,41 +2943,32 @@ static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
  * Requeue a request back to the block request queue
  * only works for block requests
  */
-static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
+static void _dasd_requeue_request(struct dasd_ccw_req *cqr)
 {
-	struct dasd_block *block = cqr->block;
 	struct request *req;
 
-	if (!block)
-		return -EINVAL;
-
 	/*
 	 * If the request is an ERP request there is nothing to requeue.
 	 * This will be done with the remaining original request.
 	 */
 	if (cqr->refers)
-		return 0;
+		return;
 	spin_lock_irq(&cqr->dq->lock);
 	req = (struct request *) cqr->callback_data;
 	blk_mq_requeue_request(req, true);
 	spin_unlock_irq(&cqr->dq->lock);
-	return 0;
+	return;
 }
 
-/*
- * Go through all request on the dasd_block request queue, cancel them
- * on the respective dasd_device, and return them to the generic
- * block layer.
- */
-static int dasd_flush_block_queue(struct dasd_block *block)
+static int _dasd_requests_to_flushqueue(struct dasd_block *block,
+					struct list_head *flush_queue)
 {
 	struct dasd_ccw_req *cqr, *n;
-	int rc, i;
-	struct list_head flush_queue;
 	unsigned long flags;
+	int rc, i;
 
-	INIT_LIST_HEAD(&flush_queue);
-	spin_lock_bh(&block->queue_lock);
+	spin_lock_irqsave(&block->queue_lock, flags);
 	rc = 0;
 restart:
 	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
@@ -2992,13 +2983,32 @@ static int dasd_flush_block_queue(struct dasd_block *block)
 		 * is returned from the dasd_device layer.
 		 */
 		cqr->callback = _dasd_wake_block_flush_cb;
-		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
-			list_move_tail(&cqr->blocklist, &flush_queue);
+		for (i = 0; cqr; cqr = cqr->refers, i++)
+			list_move_tail(&cqr->blocklist, flush_queue);
 		if (i > 1)
 			/* moved more than one request - need to restart */
 			goto restart;
 	}
-	spin_unlock_bh(&block->queue_lock);
+	spin_unlock_irqrestore(&block->queue_lock, flags);
+
+	return rc;
+}
+
+/*
+ * Go through all request on the dasd_block request queue, cancel them
+ * on the respective dasd_device, and return them to the generic
+ * block layer.
+ */
+static int dasd_flush_block_queue(struct dasd_block *block)
+{
+	struct dasd_ccw_req *cqr, *n;
+	struct list_head flush_queue;
+	unsigned long flags;
+	int rc;
+
+	INIT_LIST_HEAD(&flush_queue);
+	rc = _dasd_requests_to_flushqueue(block, &flush_queue);
 
 	/* Now call the callback function of flushed requests */
 restart_cb:
 	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
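The refactor above extracts the "collect requests onto a private list under the queue lock, complete them afterwards" logic into _dasd_requests_to_flushqueue(), and switches from spin_lock_bh() to spin_lock_irqsave() so the helper is usable from the requeue path as well. A generic sketch of that pattern follows, with hypothetical example_* types and names.

/*
 * Sketch only: detach work from a shared queue while holding the lock,
 * then run completion handling with the lock dropped so callbacks may
 * sleep or requeue. list_splice_init() empties the shared list in O(1).
 */
struct example_req {
	struct list_head node;
};

struct example_dev {
	spinlock_t lock;
	struct list_head queue;
};

static void example_complete(struct example_req *req);

static void example_drain(struct example_dev *dev)
{
	LIST_HEAD(private_q);		/* on-stack list head, needs no lock */
	struct example_req *req, *n;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);	/* safe in any calling context */
	list_splice_init(&dev->queue, &private_q);
	spin_unlock_irqrestore(&dev->lock, flags);

	list_for_each_entry_safe(req, n, &private_q, node)
		example_complete(req);		/* shared lock no longer held */
}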
@@ -3881,75 +3891,36 @@ EXPORT_SYMBOL_GPL(dasd_generic_space_avail);
  */
 int dasd_generic_requeue_all_requests(struct dasd_device *device)
 {
+	struct dasd_block *block = device->block;
 	struct list_head requeue_queue;
 	struct dasd_ccw_req *cqr, *n;
-	struct dasd_ccw_req *refers;
 	int rc;
 
-	INIT_LIST_HEAD(&requeue_queue);
-	spin_lock_irq(get_ccwdev_lock(device->cdev));
-	rc = 0;
-	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
-		/* Check status and move request to flush_queue */
-		if (cqr->status == DASD_CQR_IN_IO) {
-			rc = device->discipline->term_IO(cqr);
-			if (rc) {
-				/* unable to terminate requeust */
-				dev_err(&device->cdev->dev,
-					"Unable to terminate request %p "
-					"on suspend\n", cqr);
-				spin_unlock_irq(get_ccwdev_lock(device->cdev));
-				dasd_put_device(device);
-				return rc;
-			}
-		}
-		list_move_tail(&cqr->devlist, &requeue_queue);
-	}
-	spin_unlock_irq(get_ccwdev_lock(device->cdev));
-
-	list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
-		wait_event(dasd_flush_wq,
-			   (cqr->status != DASD_CQR_CLEAR_PENDING));
-
-		/*
-		 * requeue requests to blocklayer will only work
-		 * for block device requests
-		 */
-		if (_dasd_requeue_request(cqr))
-			continue;
-
-		/* remove requests from device and block queue */
-		list_del_init(&cqr->devlist);
-		while (cqr->refers != NULL) {
-			refers = cqr->refers;
-			/* remove the request from the block queue */
-			list_del(&cqr->blocklist);
-			/* free the finished erp request */
-			dasd_free_erp_request(cqr, cqr->memdev);
-			cqr = refers;
-		}
-
-		/*
-		 * _dasd_requeue_request already checked for a valid
-		 * blockdevice, no need to check again
-		 * all erp requests (cqr->refers) have a cqr->block
-		 * pointer copy from the original cqr
-		 */
+	if (!block)
+		return 0;
+
+	INIT_LIST_HEAD(&requeue_queue);
+	rc = _dasd_requests_to_flushqueue(block, &requeue_queue);
+
+	/* Now call the callback function of flushed requests */
+restart_cb:
+	list_for_each_entry_safe(cqr, n, &requeue_queue, blocklist) {
+		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
+		/* Process finished ERP request. */
+		if (cqr->refers) {
+			spin_lock_bh(&block->queue_lock);
+			__dasd_process_erp(block->base, cqr);
+			spin_unlock_bh(&block->queue_lock);
+			/* restart list_for_xx loop since dasd_process_erp
+			 * might remove multiple elements
+			 */
+			goto restart_cb;
+		}
+		_dasd_requeue_request(cqr);
 		list_del_init(&cqr->blocklist);
 		cqr->block->base->discipline->free_cp(
 			cqr, (struct request *) cqr->callback_data);
 	}
-
-	/*
-	 * if requests remain then they are internal request
-	 * and go back to the device queue
-	 */
-	if (!list_empty(&requeue_queue)) {
-		/* move freeze_queue to start of the ccw_queue */
-		spin_lock_irq(get_ccwdev_lock(device->cdev));
-		list_splice_tail(&requeue_queue, &device->ccw_queue);
-		spin_unlock_irq(get_ccwdev_lock(device->cdev));
-	}
 	dasd_schedule_device_bh(device);
 	return rc;
 }
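dasd_generic_requeue_all_requests() now funnels everything through the same flush-queue helper and, like dasd_flush_block_queue(), restarts its walk after __dasd_process_erp() finishes an ERP request, because that step can unlink several entries at once while list_for_each_entry_safe() only tolerates removal of the current one. The restart idiom in miniature, with hypothetical names:

/*
 * Sketch only: when a processing step may remove *other* list entries
 * besides the current one, the safe iterator's cached next pointer can
 * go stale, so the walk restarts from the head after such a step.
 */
struct example_req {
	struct list_head node;
	bool is_chain;
};

static void example_process_chain(struct example_req *req); /* may unlink several entries */
static void example_complete(struct example_req *req);

static void example_requeue_all(struct list_head *queue)
{
	struct example_req *req, *n;

restart:
	list_for_each_entry_safe(req, n, queue, node) {
		if (req->is_chain) {
			example_process_chain(req);
			goto restart;	/* iterator state is stale now */
		}
		list_del_init(&req->node);
		example_complete(req);
	}
}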
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1050,7 +1050,7 @@ dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense)
 		dev_err(&device->cdev->dev, "An I/O request was rejected"
 			" because writing is inhibited\n");
 		erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
-	} else if (sense[7] & SNS7_INVALID_ON_SEC) {
+	} else if (sense[7] == SNS7_INVALID_ON_SEC) {
 		dev_err(&device->cdev->dev, "An I/O request was rejected on a copy pair secondary device\n");
 		/* suppress dump of sense data for this error */
 		set_bit(DASD_CQR_SUPPRESS_CR, &erp->refers->flags);
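This one-character change is the "print copy pair message only for the correct error" fix: a bitwise test like sense[7] & SNS7_INVALID_ON_SEC is true for every sense code that shares a set bit with the constant, so unrelated errors also produced the copy-pair message, while the equality test fires for the exact code alone. Illustrated below with a made-up stand-in value; the real SNS7_INVALID_ON_SEC is defined in the dasd headers.

#include <stdio.h>

#define EXAMPLE_INVALID_ON_SEC 0x0e	/* hypothetical stand-in value */

int main(void)
{
	unsigned char sense7 = 0x04;	/* some unrelated sense code */

	/* old-style check: prints 1, since 0x04 & 0x0e is nonzero */
	printf("bitwise:  %d\n", (sense7 & EXAMPLE_INVALID_ON_SEC) != 0);
	/* new-style check: prints 0, only the exact value 0x0e matches */
	printf("equality: %d\n", sense7 == EXAMPLE_INVALID_ON_SEC);
	return 0;
}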
@@ -2441,7 +2441,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
 	erp->block = cqr->block;
 	erp->magic = cqr->magic;
 	erp->expires = cqr->expires;
-	erp->retries = 256;
+	erp->retries = device->default_retries;
 	erp->buildclk = get_tod_clock();
 	erp->status = DASD_CQR_FILLED;
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -131,6 +131,7 @@ static int dasd_ioctl_resume(struct dasd_block *block)
 	spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
 
 	dasd_schedule_block_bh(block);
+	dasd_schedule_device_bh(base);
 	return 0;
 }
--- a/include/uapi/linux/blkzoned.h
+++ b/include/uapi/linux/blkzoned.h
@@ -51,13 +51,13 @@ enum blk_zone_type {
  *
  * The Zone Condition state machine in the ZBC/ZAC standards maps the above
  * deinitions as:
- *   - ZC1: Empty | BLK_ZONE_EMPTY
+ *   - ZC1: Empty | BLK_ZONE_COND_EMPTY
  *   - ZC2: Implicit Open | BLK_ZONE_COND_IMP_OPEN
  *   - ZC3: Explicit Open | BLK_ZONE_COND_EXP_OPEN
- *   - ZC4: Closed | BLK_ZONE_CLOSED
- *   - ZC5: Full | BLK_ZONE_FULL
- *   - ZC6: Read Only | BLK_ZONE_READONLY
- *   - ZC7: Offline | BLK_ZONE_OFFLINE
+ *   - ZC4: Closed | BLK_ZONE_COND_CLOSED
+ *   - ZC5: Full | BLK_ZONE_COND_FULL
+ *   - ZC6: Read Only | BLK_ZONE_COND_READONLY
+ *   - ZC7: Offline | BLK_ZONE_COND_OFFLINE
  *
  * Conditions 0x5 to 0xC are reserved by the current ZBC/ZAC spec and should
  * be considered invalid.
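The hunk above only corrects identifier names in a comment to the ones enum blk_zone_cond actually defines. For reference, a small userspace sketch that reports the first zone's condition through the BLKREPORTZONE ioctl using the corrected BLK_ZONE_COND_* names; the zoned device path is an assumption passed on the command line.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

int main(int argc, char **argv)
{
	struct blk_zone_report *rep;
	int fd;

	if (argc != 2)
		return 1;
	fd = open(argv[1], O_RDONLY);	/* e.g. a zoned SMR disk, assumed */
	if (fd < 0)
		return 1;

	/* room for the report header plus one zone descriptor */
	rep = calloc(1, sizeof(*rep) + sizeof(struct blk_zone));
	if (!rep)
		return 1;
	rep->sector = 0;
	rep->nr_zones = 1;

	if (ioctl(fd, BLKREPORTZONE, rep) == 0 && rep->nr_zones >= 1) {
		switch (rep->zones[0].cond) {
		case BLK_ZONE_COND_EMPTY:	puts("ZC1: Empty");	break;
		case BLK_ZONE_COND_IMP_OPEN:	puts("ZC2: Implicit Open"); break;
		case BLK_ZONE_COND_EXP_OPEN:	puts("ZC3: Explicit Open"); break;
		case BLK_ZONE_COND_CLOSED:	puts("ZC4: Closed");	break;
		case BLK_ZONE_COND_FULL:	puts("ZC5: Full");	break;
		case BLK_ZONE_COND_READONLY:	puts("ZC6: Read Only");	break;
		case BLK_ZONE_COND_OFFLINE:	puts("ZC7: Offline");	break;
		default:			puts("reserved/invalid"); break;
		}
	}
	free(rep);
	return 0;
}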