Commit c5576876 authored by Stefan Haberland's avatar Stefan Haberland Committed by Martin Schwidefsky

s390/dasd: fix hanging device after resume with internal error 13

If too many ccw requests are pre-build before a suspend/resume cycle
the device might not get enough memory to do path verification
during resume.
Requeue requests to the block device request queue on suspend and free
pre-build ccw requests.
Signed-off-by: Stefan Haberland <stefan.haberland@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent d42e1712
...@@ -2751,6 +2751,26 @@ static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) ...@@ -2751,6 +2751,26 @@ static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
wake_up(&dasd_flush_wq); wake_up(&dasd_flush_wq);
} }
/*
* Requeue a request back to the block request queue
* only works for block requests
*/
static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
{
struct dasd_block *block = cqr->block;
struct request *req;
unsigned long flags;
if (!block)
return -EINVAL;
spin_lock_irqsave(&block->queue_lock, flags);
req = (struct request *) cqr->callback_data;
blk_requeue_request(block->request_queue, req);
spin_unlock_irqrestore(&block->queue_lock, flags);
return 0;
}
/* /*
* Go through all request on the dasd_block request queue, cancel them * Go through all request on the dasd_block request queue, cancel them
* on the respective dasd_device, and return them to the generic * on the respective dasd_device, and return them to the generic
...@@ -3469,10 +3489,11 @@ EXPORT_SYMBOL_GPL(dasd_generic_verify_path); ...@@ -3469,10 +3489,11 @@ EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
int dasd_generic_pm_freeze(struct ccw_device *cdev) int dasd_generic_pm_freeze(struct ccw_device *cdev)
{ {
struct dasd_device *device = dasd_device_from_cdev(cdev);
struct list_head freeze_queue;
struct dasd_ccw_req *cqr, *n; struct dasd_ccw_req *cqr, *n;
struct dasd_ccw_req *refers;
int rc; int rc;
struct list_head freeze_queue;
struct dasd_device *device = dasd_device_from_cdev(cdev);
if (IS_ERR(device)) if (IS_ERR(device))
return PTR_ERR(device); return PTR_ERR(device);
...@@ -3485,7 +3506,8 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev) ...@@ -3485,7 +3506,8 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
/* disallow new I/O */ /* disallow new I/O */
dasd_device_set_stop_bits(device, DASD_STOPPED_PM); dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
/* clear active requests */
/* clear active requests and requeue them to block layer if possible */
INIT_LIST_HEAD(&freeze_queue); INIT_LIST_HEAD(&freeze_queue);
spin_lock_irq(get_ccwdev_lock(cdev)); spin_lock_irq(get_ccwdev_lock(cdev));
rc = 0; rc = 0;
...@@ -3505,7 +3527,6 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev) ...@@ -3505,7 +3527,6 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
} }
list_move_tail(&cqr->devlist, &freeze_queue); list_move_tail(&cqr->devlist, &freeze_queue);
} }
spin_unlock_irq(get_ccwdev_lock(cdev)); spin_unlock_irq(get_ccwdev_lock(cdev));
list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) { list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
...@@ -3513,12 +3534,38 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev) ...@@ -3513,12 +3534,38 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
(cqr->status != DASD_CQR_CLEAR_PENDING)); (cqr->status != DASD_CQR_CLEAR_PENDING));
if (cqr->status == DASD_CQR_CLEARED) if (cqr->status == DASD_CQR_CLEARED)
cqr->status = DASD_CQR_QUEUED; cqr->status = DASD_CQR_QUEUED;
/* requeue requests to blocklayer will only work for
block device requests */
if (_dasd_requeue_request(cqr))
continue;
/* remove requests from device and block queue */
list_del_init(&cqr->devlist);
while (cqr->refers != NULL) {
refers = cqr->refers;
/* remove the request from the block queue */
list_del(&cqr->blocklist);
/* free the finished erp request */
dasd_free_erp_request(cqr, cqr->memdev);
cqr = refers;
}
if (cqr->block)
list_del_init(&cqr->blocklist);
cqr->block->base->discipline->free_cp(
cqr, (struct request *) cqr->callback_data);
} }
/*
 * if any requests remain they are internal requests
 * and must go back to the device ccw queue
 */
if (!list_empty(&freeze_queue)) {
/* move freeze_queue to start of the ccw_queue */ /* move freeze_queue to start of the ccw_queue */
spin_lock_irq(get_ccwdev_lock(cdev)); spin_lock_irq(get_ccwdev_lock(cdev));
list_splice_tail(&freeze_queue, &device->ccw_queue); list_splice_tail(&freeze_queue, &device->ccw_queue);
spin_unlock_irq(get_ccwdev_lock(cdev)); spin_unlock_irq(get_ccwdev_lock(cdev));
}
dasd_put_device(device); dasd_put_device(device);
return rc; return rc;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment