Commit c7b3e923 authored by Sebastian Ott, committed by Martin Schwidefsky

s390/scm: convert tasklet

Drop the tasklet that was used to complete requests in favor of
block layer helpers that finish the IO on the CPU that initiated
it.
Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 12d90762
...@@ -338,21 +338,6 @@ static void __scmrq_log_error(struct scm_request *scmrq) ...@@ -338,21 +338,6 @@ static void __scmrq_log_error(struct scm_request *scmrq)
scmrq->error); scmrq->error);
} }
/*
 * IRQ callback for a completed SCM request (pre-conversion version,
 * removed by this commit in favor of blk-mq completion helpers).
 *
 * Runs in interrupt context: it only records the error, queues the
 * finished request, and defers the actual completion work to the
 * per-device tasklet.
 *
 * NOTE(review): uses spin_lock()/spin_unlock() without irqsave —
 * presumably safe because this is already interrupt context; other
 * users of bdev->lock (the tasklet) use the irqsave variants.
 */
void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
{
/* 'data' is the cookie registered at submission time: the request itself. */
struct scm_request *scmrq = data;
struct scm_blk_dev *bdev = scmrq->bdev;
/* Stash the completion status for the tasklet to inspect later. */
scmrq->error = error;
if (error)
__scmrq_log_error(scmrq);
/* Append to the device's finished list under the list lock ... */
spin_lock(&bdev->lock);
list_add_tail(&scmrq->list, &bdev->finished_requests);
spin_unlock(&bdev->lock);
/* ... then kick the tasklet to drain finished_requests outside the IRQ handler. */
tasklet_hi_schedule(&bdev->tasklet);
}
static void scm_blk_handle_error(struct scm_request *scmrq) static void scm_blk_handle_error(struct scm_request *scmrq)
{ {
struct scm_blk_dev *bdev = scmrq->bdev; struct scm_blk_dev *bdev = scmrq->bdev;
...@@ -383,30 +368,25 @@ static void scm_blk_handle_error(struct scm_request *scmrq) ...@@ -383,30 +368,25 @@ static void scm_blk_handle_error(struct scm_request *scmrq)
scm_request_requeue(scmrq); scm_request_requeue(scmrq);
} }
static void scm_blk_tasklet(struct scm_blk_dev *bdev) void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
{ {
struct scm_request *scmrq; struct scm_request *scmrq = data;
unsigned long flags;
spin_lock_irqsave(&bdev->lock, flags);
while (!list_empty(&bdev->finished_requests)) {
scmrq = list_first_entry(&bdev->finished_requests,
struct scm_request, list);
list_del(&scmrq->list);
spin_unlock_irqrestore(&bdev->lock, flags);
if (scmrq->error && scmrq->retries-- > 0) { scmrq->error = error;
if (error) {
__scmrq_log_error(scmrq);
if (scmrq->retries-- > 0) {
scm_blk_handle_error(scmrq); scm_blk_handle_error(scmrq);
return;
/* Request restarted or requeued, handle next. */ }
spin_lock_irqsave(&bdev->lock, flags);
continue;
} }
scm_request_finish(scmrq); scm_request_finish(scmrq);
spin_lock_irqsave(&bdev->lock, flags); }
}
spin_unlock_irqrestore(&bdev->lock, flags); static void scm_blk_request_done(struct request *req)
{
blk_mq_end_request(req, 0);
} }
static const struct block_device_operations scm_blk_devops = { static const struct block_device_operations scm_blk_devops = {
...@@ -415,6 +395,7 @@ static const struct block_device_operations scm_blk_devops = { ...@@ -415,6 +395,7 @@ static const struct block_device_operations scm_blk_devops = {
static const struct blk_mq_ops scm_mq_ops = { static const struct blk_mq_ops scm_mq_ops = {
.queue_rq = scm_blk_request, .queue_rq = scm_blk_request,
.complete = scm_blk_request_done,
}; };
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
...@@ -434,11 +415,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) ...@@ -434,11 +415,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
bdev->state = SCM_OPER; bdev->state = SCM_OPER;
spin_lock_init(&bdev->rq_lock); spin_lock_init(&bdev->rq_lock);
spin_lock_init(&bdev->lock); spin_lock_init(&bdev->lock);
INIT_LIST_HEAD(&bdev->finished_requests);
atomic_set(&bdev->queued_reqs, 0); atomic_set(&bdev->queued_reqs, 0);
tasklet_init(&bdev->tasklet,
(void (*)(unsigned long)) scm_blk_tasklet,
(unsigned long) bdev);
bdev->tag_set.ops = &scm_mq_ops; bdev->tag_set.ops = &scm_mq_ops;
bdev->tag_set.nr_hw_queues = 1; bdev->tag_set.nr_hw_queues = 1;
...@@ -502,7 +479,6 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) ...@@ -502,7 +479,6 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
void scm_blk_dev_cleanup(struct scm_blk_dev *bdev) void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{ {
tasklet_kill(&bdev->tasklet);
del_gendisk(bdev->gendisk); del_gendisk(bdev->gendisk);
blk_cleanup_queue(bdev->gendisk->queue); blk_cleanup_queue(bdev->gendisk->queue);
blk_mq_free_tag_set(&bdev->tag_set); blk_mq_free_tag_set(&bdev->tag_set);
......
...@@ -15,7 +15,6 @@ ...@@ -15,7 +15,6 @@
#define SCM_QUEUE_DELAY 5 #define SCM_QUEUE_DELAY 5
struct scm_blk_dev { struct scm_blk_dev {
struct tasklet_struct tasklet;
struct request_queue *rq; struct request_queue *rq;
struct gendisk *gendisk; struct gendisk *gendisk;
struct blk_mq_tag_set tag_set; struct blk_mq_tag_set tag_set;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment