Commit 131d08e1 authored by Christoph Hellwig, committed by Jens Axboe

block: split the blk-mq case from elevator_init

There is almost no shared logic, which leads to a very confusing code
flow.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
Tested-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent acddf3b3
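
The net result is two independent initializers instead of one mixed path. A minimal sketch of the resulting interfaces, condensed from the diff below (the prototypes come from the diff; the summary comments are an added gloss):

/* Legacy request_queue path: honors the "elevator=" boot parameter, then
 * CONFIG_DEFAULT_IOSCHED, and finally falls back to noop. */
int elevator_init(struct request_queue *q);

/* blk-mq path: defaults to mq-deadline for single-hw-queue devices, "none"
 * otherwise; called from blk_mq_init_allocated_queue(). */
int elevator_init_mq(struct request_queue *q);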
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2573,7 +2573,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
 		int ret;
 
-		ret = elevator_init(q);
+		ret = elevator_init_mq(q);
 		if (ret)
 			return ERR_PTR(ret);
 	}
--- a/block/blk.h
+++ b/block/blk.h
@@ -232,6 +232,7 @@ static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq
 }
 
 int elevator_init(struct request_queue *);
+int elevator_init_mq(struct request_queue *q);
 void elevator_exit(struct request_queue *, struct elevator_queue *);
 int elv_register_queue(struct request_queue *q);
 void elv_unregister_queue(struct request_queue *q);
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -199,6 +199,11 @@ static void elevator_release(struct kobject *kobj)
 	kfree(e);
 }
 
+/*
+ * Use the default elevator specified by config boot param for non-mq devices,
+ * or by config option. Don't try to load modules as we could be running off
+ * async and request_module() isn't allowed from async.
+ */
 int elevator_init(struct request_queue *q)
 {
 	struct elevator_type *e = NULL;
@@ -212,46 +217,22 @@ int elevator_init(struct request_queue *q)
 	if (unlikely(q->elevator))
 		goto out_unlock;
 
-	/*
-	 * Use the default elevator specified by config boot param for
-	 * non-mq devices, or by config option. Don't try to load modules
-	 * as we could be running off async and request_module() isn't
-	 * allowed from async.
-	 */
-	if (!q->mq_ops && *chosen_elevator) {
+	if (*chosen_elevator) {
 		e = elevator_get(q, chosen_elevator, false);
 		if (!e)
 			printk(KERN_ERR "I/O scheduler %s not found\n",
 							chosen_elevator);
 	}
 
+	if (!e)
+		e = elevator_get(q, CONFIG_DEFAULT_IOSCHED, false);
 	if (!e) {
-		/*
-		 * For blk-mq devices, we default to using mq-deadline,
-		 * if available, for single queue devices. If deadline
-		 * isn't available OR we have multiple queues, default
-		 * to "none".
-		 */
-		if (q->mq_ops) {
-			if (q->nr_hw_queues == 1)
-				e = elevator_get(q, "mq-deadline", false);
-			if (!e)
-				goto out_unlock;
-		} else
-			e = elevator_get(q, CONFIG_DEFAULT_IOSCHED, false);
-
-		if (!e) {
-			printk(KERN_ERR
-				"Default I/O scheduler not found. " \
-					"Using noop.\n");
-			e = elevator_get(q, "noop", false);
-		}
+		printk(KERN_ERR
+			"Default I/O scheduler not found. Using noop.\n");
+		e = elevator_get(q, "noop", false);
 	}
 
-	if (e->uses_mq)
-		err = blk_mq_init_sched(q, e);
-	else
-		err = e->ops.sq.elevator_init_fn(q, e);
+	err = e->ops.sq.elevator_init_fn(q, e);
 	if (err)
 		elevator_put(e);
 out_unlock:
@@ -992,6 +973,40 @@ static int elevator_switch_mq(struct request_queue *q,
 	return ret;
 }
 
+/*
+ * For blk-mq devices, we default to using mq-deadline, if available, for single
+ * queue devices. If deadline isn't available OR we have multiple queues,
+ * default to "none".
+ */
+int elevator_init_mq(struct request_queue *q)
+{
+	struct elevator_type *e;
+	int err = 0;
+
+	if (q->nr_hw_queues != 1)
+		return 0;
+
+	/*
+	 * q->sysfs_lock must be held to provide mutual exclusion between
+	 * elevator_switch() and here.
+	 */
+	mutex_lock(&q->sysfs_lock);
+	if (unlikely(q->elevator))
+		goto out_unlock;
+
+	e = elevator_get(q, "mq-deadline", false);
+	if (!e)
+		goto out_unlock;
+
+	err = blk_mq_init_sched(q, e);
+	if (err)
+		elevator_put(e);
+
+out_unlock:
+	mutex_unlock(&q->sysfs_lock);
+	return err;
+}
+
 /*
  * switch to new_e io scheduler. be careful not to introduce deadlocks -
  * we don't free the old io scheduler, before we have allocated what we