Commit 91cc8fbc authored by Christoph Hellwig, committed by Richard Weinberger

ubi: block: set BLK_MQ_F_BLOCKING

Set BLK_MQ_F_BLOCKING so that the block layer always calls ->queue_rq
from process context and drop the driver internal workqueue.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Richard Weinberger <richard@nod.at>
parent 3432e574
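For readers unfamiliar with BLK_MQ_F_BLOCKING: when the flag is set on a tag set, blk-mq guarantees that ->queue_rq is invoked from process context, so a driver whose I/O path may sleep can do the work inline and complete the request synchronously instead of deferring it to a workqueue. The minimal sketch below illustrates that general pattern only; the example_* names are made up for illustration and this is not the ubiblock code, which follows in the patch.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bvec.h>

/*
 * Hypothetical read handler that just zero-fills the request data.
 * It stands in for the real, possibly sleeping device I/O (ubi_read_sg()
 * in the driver touched by this patch).
 */
static blk_status_t example_do_read(struct request *req)
{
	struct req_iterator iter;
	struct bio_vec bvec;

	rq_for_each_segment(bvec, req, iter)
		memzero_bvec(&bvec);
	return BLK_STS_OK;
}

/*
 * Because the tag set carries BLK_MQ_F_BLOCKING, blk-mq always calls this
 * from process context, so the I/O can be done inline and the request
 * completed synchronously -- no per-request work item is needed.
 */
static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;

	if (req_op(req) != REQ_OP_READ)
		return BLK_STS_IOERR;

	blk_mq_start_request(req);
	blk_mq_end_request(req, example_do_read(req));
	return BLK_STS_OK;
}

static const struct blk_mq_ops example_mq_ops = {
	.queue_rq	= example_queue_rq,
};

/* The flag itself is set on the tag set before blk_mq_alloc_tag_set(). */
static int example_init_tag_set(struct blk_mq_tag_set *set)
{
	set->ops		= &example_mq_ops;
	set->nr_hw_queues	= 1;
	set->queue_depth	= 64;
	set->numa_node		= NUMA_NO_NODE;
	set->flags		= BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
	return blk_mq_alloc_tag_set(set);
}

The actual patch follows.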
@@ -35,7 +35,6 @@
 #include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/mtd/ubi.h>
-#include <linux/workqueue.h>
 #include <linux/blkdev.h>
 #include <linux/blk-mq.h>
 #include <linux/hdreg.h>
@@ -62,7 +61,6 @@ struct ubiblock_param {
 };
 
 struct ubiblock_pdu {
-	struct work_struct work;
 	struct ubi_sgl usgl;
 };
 
@@ -82,8 +80,6 @@ struct ubiblock {
 	struct gendisk *gd;
 	struct request_queue *rq;
 
-	struct workqueue_struct *wq;
-
 	struct mutex dev_mutex;
 	struct list_head list;
 	struct blk_mq_tag_set tag_set;
@@ -181,20 +177,29 @@ static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
 	return NULL;
 }
 
-static int ubiblock_read(struct ubiblock_pdu *pdu)
+static blk_status_t ubiblock_read(struct request *req)
 {
-	int ret, leb, offset, bytes_left, to_read;
-	u64 pos;
-	struct request *req = blk_mq_rq_from_pdu(pdu);
+	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
 	struct ubiblock *dev = req->q->queuedata;
+	u64 pos = blk_rq_pos(req) << 9;
+	int to_read = blk_rq_bytes(req);
+	int bytes_left = to_read;
+	/* Get LEB:offset address to read from */
+	int offset = do_div(pos, dev->leb_size);
+	int leb = pos;
+	struct req_iterator iter;
+	struct bio_vec bvec;
+	int ret;
 
-	to_read = blk_rq_bytes(req);
-	pos = blk_rq_pos(req) << 9;
+	blk_mq_start_request(req);
 
-	/* Get LEB:offset address to read from */
-	offset = do_div(pos, dev->leb_size);
-	leb = pos;
-	bytes_left = to_read;
+	/*
+	 * It is safe to ignore the return value of blk_rq_map_sg() because
+	 * the number of sg entries is limited to UBI_MAX_SG_COUNT
+	 * and ubi_read_sg() will check that limit.
+	 */
+	ubi_sgl_init(&pdu->usgl);
+	blk_rq_map_sg(req->q, req, pdu->usgl.sg);
 
 	while (bytes_left) {
 		/*
@@ -206,14 +211,17 @@ static int ubiblock_read(struct ubiblock_pdu *pdu)
 		ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
 		if (ret < 0)
-			return ret;
+			break;
 
 		bytes_left -= to_read;
 		to_read = bytes_left;
 		leb += 1;
 		offset = 0;
 	}
 
-	return 0;
+	rq_for_each_segment(bvec, req, iter)
+		flush_dcache_page(bvec.bv_page);
+
+	return errno_to_blk_status(ret);
 }
 
 static int ubiblock_open(struct block_device *bdev, fmode_t mode)
@@ -289,47 +297,15 @@ static const struct block_device_operations ubiblock_ops = {
 	.getgeo	= ubiblock_getgeo,
 };
 
-static void ubiblock_do_work(struct work_struct *work)
-{
-	int ret;
-	struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
-	struct request *req = blk_mq_rq_from_pdu(pdu);
-	struct req_iterator iter;
-	struct bio_vec bvec;
-
-	blk_mq_start_request(req);
-
-	/*
-	 * It is safe to ignore the return value of blk_rq_map_sg() because
-	 * the number of sg entries is limited to UBI_MAX_SG_COUNT
-	 * and ubi_read_sg() will check that limit.
-	 */
-	blk_rq_map_sg(req->q, req, pdu->usgl.sg);
-
-	ret = ubiblock_read(pdu);
-
-	rq_for_each_segment(bvec, req, iter)
-		flush_dcache_page(bvec.bv_page);
-
-	blk_mq_end_request(req, errno_to_blk_status(ret));
-}
-
 static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
 			     const struct blk_mq_queue_data *bd)
 {
-	struct request *req = bd->rq;
-	struct ubiblock *dev = hctx->queue->queuedata;
-	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
-
-	switch (req_op(req)) {
+	switch (req_op(bd->rq)) {
 	case REQ_OP_READ:
-		ubi_sgl_init(&pdu->usgl);
-		queue_work(dev->wq, &pdu->work);
-		return BLK_STS_OK;
+		return ubiblock_read(bd->rq);
 	default:
 		return BLK_STS_IOERR;
 	}
 }
 
 static int ubiblock_init_request(struct blk_mq_tag_set *set,
@@ -339,8 +315,6 @@ static int ubiblock_init_request(struct blk_mq_tag_set *set,
 	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
 
 	sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
-	INIT_WORK(&pdu->work, ubiblock_do_work);
-
 	return 0;
 }
 
@@ -404,7 +378,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
 	dev->tag_set.ops = &ubiblock_mq_ops;
 	dev->tag_set.queue_depth = 64;
 	dev->tag_set.numa_node = NUMA_NO_NODE;
-	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
 	dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
 	dev->tag_set.driver_data = dev;
 	dev->tag_set.nr_hw_queues = 1;
@@ -442,31 +416,18 @@ int ubiblock_create(struct ubi_volume_info *vi)
 	dev->rq = gd->queue;
 	blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);
 
-	/*
-	 * Create one workqueue per volume (per registered block device).
-	 * Remember workqueues are cheap, they're not threads.
-	 */
-	dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
-	if (!dev->wq) {
-		ret = -ENOMEM;
-		goto out_remove_minor;
-	}
-
 	list_add_tail(&dev->list, &ubiblock_devices);
 
 	/* Must be the last step: anyone can call file ops from now on */
 	ret = add_disk(dev->gd);
 	if (ret)
-		goto out_destroy_wq;
+		goto out_remove_minor;
 
 	dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
 		 dev->ubi_num, dev->vol_id, vi->name);
 	mutex_unlock(&devices_mutex);
 	return 0;
 
-out_destroy_wq:
-	list_del(&dev->list);
-	destroy_workqueue(dev->wq);
 out_remove_minor:
 	idr_remove(&ubiblock_minor_idr, gd->first_minor);
 out_cleanup_disk:
@@ -485,8 +446,6 @@ static void ubiblock_cleanup(struct ubiblock *dev)
 {
 	/* Stop new requests to arrive */
 	del_gendisk(dev->gd);
-	/* Flush pending work */
-	destroy_workqueue(dev->wq);
 	/* Finally destroy the blk queue */
 	dev_info(disk_to_dev(dev->gd), "released");
 	put_disk(dev->gd);