Commit 466d89a6 authored by Keith Busch, committed by Mike Snitzer

dm: prepare for allocating blk-mq clone requests in target

For blk-mq request-based DM, the responsibility of allocating a cloned
request will be transferred from DM core to the target type.

To prepare for conditionally using this new model, the original
request's 'special' now points to the dm_rq_target_io because the
clone is allocated later in the block layer rather than in DM core.
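
In other words, rq->special changes from holding the clone request itself to
holding the per-request dm_rq_target_io, and the clone is reached through
tio->clone. A minimal user-space sketch of that pointer layout (the struct
definitions below are simplified stand-ins, not the kernel ones):

#include <assert.h>

/* Simplified stand-ins; the real kernel structures carry much more state. */
struct request {
	void *special;			/* per-request driver data */
};

struct dm_rq_target_io {
	struct request *orig;		/* original request queued to the dm device */
	struct request *clone;		/* clone dispatched to the underlying device */
};

int main(void)
{
	struct request orig = { 0 }, clone = { 0 };
	struct dm_rq_target_io tio = { .orig = &orig, .clone = &clone };

	/* Old model: orig.special pointed straight at the clone.      */
	/* New model: the tio is stored and the clone hangs off of it. */
	orig.special = &tio;

	struct dm_rq_target_io *t = orig.special;
	assert(t->orig == &orig && t->clone == &clone);
	return 0;
}

This is the shape the diff below follows: dm_complete_request(),
dm_kill_unmapped_request() and the requeue path now take the original request
and find the tio through rq->special, rather than starting from the clone.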
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 2eb6e1e3
@@ -1016,7 +1016,7 @@ static void end_clone_bio(struct bio *clone, int error)
  * the md may be freed in dm_put() at the end of this function.
  * Or do dm_get() before calling this function and dm_put() later.
  */
-static void rq_completed(struct mapped_device *md, int rw, int run_queue)
+static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 {
 	atomic_dec(&md->pending[rw]);
@@ -1050,7 +1050,8 @@ static void free_rq_clone(struct request *clone)
 /*
  * Complete the clone and the original request.
- * Must be called without queue lock.
+ * Must be called without clone's queue lock held,
+ * see end_clone_request() for more details.
  */
 static void dm_end_request(struct request *clone, int error)
 {
@@ -1079,7 +1080,8 @@ static void dm_end_request(struct request *clone, int error)
 static void dm_unprep_request(struct request *rq)
 {
-	struct request *clone = rq->special;
+	struct dm_rq_target_io *tio = rq->special;
+	struct request *clone = tio->clone;
 
 	rq->special = NULL;
 	rq->cmd_flags &= ~REQ_DONTPREP;
@@ -1090,12 +1092,10 @@ static void dm_unprep_request(struct request *rq)
 /*
  * Requeue the original request of a clone.
  */
-static void dm_requeue_unmapped_request(struct request *clone)
+static void dm_requeue_unmapped_original_request(struct mapped_device *md,
+						 struct request *rq)
 {
-	int rw = rq_data_dir(clone);
-	struct dm_rq_target_io *tio = clone->end_io_data;
-	struct mapped_device *md = tio->md;
-	struct request *rq = tio->orig;
+	int rw = rq_data_dir(rq);
 	struct request_queue *q = rq->q;
 	unsigned long flags;
@@ -1105,7 +1105,14 @@ static void dm_requeue_unmapped_request(struct request *clone)
 	blk_requeue_request(q, rq);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	rq_completed(md, rw, 0);
+	rq_completed(md, rw, false);
+}
+
+static void dm_requeue_unmapped_request(struct request *clone)
+{
+	struct dm_rq_target_io *tio = clone->end_io_data;
+
+	dm_requeue_unmapped_original_request(tio->md, tio->orig);
 }
 
 static void __stop_queue(struct request_queue *q)
@@ -1175,8 +1182,8 @@ static void dm_done(struct request *clone, int error, bool mapped)
 static void dm_softirq_done(struct request *rq)
 {
 	bool mapped = true;
-	struct request *clone = rq->completion_data;
-	struct dm_rq_target_io *tio = clone->end_io_data;
+	struct dm_rq_target_io *tio = rq->special;
+	struct request *clone = tio->clone;
 
 	if (rq->cmd_flags & REQ_FAILED)
 		mapped = false;
@@ -1188,13 +1195,11 @@ static void dm_softirq_done(struct request *rq)
  * Complete the clone and the original request with the error status
  * through softirq context.
  */
-static void dm_complete_request(struct request *clone, int error)
+static void dm_complete_request(struct request *rq, int error)
 {
-	struct dm_rq_target_io *tio = clone->end_io_data;
-	struct request *rq = tio->orig;
+	struct dm_rq_target_io *tio = rq->special;
 
 	tio->error = error;
-	rq->completion_data = clone;
 	blk_complete_request(rq);
 }
@@ -1204,20 +1209,19 @@ static void dm_complete_request(struct request *clone, int error)
  * Target's rq_end_io() function isn't called.
  * This may be used when the target's map_rq() function fails.
  */
-static void dm_kill_unmapped_request(struct request *clone, int error)
+static void dm_kill_unmapped_request(struct request *rq, int error)
 {
-	struct dm_rq_target_io *tio = clone->end_io_data;
-	struct request *rq = tio->orig;
-
 	rq->cmd_flags |= REQ_FAILED;
-	dm_complete_request(clone, error);
+	dm_complete_request(rq, error);
 }
 
 /*
- * Called with the queue lock held
+ * Called with the clone's queue lock held
  */
 static void end_clone_request(struct request *clone, int error)
 {
+	struct dm_rq_target_io *tio = clone->end_io_data;
+
 	/*
 	 * For just cleaning up the information of the queue in which
 	 * the clone was dispatched.
@@ -1228,13 +1232,13 @@ static void end_clone_request(struct request *clone, int error)
 	/*
 	 * Actual request completion is done in a softirq context which doesn't
-	 * hold the queue lock. Otherwise, deadlock could occur because:
+	 * hold the clone's queue lock. Otherwise, deadlock could occur because:
 	 *     - another request may be submitted by the upper level driver
 	 *       of the stacking during the completion
 	 *     - the submission which requires queue lock may be done
-	 *       against this queue
+	 *       against this clone's queue
 	 */
-	dm_complete_request(clone, error);
+	dm_complete_request(tio->orig, error);
 }
 
 /*
@@ -1712,16 +1716,17 @@ static void dm_request(struct request_queue *q, struct bio *bio)
 	_dm_request(q, bio);
 }
 
-static void dm_dispatch_request(struct request *rq)
+static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
 {
 	int r;
 
-	if (blk_queue_io_stat(rq->q))
-		rq->cmd_flags |= REQ_IO_STAT;
+	if (blk_queue_io_stat(clone->q))
+		clone->cmd_flags |= REQ_IO_STAT;
 
-	rq->start_time = jiffies;
-	r = blk_insert_cloned_request(rq->q, rq);
+	clone->start_time = jiffies;
+	r = blk_insert_cloned_request(clone->q, clone);
 	if (r)
+		/* must complete clone in terms of original request */
 		dm_complete_request(rq, r);
 }
@@ -1760,8 +1765,8 @@ static int setup_clone(struct request *clone, struct request *rq,
 	return 0;
 }
 
-static struct request *__clone_rq(struct request *rq, struct mapped_device *md,
-				  struct dm_rq_target_io *tio, gfp_t gfp_mask)
+static struct request *clone_rq(struct request *rq, struct mapped_device *md,
+				struct dm_rq_target_io *tio, gfp_t gfp_mask)
 {
 	struct request *clone = alloc_clone_request(md, gfp_mask);
@@ -1780,10 +1785,9 @@ static struct request *__clone_rq(struct request *rq, struct mapped_device *md,
 static void map_tio_request(struct kthread_work *work);
 
-static struct request *clone_rq(struct request *rq, struct mapped_device *md,
-				gfp_t gfp_mask)
+static struct dm_rq_target_io *prep_tio(struct request *rq,
+					struct mapped_device *md, gfp_t gfp_mask)
 {
-	struct request *clone;
 	struct dm_rq_target_io *tio;
 
 	tio = alloc_rq_tio(md, gfp_mask);
@@ -1798,13 +1802,12 @@ static struct request *clone_rq(struct request *rq, struct mapped_device *md,
 	memset(&tio->info, 0, sizeof(tio->info));
 	init_kthread_work(&tio->work, map_tio_request);
 
-	clone = __clone_rq(rq, md, tio, GFP_ATOMIC);
-	if (!clone) {
+	if (!clone_rq(rq, md, tio, gfp_mask)) {
 		free_rq_tio(tio);
 		return NULL;
 	}
 
-	return clone;
+	return tio;
 }
 
 /*
@@ -1813,18 +1816,18 @@ static struct request *clone_rq(struct request *rq, struct mapped_device *md,
 static int dm_prep_fn(struct request_queue *q, struct request *rq)
 {
 	struct mapped_device *md = q->queuedata;
-	struct request *clone;
+	struct dm_rq_target_io *tio;
 
 	if (unlikely(rq->special)) {
 		DMWARN("Already has something in rq->special.");
 		return BLKPREP_KILL;
 	}
 
-	clone = clone_rq(rq, md, GFP_ATOMIC);
-	if (!clone)
+	tio = prep_tio(rq, md, GFP_ATOMIC);
+	if (!tio)
 		return BLKPREP_DEFER;
 
-	rq->special = clone;
+	rq->special = tio;
 	rq->cmd_flags |= REQ_DONTPREP;
 
 	return BLKPREP_OK;
@@ -1835,11 +1838,12 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
  * 0  : the request has been processed (not requeued)
  * !0 : the request has been requeued
  */
-static int map_request(struct dm_target *ti, struct request *clone,
+static int map_request(struct dm_target *ti, struct request *rq,
 		       struct mapped_device *md)
 {
 	int r, requeued = 0;
-	struct dm_rq_target_io *tio = clone->end_io_data;
+	struct dm_rq_target_io *tio = rq->special;
+	struct request *clone = tio->clone;
 
 	r = ti->type->map_rq(ti, clone, &tio->info);
 	switch (r) {
@@ -1849,8 +1853,8 @@ static int map_request(struct dm_target *ti, struct request *clone,
 	case DM_MAPIO_REMAPPED:
 		/* The target has remapped the I/O so dispatch it */
 		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
-				     blk_rq_pos(tio->orig));
-		dm_dispatch_request(clone);
+				     blk_rq_pos(rq));
+		dm_dispatch_clone_request(clone, rq);
 		break;
 	case DM_MAPIO_REQUEUE:
 		/* The target wants to requeue the I/O */
@@ -1864,7 +1868,7 @@ static int map_request(struct dm_target *ti, struct request *clone,
 		}
 
 		/* The target wants to complete the I/O */
-		dm_kill_unmapped_request(clone, r);
+		dm_kill_unmapped_request(rq, r);
 		break;
 	}
@@ -1875,16 +1879,13 @@ static void map_tio_request(struct kthread_work *work)
 {
 	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
 
-	map_request(tio->ti, tio->clone, tio->md);
+	map_request(tio->ti, tio->orig, tio->md);
 }
 
-static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
+static void dm_start_request(struct mapped_device *md, struct request *orig)
 {
-	struct request *clone;
-
 	blk_start_request(orig);
-	clone = orig->special;
-	atomic_inc(&md->pending[rq_data_dir(clone)]);
+	atomic_inc(&md->pending[rq_data_dir(orig)]);
 
 	/*
 	 * Hold the md reference here for the in-flight I/O.
@@ -1894,8 +1895,6 @@ static struct request *dm_start_request(struct mapped_device *md, struct request
 	 * See the comment in rq_completed() too.
 	 */
 	dm_get(md);
-
-	return clone;
 }
 
 /*
@@ -1908,7 +1907,7 @@ static void dm_request_fn(struct request_queue *q)
 	int srcu_idx;
 	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 	struct dm_target *ti;
-	struct request *rq, *clone;
+	struct request *rq;
 	struct dm_rq_target_io *tio;
 	sector_t pos;
@@ -1931,19 +1930,19 @@ static void dm_request_fn(struct request_queue *q)
 		ti = dm_table_find_target(map, pos);
 		if (!dm_target_is_valid(ti)) {
 			/*
-			 * Must perform setup, that dm_done() requires,
+			 * Must perform setup, that rq_completed() requires,
 			 * before calling dm_kill_unmapped_request
 			 */
 			DMERR_LIMIT("request attempted access beyond the end of device");
-			clone = dm_start_request(md, rq);
-			dm_kill_unmapped_request(clone, -EIO);
+			dm_start_request(md, rq);
+			dm_kill_unmapped_request(rq, -EIO);
 			continue;
 		}
 
 		if (ti->type->busy && ti->type->busy(ti))
 			goto delay_and_out;
 
-		clone = dm_start_request(md, rq);
+		dm_start_request(md, rq);
 
 		tio = rq->special;
 		/* Establish tio->ti before queuing work (map_tio_request) */
@@ -2240,16 +2239,15 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 			bioset_free(md->bs);
 			md->bs = p->bs;
 			p->bs = NULL;
-		} else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) {
-			/*
-			 * There's no need to reload with request-based dm
-			 * because the size of front_pad doesn't change.
-			 * Note for future: If you are to reload bioset,
-			 * prep-ed requests in the queue may refer
-			 * to bio from the old bioset, so you must walk
-			 * through the queue to unprep.
-			 */
 		}
+		/*
+		 * There's no need to reload with request-based dm
+		 * because the size of front_pad doesn't change.
+		 * Note for future: If you are to reload bioset,
+		 * prep-ed requests in the queue may refer
+		 * to bio from the old bioset, so you must walk
+		 * through the queue to unprep.
+		 */
 		goto out;
 	}