Commit 3a366e61 authored by Tejun Heo, committed by Jens Axboe

block: add missing block_bio_complete() tracepoint

bio completion didn't trigger the block_bio_complete TP.  Only dm was
explicitly triggering the TP on IO completion.  This made the
block_bio_complete TP useless for tracers that want to know about
bios, and all other bio-based drivers skipped generating blktrace
completion events.

This patch makes all bio completions via bio_endio() generate
block_bio_complete TP.

* Explicit trace_block_bio_complete() invocation removed from dm and
  the trace point is unexported.

* @rq dropped from trace_block_bio_complete().  bios may fly around
  w/o an associated queue.  Verifying and accessing the associated
  queue belongs to TP probes (see the probe sketch after this list).

* blktrace now gets both request and bio completions.  Make it ignore
  bio completions when the request completion path is in use.
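
  As a sketch of the new contract, a probe now receives only the bio
  and must guard against a missing bdev before touching the queue.
  A hypothetical built-in probe (the TP is no longer exported to
  modules; the my_* names are illustrative, not part of this patch):

	#include <linux/blkdev.h>
	#include <trace/events/block.h>

	/* hypothetical probe: @q is gone, so derive the queue here */
	static void my_bio_complete_probe(void *ignore, struct bio *bio,
					  int error)
	{
		struct request_queue *q;

		if (!bio->bi_bdev)	/* e.g. bios backing sg commands */
			return;

		q = bdev_get_queue(bio->bi_bdev);
		pr_debug("bio complete on %p: sector %llu err %d\n",
			 q, (unsigned long long)bio->bi_sector, error);
	}

	/* registration helper generated by TRACE_EVENT() */
	static int __init my_probe_init(void)
	{
		return register_trace_block_bio_complete(my_bio_complete_probe,
							 NULL);
	}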

This makes all bio-based drivers generate blktrace completion events
properly and makes the block_bio_complete TP actually useful.

v2: With this change, the block_bio_complete TP could be invoked on sg
    commands, which have bios with a %NULL bi_bdev.  Update the TP
    assignment code to check whether bio->bi_bdev is %NULL before
    dereferencing it.
Signed-off-by: Tejun Heo <tj@kernel.org>
Original-patch-by: Namhyung Kim <namhyung@gmail.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: dm-devel@redhat.com
Cc: Neil Brown <neilb@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent ac9a1974
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -39,7 +39,6 @@
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
-EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
 
 DEFINE_IDA(blk_queue_ida);
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -627,7 +627,6 @@ static void dec_pending(struct dm_io *io, int error)
 			queue_io(md, bio);
 		} else {
 			/* done with normal IO or empty flush */
-			trace_block_bio_complete(md->queue, bio, io_error);
 			bio_endio(bio, io_error);
 		}
 	}
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -184,8 +184,6 @@ static void return_io(struct bio *return_bi)
 		return_bi = bi->bi_next;
 		bi->bi_next = NULL;
 		bi->bi_size = 0;
-		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
-					 bi, 0);
 		bio_endio(bi, 0);
 		bi = return_bi;
 	}
@@ -3917,8 +3915,6 @@ static void raid5_align_endio(struct bio *bi, int error)
 	rdev_dec_pending(rdev, conf->mddev);
 
 	if (!error && uptodate) {
-		trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
-					 raid_bi, 0);
 		bio_endio(raid_bi, 0);
 		if (atomic_dec_and_test(&conf->active_aligned_reads))
 			wake_up(&conf->wait_for_stripe);
@@ -4377,8 +4373,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 		if ( rw == WRITE )
 			md_write_end(mddev);
 
-		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
-					 bi, 0);
 		bio_endio(bi, 0);
 	}
 }
@@ -4755,11 +4749,8 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
 		handled++;
 	}
 	remaining = raid5_dec_bi_active_stripes(raid_bio);
-	if (remaining == 0) {
-		trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
-					 raid_bio, 0);
+	if (remaining == 0)
 		bio_endio(raid_bio, 0);
-	}
 	if (atomic_dec_and_test(&conf->active_aligned_reads))
 		wake_up(&conf->wait_for_stripe);
 	return handled;
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1428,6 +1428,8 @@ void bio_endio(struct bio *bio, int error)
 	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
 		error = -EIO;
 
+	trace_block_bio_complete(bio, error);
+
 	if (bio->bi_end_io)
 		bio->bi_end_io(bio, error);
 }
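
As a sketch (hypothetical driver; my_make_request is an illustrative
name, using the 3.8-era make_request/bio_endio API), any bio-based
driver that completes IO through bio_endio() now emits
block_bio_complete without a driver-side tracing call:

	/* hypothetical bio-based driver */
	static void my_make_request(struct request_queue *q, struct bio *bio)
	{
		/* ... service the bio ... */
		bio_endio(bio, 0);	/* fires trace_block_bio_complete(bio, 0) */
	}
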
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -12,6 +12,7 @@
 
 struct blk_trace {
 	int trace_state;
+	bool rq_based;
 	struct rchan *rchan;
 	unsigned long __percpu *sequence;
 	unsigned char __percpu *msg_data;
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -206,7 +206,6 @@ TRACE_EVENT(block_bio_bounce,
 
 /**
  * block_bio_complete - completed all work on the block operation
- * @q: queue holding the block operation
  * @bio: block operation completed
  * @error: io error value
  *
@@ -215,9 +214,9 @@ TRACE_EVENT(block_bio_bounce,
  */
 TRACE_EVENT(block_bio_complete,
 
-	TP_PROTO(struct request_queue *q, struct bio *bio, int error),
+	TP_PROTO(struct bio *bio, int error),
 
-	TP_ARGS(q, bio, error),
+	TP_ARGS(bio, error),
 
 	TP_STRUCT__entry(
 		__field( dev_t,		dev		)
@@ -228,7 +227,8 @@ TRACE_EVENT(block_bio_complete,
 	),
 
 	TP_fast_assign(
-		__entry->dev		= bio->bi_bdev->bd_dev;
+		__entry->dev		= bio->bi_bdev ?
+					  bio->bi_bdev->bd_dev : 0;
 		__entry->sector		= bio->bi_sector;
 		__entry->nr_sector	= bio->bi_size >> 9;
 		__entry->error		= error;
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -739,6 +739,12 @@ static void blk_add_trace_rq_complete(void *ignore,
 				      struct request_queue *q,
 				      struct request *rq)
 {
+	struct blk_trace *bt = q->blk_trace;
+
+	/* if control ever passes through here, it's a request based driver */
+	if (unlikely(bt && !bt->rq_based))
+		bt->rq_based = true;
+
 	blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
 }
@@ -774,10 +780,24 @@ static void blk_add_trace_bio_bounce(void *ignore,
 	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
 }
 
-static void blk_add_trace_bio_complete(void *ignore,
-				       struct request_queue *q, struct bio *bio,
-				       int error)
+static void blk_add_trace_bio_complete(void *ignore, struct bio *bio, int error)
 {
+	struct request_queue *q;
+	struct blk_trace *bt;
+
+	if (!bio->bi_bdev)
+		return;
+
+	q = bdev_get_queue(bio->bi_bdev);
+	bt = q->blk_trace;
+
+	/*
+	 * Request based drivers will generate both rq and bio completions.
+	 * Ignore bio ones.
+	 */
+	if (likely(!bt) || bt->rq_based)
+		return;
+
 	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
 }
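
Taken together, the two probes keep the completion stream free of
duplicates: the first request completion on a queue latches
bt->rq_based, after which bio completions on that queue are ignored,
so request based drivers log only rq completions while bio based
drivers (md, dm, etc.) get theirs from bio_endio().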