Commit 5f3ea37c authored by Arnaldo Carvalho de Melo, committed by Ingo Molnar

blktrace: port to tracepoints

This is a forward port of work done by Mathieu Desnoyers. I changed it to
encode the 'what' parameter in the tracepoint name, so that one can register
interest in specific events rather than in whole classes of events that must
then be filtered on the 'what' parameter.
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 509dceef
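
As a minimal, hypothetical sketch of what this buys a tracer (not part of this
commit; the module, function, and message names are invented), a probe can now
be attached to exactly one event, block_plug, using the register/unregister
helpers generated by DEFINE_TRACE() in the new include/trace/block.h:

#include <linux/module.h>
#include <linux/blkdev.h>
#include <trace/block.h>

/* Signature must match the block_plug TPPROTO below. */
static void probe_block_plug(struct request_queue *q)
{
	/* Probes run with preemption disabled; keep them cheap. */
	pr_info("block: queue %p plugged\n", q);
}

static int __init plug_watch_init(void)
{
	/* Hook only plug events; no other block action lands here. */
	return register_trace_block_plug(probe_block_plug);
}

static void __exit plug_watch_exit(void)
{
	unregister_trace_block_plug(probe_block_plug);
	/* Wait for in-flight probes before the module text goes away. */
	tracepoint_synchronize_unregister();
}

module_init(plug_watch_init);
module_exit(plug_watch_exit);
MODULE_LICENSE("GPL");

With one tracepoint per action, the old pattern of hooking a class of events
and switching on 'what' disappears from the callers, as the diff below shows.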
block/Kconfig

@@ -47,6 +47,7 @@ config BLK_DEV_IO_TRACE
 	depends on SYSFS
 	select RELAY
 	select DEBUG_FS
+	select TRACEPOINTS
 	help
 	  Say Y here if you want to be able to trace the block layer actions
 	  on a given queue. Tracing allows you to see any traffic happening
block/blk-core.c

@@ -28,6 +28,7 @@
 #include <linux/task_io_accounting_ops.h>
 #include <linux/blktrace_api.h>
 #include <linux/fault-inject.h>
+#include <trace/block.h>
 
 #include "blk.h"

@@ -205,7 +206,7 @@ void blk_plug_device(struct request_queue *q)
 	if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
 		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
-		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
+		trace_block_plug(q);
 	}
 }
 EXPORT_SYMBOL(blk_plug_device);

@@ -292,9 +293,7 @@ void blk_unplug_work(struct work_struct *work)
 	struct request_queue *q =
 		container_of(work, struct request_queue, unplug_work);
 
-	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-				q->rq.count[READ] + q->rq.count[WRITE]);
+	trace_block_unplug_io(q);
 
 	q->unplug_fn(q);
 }

@@ -302,9 +301,7 @@ void blk_unplug_timeout(unsigned long data)
 {
 	struct request_queue *q = (struct request_queue *)data;
 
-	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
-				q->rq.count[READ] + q->rq.count[WRITE]);
+	trace_block_unplug_timer(q);
 
 	kblockd_schedule_work(q, &q->unplug_work);
 }

@@ -314,9 +311,7 @@ void blk_unplug(struct request_queue *q)
 	 * devices don't necessarily have an ->unplug_fn defined
 	 */
 	if (q->unplug_fn) {
-		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-					q->rq.count[READ] + q->rq.count[WRITE]);
+		trace_block_unplug_io(q);
 
 		q->unplug_fn(q);
 	}
 }

@@ -822,7 +817,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	if (ioc_batching(q, ioc))
 		ioc->nr_batch_requests--;
 
-	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
+	trace_block_getrq(q, bio, rw);
 out:
 	return rq;
 }

@@ -848,7 +843,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
 				TASK_UNINTERRUPTIBLE);
 
-		blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+		trace_block_sleeprq(q, bio, rw);
 
 		__generic_unplug_device(q);
 		spin_unlock_irq(q->queue_lock);

@@ -928,7 +923,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
 {
 	blk_delete_timer(rq);
 	blk_clear_rq_complete(rq);
-	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+	trace_block_rq_requeue(q, rq);
 
 	if (blk_rq_tagged(rq))
 		blk_queue_end_tag(q, rq);

@@ -1167,7 +1162,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 		if (!ll_back_merge_fn(q, req, bio))
 			break;
 
-		blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+		trace_block_bio_backmerge(q, bio);
 
 		req->biotail->bi_next = bio;
 		req->biotail = bio;

@@ -1186,7 +1181,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 		if (!ll_front_merge_fn(q, req, bio))
 			break;
 
-		blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+		trace_block_bio_frontmerge(q, bio);
 
 		bio->bi_next = req->bio;
 		req->bio = bio;

@@ -1269,7 +1264,7 @@ static inline void blk_partition_remap(struct bio *bio)
 		bio->bi_sector += p->start_sect;
 		bio->bi_bdev = bdev->bd_contains;
 
-		blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio,
-				    bdev->bd_dev, bio->bi_sector,
-				    bio->bi_sector - p->start_sect);
+		trace_block_remap(bdev_get_queue(bio->bi_bdev), bio,
+				  bdev->bd_dev, bio->bi_sector,
+				  bio->bi_sector - p->start_sect);
 	}
 }

@@ -1441,10 +1436,10 @@ static inline void __generic_make_request(struct bio *bio)
 			goto end_io;
 
 		if (old_sector != -1)
-			blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
-					    old_sector);
+			trace_block_remap(q, bio, old_dev, bio->bi_sector,
+					  old_sector);
 
-		blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+		trace_block_bio_queue(q, bio);
 
 		old_sector = bio->bi_sector;
 		old_dev = bio->bi_bdev->bd_dev;

@@ -1656,7 +1651,7 @@ static int __end_that_request_first(struct request *req, int error,
 	int total_bytes, bio_nbytes, next_idx = 0;
 	struct bio *bio;
 
-	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
+	trace_block_rq_complete(req->q, req);
 
 	/*
 	 * for a REQ_TYPE_BLOCK_PC request, we want to carry any eventual
block/blktrace.c (this diff is collapsed)
block/elevator.c

@@ -33,6 +33,7 @@
 #include <linux/compiler.h>
 #include <linux/delay.h>
 #include <linux/blktrace_api.h>
+#include <trace/block.h>
 #include <linux/hash.h>
 #include <linux/uaccess.h>

@@ -586,7 +587,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 	unsigned ordseq;
 	int unplug_it = 1;
 
-	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
+	trace_block_rq_insert(q, rq);
 
 	rq->q = q;

@@ -772,7 +773,7 @@ struct request *elv_next_request(struct request_queue *q)
 			 * not be passed by new incoming requests
 			 */
 			rq->cmd_flags |= REQ_STARTED;
-			blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
+			trace_block_rq_issue(q, rq);
 		}
 
 		if (!q->boundary_rq || q->boundary_rq == rq) {

@@ -921,7 +922,7 @@ void elv_abort_queue(struct request_queue *q)
 	while (!list_empty(&q->queue_head)) {
 		rq = list_entry_rq(q->queue_head.next);
 		rq->cmd_flags |= REQ_QUIET;
-		blk_add_trace_rq(q, rq, BLK_TA_ABORT);
+		trace_block_rq_abort(q, rq);
 		__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
 	}
 }
drivers/md/dm.c

@@ -21,6 +21,7 @@
 #include <linux/idr.h>
 #include <linux/hdreg.h>
 #include <linux/blktrace_api.h>
+#include <trace/block.h>
 
 #define DM_MSG_PREFIX "core"

@@ -504,8 +505,7 @@ static void dec_pending(struct dm_io *io, int error)
 		end_io_acct(io);
 
 		if (io->error != DM_ENDIO_REQUEUE) {
-			blk_add_trace_bio(io->md->queue, io->bio,
-					  BLK_TA_COMPLETE);
+			trace_block_bio_complete(io->md->queue, io->bio);
 
 			bio_endio(io->bio, io->error);
 		}

@@ -598,7 +598,7 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
 	if (r == DM_MAPIO_REMAPPED) {
 		/* the bio has been remapped so dispatch it */
 
-		blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
-				    tio->io->bio->bi_bdev->bd_dev,
-				    clone->bi_sector, sector);
+		trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
+				  tio->io->bio->bi_bdev->bd_dev,
+				  clone->bi_sector, sector);
fs/bio.c

@@ -26,6 +26,7 @@
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
 #include <linux/blktrace_api.h>
+#include <trace/block.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */
 
 static struct kmem_cache *bio_slab __read_mostly;

@@ -1263,7 +1264,7 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors)
 	if (!bp)
 		return bp;
 
-	blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi,
-			      bi->bi_sector + first_sectors);
+	trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
+			  bi->bi_sector + first_sectors);
 
 	BUG_ON(bi->bi_vcnt != 1);
include/linux/blktrace_api.h

@@ -160,7 +160,6 @@ struct blk_trace {
 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
 extern void blk_trace_shutdown(struct request_queue *);
-extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
 extern int do_blk_trace_setup(struct request_queue *q,
 	char *name, dev_t dev, struct blk_user_trace_setup *buts);
 extern void __trace_note_message(struct blk_trace *, const char *fmt, ...);
@@ -186,168 +185,8 @@ extern void __trace_note_message(struct blk_trace *, const char *fmt, ...);
 } while (0)
 #define BLK_TN_MAX_MSG	128
 
-/**
- * blk_add_trace_rq - Add a trace for a request oriented action
- * @q:		queue the io is for
- * @rq:		the source request
- * @what:	the action
- *
- * Description:
- *     Records an action against a request. Will log the bio offset + size.
- *
- **/
-static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
-				    u32 what)
-{
-	struct blk_trace *bt = q->blk_trace;
-	int rw = rq->cmd_flags & 0x03;
-
-	if (likely(!bt))
-		return;
-
-	if (blk_discard_rq(rq))
-		rw |= (1 << BIO_RW_DISCARD);
-
-	if (blk_pc_request(rq)) {
-		what |= BLK_TC_ACT(BLK_TC_PC);
-		__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
-	} else {
-		what |= BLK_TC_ACT(BLK_TC_FS);
-		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
-	}
-}
-
-/**
- * blk_add_trace_bio - Add a trace for a bio oriented action
- * @q:		queue the io is for
- * @bio:	the source bio
- * @what:	the action
- *
- * Description:
- *     Records an action against a bio. Will log the bio offset + size.
- *
- **/
-static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
-				     u32 what)
-{
-	struct blk_trace *bt = q->blk_trace;
-
-	if (likely(!bt))
-		return;
-
-	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
-}
-
-/**
- * blk_add_trace_generic - Add a trace for a generic action
- * @q:		queue the io is for
- * @bio:	the source bio
- * @rw:		the data direction
- * @what:	the action
- *
- * Description:
- *     Records a simple trace
- *
- **/
-static inline void blk_add_trace_generic(struct request_queue *q,
-					 struct bio *bio, int rw, u32 what)
-{
-	struct blk_trace *bt = q->blk_trace;
-
-	if (likely(!bt))
-		return;
-
-	if (bio)
-		blk_add_trace_bio(q, bio, what);
-	else
-		__blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
-}
-
-/**
- * blk_add_trace_pdu_int - Add a trace for a bio with an integer payload
- * @q:		queue the io is for
- * @what:	the action
- * @bio:	the source bio
- * @pdu:	the integer payload
- *
- * Description:
- *     Adds a trace with some integer payload. This might be an unplug
- *     option given as the action, with the depth at unplug time given
- *     as the payload
- *
- **/
-static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what,
-					 struct bio *bio, unsigned int pdu)
-{
-	struct blk_trace *bt = q->blk_trace;
-	__be64 rpdu = cpu_to_be64(pdu);
-
-	if (likely(!bt))
-		return;
-
-	if (bio)
-		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), sizeof(rpdu), &rpdu);
-	else
-		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
-}
-
-/**
- * blk_add_trace_remap - Add a trace for a remap operation
- * @q:		queue the io is for
- * @bio:	the source bio
- * @dev:	target device
- * @from:	source sector
- * @to:		target sector
- *
- * Description:
- *     Device mapper or raid target sometimes need to split a bio because
- *     it spans a stripe (or similar). Add a trace for that action.
- *
- **/
-static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
-				       dev_t dev, sector_t from, sector_t to)
-{
-	struct blk_trace *bt = q->blk_trace;
-	struct blk_io_trace_remap r;
-
-	if (likely(!bt))
-		return;
-
-	r.device = cpu_to_be32(dev);
-	r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
-	r.sector = cpu_to_be64(to);
-
-	__blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
-}
-
-/**
- * blk_add_driver_data - Add binary message with driver-specific data
- * @q:		queue the io is for
- * @rq:		io request
- * @data:	driver-specific data
- * @len:	length of driver-specific data
- *
- * Description:
- *     Some drivers might want to write driver-specific data per request.
- *
- **/
-static inline void blk_add_driver_data(struct request_queue *q,
-				       struct request *rq,
-				       void *data, size_t len)
-{
-	struct blk_trace *bt = q->blk_trace;
-
-	if (likely(!bt))
-		return;
-
-	if (blk_pc_request(rq))
-		__blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
-				rq->errors, len, data);
-	else
-		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
-				0, BLK_TA_DRV_DATA, rq->errors, len, data);
-}
-
+extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
+				void *data, size_t len);
 extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 			   char __user *arg);
 extern int blk_trace_startstop(struct request_queue *q, int start);
@@ -356,13 +195,8 @@ extern int blk_trace_remove(struct request_queue *q);
 #else /* !CONFIG_BLK_DEV_IO_TRACE */
 #define blk_trace_ioctl(bdev, cmd, arg)		(-ENOTTY)
 #define blk_trace_shutdown(q)			do { } while (0)
-#define blk_add_trace_rq(q, rq, what)		do { } while (0)
-#define blk_add_trace_bio(q, rq, what)		do { } while (0)
-#define blk_add_trace_generic(q, rq, rw, what)	do { } while (0)
-#define blk_add_trace_pdu_int(q, what, bio, pdu)	do { } while (0)
-#define blk_add_trace_remap(q, bio, dev, f, t)	do {} while (0)
-#define blk_add_driver_data(q, rq, data, len)	do {} while (0)
 #define do_blk_trace_setup(q, name, dev, buts)	(-ENOTTY)
+#define blk_add_driver_data(q, rq, data, len)	do {} while (0)
 #define blk_trace_setup(q, name, dev, arg)	(-ENOTTY)
 #define blk_trace_startstop(q, start)		(-ENOTTY)
 #define blk_trace_remove(q)			(-ENOTTY)
include/trace/block.h (new file)

+#ifndef _TRACE_BLOCK_H
+#define _TRACE_BLOCK_H
+
+#include <linux/blkdev.h>
+#include <linux/tracepoint.h>
+
+DEFINE_TRACE(block_rq_abort,
+	TPPROTO(struct request_queue *q, struct request *rq),
+	TPARGS(q, rq));
+
+DEFINE_TRACE(block_rq_insert,
+	TPPROTO(struct request_queue *q, struct request *rq),
+	TPARGS(q, rq));
+
+DEFINE_TRACE(block_rq_issue,
+	TPPROTO(struct request_queue *q, struct request *rq),
+	TPARGS(q, rq));
+
+DEFINE_TRACE(block_rq_requeue,
+	TPPROTO(struct request_queue *q, struct request *rq),
+	TPARGS(q, rq));
+
+DEFINE_TRACE(block_rq_complete,
+	TPPROTO(struct request_queue *q, struct request *rq),
+	TPARGS(q, rq));
+
+DEFINE_TRACE(block_bio_bounce,
+	TPPROTO(struct request_queue *q, struct bio *bio),
+	TPARGS(q, bio));
+
+DEFINE_TRACE(block_bio_complete,
+	TPPROTO(struct request_queue *q, struct bio *bio),
+	TPARGS(q, bio));
+
+DEFINE_TRACE(block_bio_backmerge,
+	TPPROTO(struct request_queue *q, struct bio *bio),
+	TPARGS(q, bio));
+
+DEFINE_TRACE(block_bio_frontmerge,
+	TPPROTO(struct request_queue *q, struct bio *bio),
+	TPARGS(q, bio));
+
+DEFINE_TRACE(block_bio_queue,
+	TPPROTO(struct request_queue *q, struct bio *bio),
+	TPARGS(q, bio));
+
+DEFINE_TRACE(block_getrq,
+	TPPROTO(struct request_queue *q, struct bio *bio, int rw),
+	TPARGS(q, bio, rw));
+
+DEFINE_TRACE(block_sleeprq,
+	TPPROTO(struct request_queue *q, struct bio *bio, int rw),
+	TPARGS(q, bio, rw));
+
+DEFINE_TRACE(block_plug,
+	TPPROTO(struct request_queue *q),
+	TPARGS(q));
+
+DEFINE_TRACE(block_unplug_timer,
+	TPPROTO(struct request_queue *q),
+	TPARGS(q));
+
+DEFINE_TRACE(block_unplug_io,
+	TPPROTO(struct request_queue *q),
+	TPARGS(q));
+
+DEFINE_TRACE(block_split,
+	TPPROTO(struct request_queue *q, struct bio *bio, unsigned int pdu),
+	TPARGS(q, bio, pdu));
+
+DEFINE_TRACE(block_remap,
+	TPPROTO(struct request_queue *q, struct bio *bio, dev_t dev,
+		sector_t from, sector_t to),
+	TPARGS(q, bio, dev, from, to));
+
+#endif
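
For illustration (hypothetical code, not part of this commit; the probe name
is invented): each TPPROTO above fixes the exact signature a probe must have,
so a hook for the five-argument block_remap event receives typed parameters
directly, instead of the packed blk_io_trace_remap PDU that the old
blk_add_trace_remap() helper built:

#include <linux/blkdev.h>
#include <linux/kdev_t.h>
#include <trace/block.h>

/* Invented probe; the signature mirrors the block_remap TPPROTO above. */
static void probe_block_remap(struct request_queue *q, struct bio *bio,
			      dev_t dev, sector_t from, sector_t to)
{
	pr_info("remap on %u:%u: sector %llu -> %llu\n",
		MAJOR(dev), MINOR(dev),
		(unsigned long long)from, (unsigned long long)to);
}

It would be attached and detached with the generated
register_trace_block_remap()/unregister_trace_block_remap() pair, as in the
earlier sketch.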
mm/bounce.c

@@ -14,6 +14,7 @@
 #include <linux/hash.h>
 #include <linux/highmem.h>
 #include <linux/blktrace_api.h>
+#include <trace/block.h>
 #include <asm/tlbflush.h>
 
 #define POOL_SIZE	64

@@ -222,7 +223,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 	if (!bio)
 		return;
 
-	blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE);
+	trace_block_bio_bounce(q, *bio_orig);
 
 	/*
 	 * at least one page was bounced, fill in possible non-highmem