Commit 94b5eb28 authored by Jens Axboe

block: fixup block IO unplug trace call

It was removed with the on-stack plugging; re-add it and track the
depth of requests added when flushing the plug.
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parent d9c97833
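The key semantic change is what "depth" means: instead of sampling the queue's global request counts at unplug time, the trace now reports how many requests the current plug flush just added to that queue. For instance, a flush holding three requests for one queue and two for another (the plug list is sorted per queue by plug_rq_cmp) fires the unplug trace twice, with depths 3 and 2. The following is a toy userspace model of that bookkeeping, not kernel code; the queue ids and list contents are invented for illustration:

#include <stdio.h>

/* Toy model of the new bookkeeping: walk a plug list that is sorted by
 * queue (as plug_rq_cmp guarantees), count requests handed to the current
 * queue, and report that count -- the "depth" -- whenever the queue is
 * unplugged. */
int main(void)
{
        int plug_list[] = { 1, 1, 1, 2, 2 };    /* queue id of each plugged request */
        int n = sizeof(plug_list) / sizeof(plug_list[0]);
        int cur_queue = -1;
        unsigned int depth = 0;

        for (int i = 0; i < n; i++) {
                if (plug_list[i] != cur_queue) {
                        if (cur_queue != -1)
                                printf("block_unplug_io(queue %d, depth %u)\n",
                                       cur_queue, depth);
                        cur_queue = plug_list[i];
                        depth = 0;              /* new queue: restart the count */
                }
                depth++;                        /* one more request for this queue */
        }
        if (cur_queue != -1)
                printf("block_unplug_io(queue %d, depth %u)\n", cur_queue, depth);

        return 0;
}

Running this prints depths 3 and 2, which is exactly the per-flush, per-queue value the patch below feeds into the tracepoint.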
@@ -2668,12 +2668,19 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
 	return !(rqa->q <= rqb->q);
 }
 
+static void queue_unplugged(struct request_queue *q, unsigned int depth)
+{
+	trace_block_unplug_io(q, depth);
+	__blk_run_queue(q, false);
+}
+
 static void flush_plug_list(struct blk_plug *plug)
 {
 	struct request_queue *q;
 	unsigned long flags;
 	struct request *rq;
 	LIST_HEAD(list);
+	unsigned int depth;
 
 	BUG_ON(plug->magic != PLUG_MAGIC);
 
@@ -2688,6 +2695,7 @@ static void flush_plug_list(struct blk_plug *plug)
 	}
 
 	q = NULL;
+	depth = 0;
 	local_irq_save(flags);
 	while (!list_empty(&list)) {
 		rq = list_entry_rq(list.next);
@@ -2696,10 +2704,11 @@ static void flush_plug_list(struct blk_plug *plug)
 		BUG_ON(!rq->q);
 		if (rq->q != q) {
 			if (q) {
-				__blk_run_queue(q, false);
+				queue_unplugged(q, depth);
 				spin_unlock(q->queue_lock);
 			}
 			q = rq->q;
+			depth = 0;
 			spin_lock(q->queue_lock);
 		}
 		rq->cmd_flags &= ~REQ_ON_PLUG;
@@ -2711,10 +2720,12 @@ static void flush_plug_list(struct blk_plug *plug)
 			__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
 		else
 			__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
+
+		depth++;
 	}
 
 	if (q) {
-		__blk_run_queue(q, false);
+		queue_unplugged(q, depth);
 		spin_unlock(q->queue_lock);
 	}
...
@@ -401,9 +401,9 @@ TRACE_EVENT(block_plug,
 
 DECLARE_EVENT_CLASS(block_unplug,
 
-	TP_PROTO(struct request_queue *q),
+	TP_PROTO(struct request_queue *q, unsigned int depth),
 
-	TP_ARGS(q),
+	TP_ARGS(q, depth),
 
 	TP_STRUCT__entry(
 		__field( int, nr_rq )
@@ -411,7 +411,7 @@ DECLARE_EVENT_CLASS(block_unplug,
 	),
 
 	TP_fast_assign(
-		__entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE];
+		__entry->nr_rq = depth;
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
@@ -421,15 +421,16 @@ DECLARE_EVENT_CLASS(block_unplug,
 /**
  * block_unplug_io - release of operations requests in request queue
  * @q: request queue to unplug
+ * @depth: number of requests just added to the queue
  *
  * Unplug request queue @q because device driver is scheduled to work
  * on elements in the request queue.
  */
 DEFINE_EVENT(block_unplug, block_unplug_io,
 
-	TP_PROTO(struct request_queue *q),
+	TP_PROTO(struct request_queue *q, unsigned int depth),
 
-	TP_ARGS(q)
+	TP_ARGS(q, depth)
 );
 
 /**
...
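Because the event class prototype grew an argument, every probe attached to block_unplug_io must now accept the depth as well; blktrace's own probe is updated in the next hunk. Below is a minimal sketch of an in-kernel consumer, assuming the register/unregister helpers that DEFINE_EVENT generates are reachable from the calling code; the probe name and module boilerplate are hypothetical:

#include <linux/module.h>
#include <linux/tracepoint.h>
#include <trace/events/block.h>

/* Hypothetical probe: tracepoint callbacks receive their private data
 * pointer first, then the arguments from TP_PROTO() -- which now
 * includes the per-flush depth. */
static void my_unplug_probe(void *ignore, struct request_queue *q,
                            unsigned int depth)
{
        pr_info("block unplug: %u request(s) just queued\n", depth);
}

static int __init my_probe_init(void)
{
        /* Hook the event the same way blktrace does; the second argument
         * is the opaque data handed back to the probe. */
        return register_trace_block_unplug_io(my_unplug_probe, NULL);
}

static void __exit my_probe_exit(void)
{
        unregister_trace_block_unplug_io(my_unplug_probe, NULL);
        tracepoint_synchronize_unregister();
}

module_init(my_probe_init);
module_exit(my_probe_exit);
MODULE_LICENSE("GPL");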
@@ -850,13 +850,13 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
 		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
 }
 
-static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q)
+static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q,
+				    unsigned int depth)
 {
 	struct blk_trace *bt = q->blk_trace;
 
 	if (bt) {
-		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
-		__be64 rpdu = cpu_to_be64(pdu);
+		__be64 rpdu = cpu_to_be64(depth);
 
 		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
 				sizeof(rpdu), &rpdu);
...
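For anything parsing the binary trace stream, the BLK_TA_UNPLUG_IO payload is now simply the flush depth stored as a 64-bit big-endian value (the cpu_to_be64() above). A small standalone sketch of decoding that pdu in userspace; the helper name and the sample payload bytes are made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Decode the 8-byte big-endian unplug depth that the kernel stores as the
 * event's pdu (see cpu_to_be64(depth) above). */
static uint64_t unplug_depth(const unsigned char *pdu, size_t len)
{
        uint64_t depth = 0;
        size_t i;

        if (len < 8)
                return 0;
        for (i = 0; i < 8; i++)
                depth = (depth << 8) | pdu[i];  /* big-endian to host order */
        return depth;
}

int main(void)
{
        /* Example payload: big-endian encoding of depth == 3. */
        unsigned char pdu[8] = { 0, 0, 0, 0, 0, 0, 0, 3 };

        printf("unplug depth: %llu\n",
               (unsigned long long)unplug_depth(pdu, sizeof(pdu)));
        return 0;
}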