Commit b73c7d02 authored by Douglas Anderson, committed by Greg Kroah-Hartman

tracing: kdb: Fix ftdump to not sleep

[ Upstream commit 31b265b3 ]

As reported back in 2016-11 [1], the "ftdump" kdb command triggers a
BUG for "sleeping function called from invalid context".

kdb's "ftdump" command wants to call ring_buffer_read_prepare() in
atomic context.  A very simple solution for this is to add allocation
flags to ring_buffer_read_prepare() so kdb can call it without
triggering the allocation error.  This patch does that.
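
For illustration only (not part of the patch): a minimal sketch of how an
atomic-context caller would use the new parameter. The helper name
prepare_iter_atomic is hypothetical; the ring_buffer_read_prepare()
signature matches the diff below.

	/* Hypothetical helper, shown only to illustrate the new API. */
	static struct ring_buffer_iter *
	prepare_iter_atomic(struct ring_buffer *buffer, int cpu)
	{
		/*
		 * GFP_ATOMIC: the allocation may not sleep, so the call
		 * is safe from atomic contexts such as the kdb shell.
		 */
		return ring_buffer_read_prepare(buffer, cpu, GFP_ATOMIC);
	}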

Note that in the original email thread about this, it was suggested
that perhaps the solution for kdb was to either preallocate the buffer
ahead of time or create our own iterator.  I'm hoping that this
alternative of adding allocation flags to ring_buffer_read_prepare()
can be considered since it means I don't need to duplicate more of the
core trace code into "trace_kdb.c" (for either creating my own
iterator or re-preparing a ring allocator whose memory was already
allocated).

NOTE: another option for kdb is to actually figure out how to make it
reuse the existing ftrace_dump() function and totally eliminate the
duplication.  This sounds very appealing and actually works (the "sr
z" command can be seen to properly dump the ftrace buffer).  The
downside here is that ftrace_dump() fully consumes the trace buffer.
Unless that is changed I'd rather not use it because it means "ftdump
| grep xyz" won't be very useful to search the ftrace buffer since it
will throw away the whole trace on the first grep.  A future patch to
dump only the last few lines of the buffer will also be hard to
implement.

[1] https://lkml.kernel.org/r/20161117191605.GA21459@google.com

Link: http://lkml.kernel.org/r/20190308193205.213659-1-dianders@chromium.org
Reported-by: Brian Norris <briannorris@chromium.org>
Signed-off-by: Douglas Anderson <dianders@chromium.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent d7391962
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -128,7 +128,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
 		    unsigned long *lost_events);
 
 struct ring_buffer_iter *
-ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu);
+ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags);
 void ring_buffer_read_prepare_sync(void);
 void ring_buffer_read_start(struct ring_buffer_iter *iter);
 void ring_buffer_read_finish(struct ring_buffer_iter *iter);
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -4141,6 +4141,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
  * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
  * @buffer: The ring buffer to read from
  * @cpu: The cpu buffer to iterate over
+ * @flags: gfp flags to use for memory allocation
  *
  * This performs the initial preparations necessary to iterate
  * through the buffer. Memory is allocated, buffer recording
@@ -4158,7 +4159,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
  * This overall must be paired with ring_buffer_read_finish.
  */
 struct ring_buffer_iter *
-ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
+ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_iter *iter;
@@ -4166,7 +4167,7 @@ ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
-	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+	iter = kmalloc(sizeof(*iter), flags);
 	if (!iter)
 		return NULL;
 
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3903,7 +3903,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
 		for_each_tracing_cpu(cpu) {
 			iter->buffer_iter[cpu] =
-				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
+				ring_buffer_read_prepare(iter->trace_buffer->buffer,
+							 cpu, GFP_KERNEL);
 		}
 		ring_buffer_read_prepare_sync();
 		for_each_tracing_cpu(cpu) {
@@ -3913,7 +3914,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
 	} else {
 		cpu = iter->cpu_file;
 		iter->buffer_iter[cpu] =
-			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
+			ring_buffer_read_prepare(iter->trace_buffer->buffer,
+						 cpu, GFP_KERNEL);
 		ring_buffer_read_prepare_sync();
 		ring_buffer_read_start(iter->buffer_iter[cpu]);
 		tracing_iter_reset(iter, cpu);
--- a/kernel/trace/trace_kdb.c
+++ b/kernel/trace/trace_kdb.c
@@ -51,14 +51,16 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
 	if (cpu_file == RING_BUFFER_ALL_CPUS) {
 		for_each_tracing_cpu(cpu) {
 			iter.buffer_iter[cpu] =
-			ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu);
+				ring_buffer_read_prepare(iter.trace_buffer->buffer,
+							 cpu, GFP_ATOMIC);
 			ring_buffer_read_start(iter.buffer_iter[cpu]);
 			tracing_iter_reset(&iter, cpu);
 		}
 	} else {
 		iter.cpu_file = cpu_file;
 		iter.buffer_iter[cpu_file] =
-			ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu_file);
+			ring_buffer_read_prepare(iter.trace_buffer->buffer,
+						 cpu_file, GFP_ATOMIC);
 		ring_buffer_read_start(iter.buffer_iter[cpu_file]);
 		tracing_iter_reset(&iter, cpu_file);
 	}