Commit ef710e10 authored by KOSAKI Motohiro's avatar KOSAKI Motohiro Committed by Steven Rostedt

tracing: Shrink max latency ringbuffer if unnecessary

Documentation/trace/ftrace.txt says

  buffer_size_kb:

        This sets or displays the number of kilobytes each CPU
        buffer can hold. The tracer buffers are the same size
        for each CPU. The displayed number is the size of the
        CPU buffer and not total size of all buffers. The
        trace buffers are allocated in pages (blocks of memory
        that the kernel uses for allocation, usually 4 KB in size).
        If the last page allocated has room for more bytes
        than requested, the rest of the page will be used,
        making the actual allocation bigger than requested.
        ( Note, the size may not be a multiple of the page size
          due to buffer management overhead. )

        This can only be updated when the current_tracer
        is set to "nop".

But this is incorrect. Currently, the total memory consumption is
'buffer_size_kb x CPUs x 2'.

Why is there a factor-of-two difference? Because ftrace implicitly allocates
a second buffer for the max latency tracer as well.

This produces an unfortunate result when an admin wants to use a large
buffer (e.g. for full logging and detailed analysis). For example, if an
admin has a 24-CPU machine and writes 200MB to buffer_size_kb, the system
consumes ~10GB of memory (200MB x 24 x 2). Wasting 5GB of memory is
usually unacceptable.

Fortunately, almost all users don't use the max latency feature, and
the max latency buffer can be disabled easily.

This patch shrinks the max latency buffer to its minimum size when it is
unnecessary.
Signed-off-by: default avatarKOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
LKML-Reference: <20100701104554.DA2D.A69D9226@jp.fujitsu.com>
Signed-off-by: default avatarSteven Rostedt <rostedt@goodmis.org>
parent bc289ae9
...@@ -660,6 +660,10 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) ...@@ -660,6 +660,10 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
return; return;
WARN_ON_ONCE(!irqs_disabled()); WARN_ON_ONCE(!irqs_disabled());
if (!current_trace->use_max_tr) {
WARN_ON_ONCE(1);
return;
}
arch_spin_lock(&ftrace_max_lock); arch_spin_lock(&ftrace_max_lock);
tr->buffer = max_tr.buffer; tr->buffer = max_tr.buffer;
...@@ -686,6 +690,11 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) ...@@ -686,6 +690,11 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
return; return;
WARN_ON_ONCE(!irqs_disabled()); WARN_ON_ONCE(!irqs_disabled());
if (!current_trace->use_max_tr) {
WARN_ON_ONCE(1);
return;
}
arch_spin_lock(&ftrace_max_lock); arch_spin_lock(&ftrace_max_lock);
ftrace_disable_cpu(); ftrace_disable_cpu();
...@@ -2801,6 +2810,9 @@ static int tracing_resize_ring_buffer(unsigned long size) ...@@ -2801,6 +2810,9 @@ static int tracing_resize_ring_buffer(unsigned long size)
if (ret < 0) if (ret < 0)
return ret; return ret;
if (!current_trace->use_max_tr)
goto out;
ret = ring_buffer_resize(max_tr.buffer, size); ret = ring_buffer_resize(max_tr.buffer, size);
if (ret < 0) { if (ret < 0) {
int r; int r;
...@@ -2828,11 +2840,14 @@ static int tracing_resize_ring_buffer(unsigned long size) ...@@ -2828,11 +2840,14 @@ static int tracing_resize_ring_buffer(unsigned long size)
return ret; return ret;
} }
max_tr.entries = size;
out:
global_trace.entries = size; global_trace.entries = size;
return ret; return ret;
} }
/** /**
* tracing_update_buffers - used by tracing facility to expand ring buffers * tracing_update_buffers - used by tracing facility to expand ring buffers
* *
...@@ -2893,12 +2908,26 @@ static int tracing_set_tracer(const char *buf) ...@@ -2893,12 +2908,26 @@ static int tracing_set_tracer(const char *buf)
trace_branch_disable(); trace_branch_disable();
if (current_trace && current_trace->reset) if (current_trace && current_trace->reset)
current_trace->reset(tr); current_trace->reset(tr);
if (current_trace && current_trace->use_max_tr) {
/*
* We don't free the ring buffer. instead, resize it because
* The max_tr ring buffer has some state (e.g. ring->clock) and
* we want preserve it.
*/
ring_buffer_resize(max_tr.buffer, 1);
max_tr.entries = 1;
}
destroy_trace_option_files(topts); destroy_trace_option_files(topts);
current_trace = t; current_trace = t;
topts = create_trace_option_files(current_trace); topts = create_trace_option_files(current_trace);
if (current_trace->use_max_tr) {
ret = ring_buffer_resize(max_tr.buffer, global_trace.entries);
if (ret < 0)
goto out;
max_tr.entries = global_trace.entries;
}
if (t->init) { if (t->init) {
ret = tracer_init(t, tr); ret = tracer_init(t, tr);
...@@ -3480,7 +3509,6 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, ...@@ -3480,7 +3509,6 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
} }
tracing_start(); tracing_start();
max_tr.entries = global_trace.entries;
mutex_unlock(&trace_types_lock); mutex_unlock(&trace_types_lock);
return cnt; return cnt;
...@@ -4578,16 +4606,14 @@ __init static int tracer_alloc_buffers(void) ...@@ -4578,16 +4606,14 @@ __init static int tracer_alloc_buffers(void)
#ifdef CONFIG_TRACER_MAX_TRACE #ifdef CONFIG_TRACER_MAX_TRACE
max_tr.buffer = ring_buffer_alloc(ring_buf_size, max_tr.buffer = ring_buffer_alloc(1, TRACE_BUFFER_FLAGS);
TRACE_BUFFER_FLAGS);
if (!max_tr.buffer) { if (!max_tr.buffer) {
printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
WARN_ON(1); WARN_ON(1);
ring_buffer_free(global_trace.buffer); ring_buffer_free(global_trace.buffer);
goto out_free_cpumask; goto out_free_cpumask;
} }
max_tr.entries = ring_buffer_size(max_tr.buffer); max_tr.entries = 1;
WARN_ON(max_tr.entries != global_trace.entries);
#endif #endif
/* Allocate the first page for all buffers */ /* Allocate the first page for all buffers */
......
...@@ -276,6 +276,7 @@ struct tracer { ...@@ -276,6 +276,7 @@ struct tracer {
struct tracer *next; struct tracer *next;
int print_max; int print_max;
struct tracer_flags *flags; struct tracer_flags *flags;
int use_max_tr;
}; };
......
...@@ -649,6 +649,7 @@ static struct tracer irqsoff_tracer __read_mostly = ...@@ -649,6 +649,7 @@ static struct tracer irqsoff_tracer __read_mostly =
#endif #endif
.open = irqsoff_trace_open, .open = irqsoff_trace_open,
.close = irqsoff_trace_close, .close = irqsoff_trace_close,
.use_max_tr = 1,
}; };
# define register_irqsoff(trace) register_tracer(&trace) # define register_irqsoff(trace) register_tracer(&trace)
#else #else
...@@ -681,6 +682,7 @@ static struct tracer preemptoff_tracer __read_mostly = ...@@ -681,6 +682,7 @@ static struct tracer preemptoff_tracer __read_mostly =
#endif #endif
.open = irqsoff_trace_open, .open = irqsoff_trace_open,
.close = irqsoff_trace_close, .close = irqsoff_trace_close,
.use_max_tr = 1,
}; };
# define register_preemptoff(trace) register_tracer(&trace) # define register_preemptoff(trace) register_tracer(&trace)
#else #else
...@@ -715,6 +717,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly = ...@@ -715,6 +717,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
#endif #endif
.open = irqsoff_trace_open, .open = irqsoff_trace_open,
.close = irqsoff_trace_close, .close = irqsoff_trace_close,
.use_max_tr = 1,
}; };
# define register_preemptirqsoff(trace) register_tracer(&trace) # define register_preemptirqsoff(trace) register_tracer(&trace)
......
...@@ -382,6 +382,7 @@ static struct tracer wakeup_tracer __read_mostly = ...@@ -382,6 +382,7 @@ static struct tracer wakeup_tracer __read_mostly =
#ifdef CONFIG_FTRACE_SELFTEST #ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_wakeup, .selftest = trace_selftest_startup_wakeup,
#endif #endif
.use_max_tr = 1,
}; };
static struct tracer wakeup_rt_tracer __read_mostly = static struct tracer wakeup_rt_tracer __read_mostly =
...@@ -396,6 +397,7 @@ static struct tracer wakeup_rt_tracer __read_mostly = ...@@ -396,6 +397,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
#ifdef CONFIG_FTRACE_SELFTEST #ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_wakeup, .selftest = trace_selftest_startup_wakeup,
#endif #endif
.use_max_tr = 1,
}; };
__init static int init_wakeup_tracer(void) __init static int init_wakeup_tracer(void)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment