Commit 553552ce authored by Steven Rostedt, committed by Steven Rostedt

tracing: Combine event filter_active and enable into single flags field

The filter_active and enable both use an int (4 bytes each) to
set a single flag. We can save 4 bytes per event by combining the
two into a single integer.

   text	   data	    bss	    dec	    hex	filename
4913961	1088356	 861512	6863829	 68bbd5	vmlinux.orig
4894944	1018052	 861512	6774508	 675eec	vmlinux.id
4894871	1012292	 861512	6768675	 674823	vmlinux.flags

This gives us another 5K in savings.

The modification of both the enable and filter fields are done
under the event_mutex, so it is still safe to combine the two.

Note: Although Mathieu gave his Acked-by, he would like it documented
 that the reads of flags are not protected by the mutex. The way the
 code works, these reads will not break anything, but will have a
 residual effect. Since this behavior is the same even before this
 patch, describing this situation is left to another patch, as this
 patch does not change the behavior, but just brought it to Mathieu's
 attention.

v2: Updated the event trace self test for this change.
Acked-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Acked-by: Masami Hiramatsu <mhiramat@redhat.com>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent 32c0edae
...@@ -143,6 +143,16 @@ struct ftrace_event_class { ...@@ -143,6 +143,16 @@ struct ftrace_event_class {
int (*raw_init)(struct ftrace_event_call *); int (*raw_init)(struct ftrace_event_call *);
}; };
/*
 * Bit numbers within ftrace_event_call->flags.  The previously separate
 * 'enabled' and 'filter_active' ints are combined into this one word to
 * save space (one bit each).
 */
enum {
TRACE_EVENT_FL_ENABLED_BIT,	/* event is enabled */
TRACE_EVENT_FL_FILTERED_BIT,	/* event has an active filter */
};
/*
 * Mask values derived from the bit numbers above, for testing and
 * updating the flags word.  Writers must hold event_mutex; readers are
 * not serialized and may observe a momentarily stale value (see the
 * commit message note from Mathieu Desnoyers).
 */
enum {
TRACE_EVENT_FL_ENABLED = (1 << TRACE_EVENT_FL_ENABLED_BIT),
TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
};
struct ftrace_event_call { struct ftrace_event_call {
struct list_head list; struct list_head list;
struct ftrace_event_class *class; struct ftrace_event_class *class;
...@@ -154,8 +164,15 @@ struct ftrace_event_call { ...@@ -154,8 +164,15 @@ struct ftrace_event_call {
void *mod; void *mod;
void *data; void *data;
int enabled; /*
int filter_active; * 32 bit flags:
* bit 1: enabled
* bit 2: filter_active
*
* Must hold event_mutex to change.
*/
unsigned int flags;
int perf_refcount; int perf_refcount;
}; };
......
...@@ -802,7 +802,7 @@ filter_check_discard(struct ftrace_event_call *call, void *rec, ...@@ -802,7 +802,7 @@ filter_check_discard(struct ftrace_event_call *call, void *rec,
struct ring_buffer *buffer, struct ring_buffer *buffer,
struct ring_buffer_event *event) struct ring_buffer_event *event)
{ {
if (unlikely(call->filter_active) && if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
!filter_match_preds(call->filter, rec)) { !filter_match_preds(call->filter, rec)) {
ring_buffer_discard_commit(buffer, event); ring_buffer_discard_commit(buffer, event);
return 1; return 1;
......
...@@ -137,8 +137,8 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call, ...@@ -137,8 +137,8 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call,
switch (enable) { switch (enable) {
case 0: case 0:
if (call->enabled) { if (call->flags & TRACE_EVENT_FL_ENABLED) {
call->enabled = 0; call->flags &= ~TRACE_EVENT_FL_ENABLED;
tracing_stop_cmdline_record(); tracing_stop_cmdline_record();
if (call->class->reg) if (call->class->reg)
call->class->reg(call, TRACE_REG_UNREGISTER); call->class->reg(call, TRACE_REG_UNREGISTER);
...@@ -149,7 +149,7 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call, ...@@ -149,7 +149,7 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call,
} }
break; break;
case 1: case 1:
if (!call->enabled) { if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
tracing_start_cmdline_record(); tracing_start_cmdline_record();
if (call->class->reg) if (call->class->reg)
ret = call->class->reg(call, TRACE_REG_REGISTER); ret = call->class->reg(call, TRACE_REG_REGISTER);
...@@ -163,7 +163,7 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call, ...@@ -163,7 +163,7 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call,
"%s\n", call->name); "%s\n", call->name);
break; break;
} }
call->enabled = 1; call->flags |= TRACE_EVENT_FL_ENABLED;
} }
break; break;
} }
...@@ -352,7 +352,7 @@ s_next(struct seq_file *m, void *v, loff_t *pos) ...@@ -352,7 +352,7 @@ s_next(struct seq_file *m, void *v, loff_t *pos)
(*pos)++; (*pos)++;
list_for_each_entry_continue(call, &ftrace_events, list) { list_for_each_entry_continue(call, &ftrace_events, list) {
if (call->enabled) if (call->flags & TRACE_EVENT_FL_ENABLED)
return call; return call;
} }
...@@ -411,7 +411,7 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt, ...@@ -411,7 +411,7 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
struct ftrace_event_call *call = filp->private_data; struct ftrace_event_call *call = filp->private_data;
char *buf; char *buf;
if (call->enabled) if (call->flags & TRACE_EVENT_FL_ENABLED)
buf = "1\n"; buf = "1\n";
else else
buf = "0\n"; buf = "0\n";
...@@ -486,7 +486,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt, ...@@ -486,7 +486,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
* or if all events or cleared, or if we have * or if all events or cleared, or if we have
* a mixture. * a mixture.
*/ */
set |= (1 << !!call->enabled); set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED));
/* /*
* If we have a mixture, no need to look further. * If we have a mixture, no need to look further.
...@@ -1447,7 +1447,7 @@ static __init void event_trace_self_tests(void) ...@@ -1447,7 +1447,7 @@ static __init void event_trace_self_tests(void)
* If an event is already enabled, someone is using * If an event is already enabled, someone is using
* it and the self test should not be on. * it and the self test should not be on.
*/ */
if (call->enabled) { if (call->flags & TRACE_EVENT_FL_ENABLED) {
pr_warning("Enabled event during self test!\n"); pr_warning("Enabled event during self test!\n");
WARN_ON_ONCE(1); WARN_ON_ONCE(1);
continue; continue;
......
...@@ -547,7 +547,7 @@ static void filter_disable_preds(struct ftrace_event_call *call) ...@@ -547,7 +547,7 @@ static void filter_disable_preds(struct ftrace_event_call *call)
struct event_filter *filter = call->filter; struct event_filter *filter = call->filter;
int i; int i;
call->filter_active = 0; call->flags &= ~TRACE_EVENT_FL_FILTERED;
filter->n_preds = 0; filter->n_preds = 0;
for (i = 0; i < MAX_FILTER_PRED; i++) for (i = 0; i < MAX_FILTER_PRED; i++)
...@@ -574,7 +574,7 @@ void destroy_preds(struct ftrace_event_call *call) ...@@ -574,7 +574,7 @@ void destroy_preds(struct ftrace_event_call *call)
{ {
__free_preds(call->filter); __free_preds(call->filter);
call->filter = NULL; call->filter = NULL;
call->filter_active = 0; call->flags &= ~TRACE_EVENT_FL_FILTERED;
} }
static struct event_filter *__alloc_preds(void) static struct event_filter *__alloc_preds(void)
...@@ -613,7 +613,7 @@ static int init_preds(struct ftrace_event_call *call) ...@@ -613,7 +613,7 @@ static int init_preds(struct ftrace_event_call *call)
if (call->filter) if (call->filter)
return 0; return 0;
call->filter_active = 0; call->flags &= ~TRACE_EVENT_FL_FILTERED;
call->filter = __alloc_preds(); call->filter = __alloc_preds();
if (IS_ERR(call->filter)) if (IS_ERR(call->filter))
return PTR_ERR(call->filter); return PTR_ERR(call->filter);
...@@ -1268,7 +1268,7 @@ static int replace_system_preds(struct event_subsystem *system, ...@@ -1268,7 +1268,7 @@ static int replace_system_preds(struct event_subsystem *system,
if (err) if (err)
filter_disable_preds(call); filter_disable_preds(call);
else { else {
call->filter_active = 1; call->flags |= TRACE_EVENT_FL_FILTERED;
replace_filter_string(filter, filter_string); replace_filter_string(filter, filter_string);
} }
fail = false; fail = false;
...@@ -1317,7 +1317,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string) ...@@ -1317,7 +1317,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
if (err) if (err)
append_filter_err(ps, call->filter); append_filter_err(ps, call->filter);
else else
call->filter_active = 1; call->flags |= TRACE_EVENT_FL_FILTERED;
out: out:
filter_opstack_clear(ps); filter_opstack_clear(ps);
postfix_clear(ps); postfix_clear(ps);
......
...@@ -1382,7 +1382,7 @@ static int register_probe_event(struct trace_probe *tp) ...@@ -1382,7 +1382,7 @@ static int register_probe_event(struct trace_probe *tp)
kfree(call->print_fmt); kfree(call->print_fmt);
return -ENODEV; return -ENODEV;
} }
call->enabled = 0; call->flags = 0;
call->class->reg = kprobe_register; call->class->reg = kprobe_register;
call->data = tp; call->data = tp;
ret = trace_add_event_call(call); ret = trace_add_event_call(call);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment