Commit 161c85fa authored by Peter Zijlstra, committed by Ingo Molnar

perf/core: Cleanup the rb-tree code

Trivial comment and code fixups..
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Carrillo-Cisneros <davidcc@google.com>
Cc: Dmitri Prokhorov <Dmitry.Prohorov@intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Valery Cherepennikov <valery.cherepennikov@intel.com>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 8e1a2031
@@ -1461,9 +1461,9 @@ static enum event_type_t get_event_type(struct perf_event *event)
 }
 
 /*
- * Helper function to initialize group leader event;
+ * Helper function to initialize event group nodes.
  */
-void init_event_group(struct perf_event *event)
+static void init_event_group(struct perf_event *event)
 {
 	RB_CLEAR_NODE(&event->group_node);
 	event->group_index = 0;
@@ -1471,7 +1471,7 @@ void init_event_group(struct perf_event *event)
 
 /*
  * Extract pinned or flexible groups from the context
- * based on event attrs bits;
+ * based on event attrs bits.
  */
 static struct perf_event_groups *
 get_event_groups(struct perf_event *event, struct perf_event_context *ctx)
@@ -1483,9 +1483,9 @@ get_event_groups(struct perf_event *event, struct perf_event_context *ctx)
 }
 
 /*
- * Helper function to initializes perf event groups object;
+ * Helper function to initializes perf_event_group trees.
  */
-void perf_event_groups_init(struct perf_event_groups *groups)
+static void perf_event_groups_init(struct perf_event_groups *groups)
 {
 	groups->tree = RB_ROOT;
 	groups->index = 0;
@@ -1493,35 +1493,34 @@ void perf_event_groups_init(struct perf_event_groups *groups)
 
 /*
  * Compare function for event groups;
- * Implements complex key that first sorts by CPU and then by
- * virtual index which provides ordering when rotating
- * groups for the same CPU;
+ *
+ * Implements complex key that first sorts by CPU and then by virtual index
+ * which provides ordering when rotating groups for the same CPU.
  */
-int perf_event_groups_less(struct perf_event *left, struct perf_event *right)
+static bool
+perf_event_groups_less(struct perf_event *left, struct perf_event *right)
 {
-	if (left->cpu < right->cpu) {
-		return 1;
-	} else if (left->cpu > right->cpu) {
-		return 0;
-	} else {
-		if (left->group_index < right->group_index) {
-			return 1;
-		} else if(left->group_index > right->group_index) {
-			return 0;
-		} else {
-			return 0;
-		}
-	}
+	if (left->cpu < right->cpu)
+		return true;
+	if (left->cpu > right->cpu)
+		return false;
+
+	if (left->group_index < right->group_index)
+		return true;
+	if (left->group_index > right->group_index)
+		return false;
+
+	return false;
 }
 
 /*
- * Insert a group into a tree using event->cpu as a key. If event->cpu node
- * is already attached to the tree then the event is added to the attached
- * group's group_list list.
+ * Insert @event into @groups' tree; using {@event->cpu, ++@groups->index} for
+ * key (see perf_event_groups_less). This places it last inside the CPU
+ * subtree.
  */
 static void
 perf_event_groups_insert(struct perf_event_groups *groups,
			 struct perf_event *event)
 {
 	struct perf_event *node_event;
 	struct rb_node *parent;
@@ -1534,8 +1533,7 @@ perf_event_groups_insert(struct perf_event_groups *groups,
 
 	while (*node) {
 		parent = *node;
-		node_event = container_of(*node,
-				struct perf_event, group_node);
+		node_event = container_of(*node, struct perf_event, group_node);
 
 		if (perf_event_groups_less(event, node_event))
 			node = &parent->rb_left;
@@ -1548,8 +1546,7 @@ perf_event_groups_insert(struct perf_event_groups *groups,
 }
 
 /*
- * Helper function to insert event into the pinned or
- * flexible groups;
+ * Helper function to insert event into the pinned or flexible groups.
  */
 static void
 add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx)
@@ -1561,22 +1558,21 @@ add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx)
 }
 
 /*
- * Delete a group from a tree. If the group is directly attached to the tree
- * it also detaches all groups on the group's group_list list.
+ * Delete a group from a tree.
  */
 static void
 perf_event_groups_delete(struct perf_event_groups *groups,
			 struct perf_event *event)
 {
-	if (!RB_EMPTY_NODE(&event->group_node) &&
-	    !RB_EMPTY_ROOT(&groups->tree))
-		rb_erase(&event->group_node, &groups->tree);
+	WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) ||
		     RB_EMPTY_ROOT(&groups->tree));
+
+	rb_erase(&event->group_node, &groups->tree);
 
 	init_event_group(event);
 }
 
 /*
- * Helper function to delete event from its groups;
+ * Helper function to delete event from its groups.
  */
 static void
 del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx)
@@ -1588,7 +1584,7 @@ del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx)
 }
 
 /*
- * Get a group by a cpu key from groups tree with the least group_index;
+ * Get the leftmost event in the @cpu subtree.
  */
 static struct perf_event *
 perf_event_groups_first(struct perf_event_groups *groups, int cpu)
@@ -1597,8 +1593,7 @@ perf_event_groups_first(struct perf_event_groups *groups, int cpu)
 	struct rb_node *node = groups->tree.rb_node;
 
 	while (node) {
-		node_event = container_of(node,
-				struct perf_event, group_node);
+		node_event = container_of(node, struct perf_event, group_node);
 
 		if (cpu < node_event->cpu) {
 			node = node->rb_left;
@@ -1614,13 +1609,14 @@ perf_event_groups_first(struct perf_event_groups *groups, int cpu)
 }
 
 /*
- * Find group list by a cpu key and rotate it.
+ * Rotate the @cpu subtree.
+ *
+ * Re-insert the leftmost event at the tail of the subtree.
  */
 static void
 perf_event_groups_rotate(struct perf_event_groups *groups, int cpu)
 {
-	struct perf_event *event =
-		perf_event_groups_first(groups, cpu);
+	struct perf_event *event = perf_event_groups_first(groups, cpu);
 
 	if (event) {
 		perf_event_groups_delete(groups, event);
@@ -1629,7 +1625,7 @@ perf_event_groups_rotate(struct perf_event_groups *groups, int cpu)
 }
 
 /*
- * Iterate event groups thru the whole tree.
+ * Iterate through the whole groups tree.
  */
 #define perf_event_groups_for_each(event, groups, node)	\
	for (event = rb_entry_safe(rb_first(&((groups)->tree)),	\
...
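
The composite key above sorts events by CPU first and then by a monotonically increasing virtual index, so an event re-inserted with a fresh index lands at the tail of its CPU subtree; that is the property perf_event_groups_rotate() relies on. Below is a minimal userspace sketch of this ordering. Note that struct event, event_less() and the local index counter are illustrative stand-ins for the kernel's perf_event fields and groups->index, not kernel code.

/*
 * Minimal userspace sketch of the {cpu, group_index} composite key.
 * "struct event", event_less() and the local "index" counter are
 * hypothetical stand-ins for the kernel structures above.
 */
#include <stdbool.h>
#include <stdio.h>

struct event {
	int cpu;
	unsigned long group_index;
};

/* Mirrors perf_event_groups_less(): sort by CPU, then by virtual index. */
static bool event_less(const struct event *left, const struct event *right)
{
	if (left->cpu < right->cpu)
		return true;
	if (left->cpu > right->cpu)
		return false;

	return left->group_index < right->group_index;
}

int main(void)
{
	unsigned long index = 0;	/* plays the role of groups->index */

	struct event a = { .cpu = 0, .group_index = ++index };
	struct event b = { .cpu = 0, .group_index = ++index };
	struct event c = { .cpu = 1, .group_index = ++index };

	printf("a before b: %d\n", event_less(&a, &b));	/* 1: older index first */
	printf("b before c: %d\n", event_less(&b, &c));	/* 1: lower CPU first */

	/*
	 * "Rotate" CPU 0: give the leftmost event a fresh index and it now
	 * sorts after every other CPU 0 event, i.e. it moved to the tail.
	 */
	a.group_index = ++index;
	printf("a before b after rotate: %d\n", event_less(&a, &b));	/* 0 */

	return 0;
}

Because every insertion takes a fresh index, rotation can reuse the ordinary delete and insert paths instead of maintaining a separate per-CPU list, at the usual O(log n) rb-tree cost.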