Commit f344011c authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: Optimize perf_counter_alloc()'s inherit case

We don't need to bump the usage counts for the swcounter and attr usage
models for inherited counters, since the parent counter always holds
one, which suffices to generate the needed output.

This avoids up to three global atomic increments per inherited
counter.

LKML-Reference: <new-submission>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent b84fbc9f
@@ -1508,11 +1508,13 @@ static void free_counter(struct perf_counter *counter)
 {
 	perf_pending_sync(counter);
 
-	atomic_dec(&nr_counters);
-	if (counter->attr.mmap)
-		atomic_dec(&nr_mmap_counters);
-	if (counter->attr.comm)
-		atomic_dec(&nr_comm_counters);
+	if (!counter->parent) {
+		atomic_dec(&nr_counters);
+		if (counter->attr.mmap)
+			atomic_dec(&nr_mmap_counters);
+		if (counter->attr.comm)
+			atomic_dec(&nr_comm_counters);
+	}
 
 	if (counter->destroy)
 		counter->destroy(counter);
@@ -3515,6 +3517,8 @@ static void sw_perf_counter_destroy(struct perf_counter *counter)
 {
 	u64 event = counter->attr.config;
 
+	WARN_ON(counter->parent);
+
 	atomic_dec(&perf_swcounter_enabled[event]);
 }
 
@@ -3551,8 +3555,10 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
 	case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
 	case PERF_COUNT_SW_CONTEXT_SWITCHES:
 	case PERF_COUNT_SW_CPU_MIGRATIONS:
-		atomic_inc(&perf_swcounter_enabled[event]);
-		counter->destroy = sw_perf_counter_destroy;
+		if (!counter->parent) {
+			atomic_inc(&perf_swcounter_enabled[event]);
+			counter->destroy = sw_perf_counter_destroy;
+		}
 		pmu = &perf_ops_generic;
 		break;
 	}
@@ -3663,11 +3669,13 @@ perf_counter_alloc(struct perf_counter_attr *attr,
 
 	counter->pmu = pmu;
 
-	atomic_inc(&nr_counters);
-	if (counter->attr.mmap)
-		atomic_inc(&nr_mmap_counters);
-	if (counter->attr.comm)
-		atomic_inc(&nr_comm_counters);
+	if (!counter->parent) {
+		atomic_inc(&nr_counters);
+		if (counter->attr.mmap)
+			atomic_inc(&nr_mmap_counters);
+		if (counter->attr.comm)
+			atomic_inc(&nr_comm_counters);
+	}
 
 	return counter;
 }
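The accounting pattern the patch adopts can be pictured with a small standalone sketch, built only for illustration: the names below (struct counter, nr_counters, counter_alloc, counter_free) are simplified stand-ins and not the real perf_counter API. A top-level counter pays the global atomic increment once; an inherited counter carries a non-NULL parent pointer and skips the bookkeeping entirely, relying on its parent's increment.

#include <assert.h>
#include <stdatomic.h>
#include <stdlib.h>

/* Global count of live top-level counters (stand-in for nr_counters). */
static atomic_int nr_counters;

struct counter {
	struct counter *parent;	/* NULL for a top-level counter */
};

static struct counter *counter_alloc(struct counter *parent)
{
	struct counter *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	c->parent = parent;

	/*
	 * Only top-level counters pay the global atomic increment; an
	 * inherited counter relies on the increment its parent already made.
	 */
	if (!c->parent)
		atomic_fetch_add(&nr_counters, 1);
	return c;
}

static void counter_free(struct counter *c)
{
	/* Mirror the alloc path: children never touched the global count. */
	if (!c->parent)
		atomic_fetch_sub(&nr_counters, 1);
	free(c);
}

int main(void)
{
	struct counter *parent = counter_alloc(NULL);
	struct counter *child  = counter_alloc(parent);	/* inherited: no atomic op */

	assert(atomic_load(&nr_counters) == 1);

	counter_free(child);
	counter_free(parent);
	assert(atomic_load(&nr_counters) == 0);
	return 0;
}

In the sketch, as in the patch, the free path checks the same parent pointer as the alloc path, so the global count stays balanced no matter how many inherited counters come and go.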