Commit aa2bc1ad authored by Peter Zijlstra, committed by Ingo Molnar

perf: Don't use -ENOSPC for out of PMU resources

People (Linus) objected to using -ENOSPC to signal not having enough
resources on the PMU to satisfy the request. Use -EINVAL.
Requested-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Daney <david.daney@cavium.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-xv8geaz2zpbjhlx0svmpp28n@git.kernel.org
[ merged to newer kernel, fixed up MIPS impact ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 1d5f003f
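
For illustration only (not part of the original commit page): a minimal userspace sketch of where this error-code change becomes visible. The raw-syscall wrapper follows the usual perf_event_open(2) pattern; the single hardware-cycles event is just a placeholder, since in practice the failure occurs when a request (typically an event group) cannot be fitted onto the PMU's counters. After this patch such a failure is reported as EINVAL instead of ENOSPC.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* glibc provides no wrapper; invoke the syscall directly. */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;	/* placeholder event */

	if (perf_event_open(&attr, 0, -1, -1, 0) < 0) {
		/* "Out of PMU resources" now shows up here as EINVAL. */
		fprintf(stderr, "perf_event_open: %s\n", strerror(errno));
		return 1;
	}
	return 0;
}
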
@@ -347,15 +347,15 @@ validate_group(struct perf_event *event)
 	memset(&fake_pmu, 0, sizeof(fake_pmu));
 
 	if (!validate_event(&fake_pmu, leader))
-		return -ENOSPC;
+		return -EINVAL;
 
 	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
 		if (!validate_event(&fake_pmu, sibling))
-			return -ENOSPC;
+			return -EINVAL;
 	}
 
 	if (!validate_event(&fake_pmu, event))
-		return -ENOSPC;
+		return -EINVAL;
 
 	return 0;
 }
@@ -623,7 +623,7 @@ static int mipspmu_event_init(struct perf_event *event)
 	if (!atomic_inc_not_zero(&active_events)) {
 		if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
 			atomic_dec(&active_events);
-			return -ENOSPC;
+			return -EINVAL;
 		}
 
 		mutex_lock(&pmu_reserve_mutex);
@@ -732,15 +732,15 @@ static int validate_group(struct perf_event *event)
 	memset(&fake_cpuc, 0, sizeof(fake_cpuc));
 
 	if (!validate_event(&fake_cpuc, leader))
-		return -ENOSPC;
+		return -EINVAL;
 
 	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
 		if (!validate_event(&fake_cpuc, sibling))
-			return -ENOSPC;
+			return -EINVAL;
 	}
 
 	if (!validate_event(&fake_cpuc, event))
-		return -ENOSPC;
+		return -EINVAL;
 
 	return 0;
 }
@@ -588,7 +588,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 				x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
 		}
 	}
-	return num ? -ENOSPC : 0;
+	return num ? -EINVAL : 0;
 }
 
 /*
@@ -607,7 +607,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
 
 	if (is_x86_event(leader)) {
 		if (n >= max_count)
-			return -ENOSPC;
+			return -EINVAL;
 		cpuc->event_list[n] = leader;
 		n++;
 	}
@@ -620,7 +620,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
 			continue;
 
 		if (n >= max_count)
-			return -ENOSPC;
+			return -EINVAL;
 
 		cpuc->event_list[n] = event;
 		n++;
@@ -1316,7 +1316,7 @@ static int validate_event(struct perf_event *event)
 	c = x86_pmu.get_event_constraints(fake_cpuc, event);
 
 	if (!c || !c->weight)
-		ret = -ENOSPC;
+		ret = -EINVAL;
 
 	if (x86_pmu.put_event_constraints)
 		x86_pmu.put_event_constraints(fake_cpuc, event);
@@ -1341,7 +1341,7 @@ static int validate_group(struct perf_event *event)
 {
 	struct perf_event *leader = event->group_leader;
 	struct cpu_hw_events *fake_cpuc;
-	int ret = -ENOSPC, n;
+	int ret = -EINVAL, n;
 
 	fake_cpuc = allocate_fake_cpuc();
 	if (IS_ERR(fake_cpuc))
@@ -1268,7 +1268,7 @@ static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign
 	}
 
 done:
-	return num ? -ENOSPC : 0;
+	return num ? -EINVAL : 0;
 }
 
 static __initconst const struct x86_pmu p4_pmu = {