Commit 3d325bf0 authored by Ingo Molnar

Merge branch 'perf/urgent' into perf/core, to pick up fixes before applying new changes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents f1d800bf d7a702f0
@@ -2758,7 +2758,7 @@ static int intel_pmu_cpu_prepare(int cpu)
 	if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
 		cpuc->shared_regs = allocate_shared_regs(cpu);
 		if (!cpuc->shared_regs)
-			return NOTIFY_BAD;
+			goto err;
 	}
 
 	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
@@ -2766,18 +2766,27 @@ static int intel_pmu_cpu_prepare(int cpu)
 
 		cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
 		if (!cpuc->constraint_list)
-			return NOTIFY_BAD;
+			goto err_shared_regs;
 
 		cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
-		if (!cpuc->excl_cntrs) {
-			kfree(cpuc->constraint_list);
-			kfree(cpuc->shared_regs);
-			return NOTIFY_BAD;
-		}
+		if (!cpuc->excl_cntrs)
+			goto err_constraint_list;
+
 		cpuc->excl_thread_id = 0;
 	}
 
 	return NOTIFY_OK;
+
+err_constraint_list:
+	kfree(cpuc->constraint_list);
+	cpuc->constraint_list = NULL;
+
+err_shared_regs:
+	kfree(cpuc->shared_regs);
+	cpuc->shared_regs = NULL;
+
+err:
+	return NOTIFY_BAD;
 }
 
 static void intel_pmu_cpu_starting(int cpu)
...
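The hunk above replaces the nested kfree() calls with a single unwind path at the bottom of intel_pmu_cpu_prepare(), so each allocation failure frees exactly what was set up before it and clears the pointers. A minimal user-space sketch of the same goto-unwind idiom (the struct and function names here are illustrative, not from the patch):

#include <stdlib.h>

/* Illustrative two-stage setup mirroring the unwind order above:
 * each error label frees only what was already allocated and NULLs
 * the pointer so a later teardown cannot double-free it. */
struct ctx {
	int *a;
	int *b;
};

static int ctx_prepare(struct ctx *c)
{
	c->a = calloc(1, sizeof(*c->a));
	if (!c->a)
		goto err;

	c->b = calloc(1, sizeof(*c->b));
	if (!c->b)
		goto err_a;

	return 0;

err_a:
	free(c->a);
	c->a = NULL;
err:
	return -1;
}

int main(void)
{
	struct ctx c = { 0 };
	int ret = ctx_prepare(&c);

	free(c.b);
	free(c.a);
	return ret ? 1 : 0;
}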
@@ -1255,7 +1255,7 @@ static inline void cqm_pick_event_reader(int cpu)
 	cpumask_set_cpu(cpu, &cqm_cpumask);
 }
 
-static void intel_cqm_cpu_prepare(unsigned int cpu)
+static void intel_cqm_cpu_starting(unsigned int cpu)
 {
 	struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -1296,13 +1296,11 @@ static int intel_cqm_cpu_notifier(struct notifier_block *nb,
 	unsigned int cpu = (unsigned long)hcpu;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		intel_cqm_cpu_prepare(cpu);
-		break;
 	case CPU_DOWN_PREPARE:
 		intel_cqm_cpu_exit(cpu);
 		break;
 	case CPU_STARTING:
+		intel_cqm_cpu_starting(cpu);
 		cqm_pick_event_reader(cpu);
 		break;
 	}
@@ -1373,7 +1371,7 @@ static int __init intel_cqm_init(void)
 		goto out;
 
 	for_each_online_cpu(i) {
-		intel_cqm_cpu_prepare(i);
+		intel_cqm_cpu_starting(i);
 		cqm_pick_event_reader(i);
 	}
 
...
@@ -3972,28 +3972,21 @@ static void perf_event_for_each(struct perf_event *event,
 		perf_event_for_each_child(sibling, func);
 }
 
-static int perf_event_period(struct perf_event *event, u64 __user *arg)
-{
-	struct perf_event_context *ctx = event->ctx;
-	int ret = 0, active;
+struct period_event {
+	struct perf_event *event;
 	u64 value;
+};
 
-	if (!is_sampling_event(event))
-		return -EINVAL;
-
-	if (copy_from_user(&value, arg, sizeof(value)))
-		return -EFAULT;
-
-	if (!value)
-		return -EINVAL;
+static int __perf_event_period(void *info)
+{
+	struct period_event *pe = info;
+	struct perf_event *event = pe->event;
+	struct perf_event_context *ctx = event->ctx;
+	u64 value = pe->value;
+	bool active;
 
-	raw_spin_lock_irq(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	if (event->attr.freq) {
-		if (value > sysctl_perf_event_sample_rate) {
-			ret = -EINVAL;
-			goto unlock;
-		}
-
 		event->attr.sample_freq = value;
 	} else {
 		event->attr.sample_period = value;
@@ -4012,11 +4005,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 		event->pmu->start(event, PERF_EF_RELOAD);
 		perf_pmu_enable(ctx->pmu);
 	}
+	raw_spin_unlock(&ctx->lock);
 
-unlock:
+	return 0;
+}
+
+static int perf_event_period(struct perf_event *event, u64 __user *arg)
+{
+	struct period_event pe = { .event = event, };
+	struct perf_event_context *ctx = event->ctx;
+	struct task_struct *task;
+	u64 value;
+
+	if (!is_sampling_event(event))
+		return -EINVAL;
+
+	if (copy_from_user(&value, arg, sizeof(value)))
+		return -EFAULT;
+
+	if (!value)
+		return -EINVAL;
+
+	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
+		return -EINVAL;
+
+	task = ctx->task;
+	pe.value = value;
+
+	if (!task) {
+		cpu_function_call(event->cpu, __perf_event_period, &pe);
+		return 0;
+	}
+
+retry:
+	if (!task_function_call(task, __perf_event_period, &pe))
+		return 0;
+
+	raw_spin_lock_irq(&ctx->lock);
+	if (ctx->is_active) {
+		raw_spin_unlock_irq(&ctx->lock);
+		task = ctx->task;
+		goto retry;
+	}
+
+	__perf_event_period(&pe);
 	raw_spin_unlock_irq(&ctx->lock);
 
-	return ret;
+	return 0;
 }
 
 static const struct file_operations perf_fops;
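The rewritten perf_event_period() above is what services the PERF_EVENT_IOC_PERIOD ioctl; with the cross-call, the update runs on the CPU that currently has the event scheduled instead of poking hardware state from the wrong CPU. A minimal user-space sketch of exercising this path (error handling trimmed; the cycle counts are arbitrary example values, and the call may require a permissive perf_event_paranoid setting):

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t new_period = 200000;	/* arbitrary example value */
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;	/* sampling event: required by perf_event_period() */

	/* pid = 0, cpu = -1: profile the calling thread on any CPU */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	/* Change the period on the live event; this is the ioctl that
	 * lands in perf_event_period() above. */
	ioctl(fd, PERF_EVENT_IOC_PERIOD, &new_period);

	close(fd);
	return 0;
}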
@@ -4754,12 +4789,20 @@ static const struct file_operations perf_fops = {
  * to user-space before waking everybody up.
  */
 
+static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
+{
+	/* only the parent has fasync state */
+	if (event->parent)
+		event = event->parent;
+	return &event->fasync;
+}
+
 void perf_event_wakeup(struct perf_event *event)
 {
 	ring_buffer_wakeup(event);
 
 	if (event->pending_kill) {
-		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
+		kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
 		event->pending_kill = 0;
 	}
 }
@@ -6221,7 +6264,7 @@ static int __perf_event_overflow(struct perf_event *event,
 	else
 		perf_event_output(event, data, regs);
 
-	if (event->fasync && event->pending_kill) {
+	if (*perf_event_fasync(event) && event->pending_kill) {
 		event->pending_wakeup = 1;
 		irq_work_queue(&event->pending);
 	}
...
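The perf_event_fasync() helper above makes SIGIO delivery follow the parent event, since inherited child events carry no fasync state of their own. A small sketch of the user-space side that relies on this, i.e. requesting asynchronous SIGIO notification on a perf fd (the request_sigio() helper and handler names are illustrative; fd is assumed to come from perf_event_open() with a sample period set, not shown):

#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <string.h>
#include <unistd.h>

static void on_sigio(int sig)
{
	(void)sig;	/* overflow samples are ready in the mmap'ed ring buffer */
}

/* fd is assumed to be a perf event file descriptor from perf_event_open(). */
static int request_sigio(int fd)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_sigio;
	if (sigaction(SIGIO, &sa, NULL))
		return -1;

	/* Deliver SIGIO for this fd to the calling process ... */
	if (fcntl(fd, F_SETOWN, getpid()))
		return -1;

	/* ... and turn on asynchronous notification (the fasync path). */
	return fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
}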
@@ -559,11 +559,13 @@ static void __rb_free_aux(struct ring_buffer *rb)
 		rb->aux_priv = NULL;
 	}
 
-	for (pg = 0; pg < rb->aux_nr_pages; pg++)
-		rb_free_aux_page(rb, pg);
+	if (rb->aux_nr_pages) {
+		for (pg = 0; pg < rb->aux_nr_pages; pg++)
+			rb_free_aux_page(rb, pg);
 
-	kfree(rb->aux_pages);
-	rb->aux_nr_pages = 0;
+		kfree(rb->aux_pages);
+		rb->aux_nr_pages = 0;
+	}
 }
 
 void rb_free_aux(struct ring_buffer *rb)
...