Commit 56053170 authored by Frederic Weisbecker

hw-breakpoints: Fix task-bound breakpoint slot allocation

Whatever the nature of a breakpoint's context, we currently perform the
same constraint checks before allocating it a slot:

- Check the number of pinned breakpoints bound to the concerned cpus
- Check the max number of task-bound breakpoints belonging to any
  single task
- Add both and see if we have a remaining slot for the new breakpoint

This is the right thing to do when we are about to register a cpu-only
bound breakpoint, but not when we are dealing with a task-bound
breakpoint. What we want in that case is (see the sketch after the
list):

- Check the number of pinned breakpoints bound to the concerned cpus
- Check the number of breakpoints that already belong to the task
  to which the breakpoint being registered is bound
- Add both
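
The difference can be illustrated with a small user-space model. This is
only a sketch, not the kernel code itself: the counters, helpers and the
HBP_NUM value below are simplified stand-ins for the kernel's per-cpu data.

	#include <stdbool.h>
	#include <stdio.h>

	#define HBP_NUM 4	/* hardware breakpoint registers, e.g. on x86 */

	static int nr_cpu_bp_pinned;	/* cpu-bound pinned breakpoints on this cpu */
	static int max_task_bp_pinned;	/* highest task-bound count among all tasks */

	/* Old scheme: every breakpoint pays for the busiest task on the cpu. */
	static int pinned_old(void)
	{
		return nr_cpu_bp_pinned + max_task_bp_pinned;
	}

	/* New scheme: a task-bound breakpoint only pays for its own task. */
	static int pinned_new(bool task_bound, int task_bp_pinned)
	{
		if (task_bound)
			return nr_cpu_bp_pinned + task_bp_pinned;
		return nr_cpu_bp_pinned + max_task_bp_pinned;
	}

	int main(void)
	{
		/* Another task (e.g. the debuggee's main thread) already pins all slots. */
		max_task_bp_pinned = HBP_NUM;

		/* A secondary thread with no breakpoints of its own asks for one. */
		printf("old accounting: %d of %d slots busy\n",
		       pinned_old(), HBP_NUM);		/* 4 of 4 -> rejected */
		printf("new accounting: %d of %d slots busy\n",
		       pinned_new(true, 0), HBP_NUM);	/* 0 of 4 -> accepted */
		return 0;
	}

With the old accounting the secondary thread's request already appears to
use every slot and is refused; with the new accounting only the thread's
own (here zero) breakpoints are counted, which is what the patch implements.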

This fixes a regression that makes the "firefox -g" command fail to
register breakpoints as soon as a secondary thread is involved.
Reported-by: Walt <w41ter@gmail.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Prasad <prasad@linux.vnet.ibm.com>
parent ed872d09
@@ -83,15 +83,51 @@ static unsigned int max_task_bp_pinned(int cpu)
 	return 0;
 }
 
+static int task_bp_pinned(struct task_struct *tsk)
+{
+	struct perf_event_context *ctx = tsk->perf_event_ctxp;
+	struct list_head *list;
+	struct perf_event *bp;
+	unsigned long flags;
+	int count = 0;
+
+	if (WARN_ONCE(!ctx, "No perf context for this task"))
+		return 0;
+
+	list = &ctx->event_list;
+
+	spin_lock_irqsave(&ctx->lock, flags);
+
+	/*
+	 * The current breakpoint counter is not included in the list
+	 * at the open() callback time
+	 */
+	list_for_each_entry(bp, list, event_entry) {
+		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
+			count++;
+	}
+
+	spin_unlock_irqrestore(&ctx->lock, flags);
+
+	return count;
+}
+
 /*
  * Report the number of pinned/un-pinned breakpoints we have in
  * a given cpu (cpu > -1) or in all of them (cpu = -1).
  */
-static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
+static void
+fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp)
 {
+	int cpu = bp->cpu;
+	struct task_struct *tsk = bp->ctx->task;
+
 	if (cpu >= 0) {
 		slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
-		slots->pinned += max_task_bp_pinned(cpu);
+		if (!tsk)
+			slots->pinned += max_task_bp_pinned(cpu);
+		else
+			slots->pinned += task_bp_pinned(tsk);
 		slots->flexible = per_cpu(nr_bp_flexible, cpu);
 
 		return;
@@ -101,7 +137,10 @@ static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
 		unsigned int nr;
 
 		nr = per_cpu(nr_cpu_bp_pinned, cpu);
-		nr += max_task_bp_pinned(cpu);
+		if (!tsk)
+			nr += max_task_bp_pinned(cpu);
+		else
+			nr += task_bp_pinned(tsk);
 
 		if (nr > slots->pinned)
 			slots->pinned = nr;
@@ -118,33 +157,10 @@ static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
  */
 static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
 {
-	int count = 0;
-	struct perf_event *bp;
-	struct perf_event_context *ctx = tsk->perf_event_ctxp;
 	unsigned int *tsk_pinned;
-	struct list_head *list;
-	unsigned long flags;
-
-	if (WARN_ONCE(!ctx, "No perf context for this task"))
-		return;
-
-	list = &ctx->event_list;
-
-	spin_lock_irqsave(&ctx->lock, flags);
-
-	/*
-	 * The current breakpoint counter is not included in the list
-	 * at the open() callback time
-	 */
-	list_for_each_entry(bp, list, event_entry) {
-		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
-			count++;
-	}
-
-	spin_unlock_irqrestore(&ctx->lock, flags);
-
-	if (WARN_ONCE(count < 0, "No breakpoint counter found in the counter list"))
-		return;
+	int count = 0;
+
+	count = task_bp_pinned(tsk);
 
 	tsk_pinned = per_cpu(task_bp_pinned, cpu);
 	if (enable) {
@@ -233,7 +249,7 @@ int reserve_bp_slot(struct perf_event *bp)
 
 	mutex_lock(&nr_bp_mutex);
 
-	fetch_bp_busy_slots(&slots, bp->cpu);
+	fetch_bp_busy_slots(&slots, bp);
 
 	/* Flexible counters need to keep at least one slot */
 	if (slots.pinned + (!!slots.flexible) == HBP_NUM) {