Commit 8a2d71a3 authored by Naveen N. Rao, committed by Michael Ellerman

powerpc/kprobes: Disable preemption before invoking probe handler for optprobes

Per Documentation/kprobes.txt, probe handlers need to be invoked with
preemption disabled. Update optimized_callback() to do so. Also move
get_kprobe_ctlblk() invocation to after the preemption disable, since it
accesses per-cpu data.

This was not an issue so far since optprobes wasn't selected if
CONFIG_PREEMPT was enabled. Commit a30b85df ("kprobes: Use
synchronize_rcu_tasks() for optprobe with CONFIG_PREEMPT=y") changes
this.
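To illustrate the idiom this change enforces, here is a minimal sketch
(an assumed helper, sketch_mark_hit(), not code from the tree): per-cpu
accessors such as get_kprobe_ctlblk() are only safe once preemption is
off, otherwise the task can migrate to another CPU between looking up
the per-cpu pointer and using it.

#include <linux/kprobes.h>
#include <linux/preempt.h>

/*
 * Illustrative sketch only: per-cpu kprobe state must be accessed
 * with preemption disabled so the task cannot migrate CPUs
 * mid-access and scribble on another CPU's control block.
 */
static void sketch_mark_hit(struct kprobe *kp)
{
	preempt_disable();
	/* Pinned to this CPU from here until preempt_enable*(). */
	__this_cpu_write(current_kprobe, kp);
	get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
	__this_cpu_write(current_kprobe, NULL);
	/*
	 * The _no_resched variant matches the patched callback below,
	 * which still has interrupts hard-disabled at this point;
	 * rescheduling is deferred to the interrupt return path.
	 */
	preempt_enable_no_resched();
}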
Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent fc2a5a61
@@ -115,7 +115,6 @@ static unsigned long can_optimize(struct kprobe *p)
 static void optimized_callback(struct optimized_kprobe *op,
 			       struct pt_regs *regs)
 {
-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 	unsigned long flags;
 
 	/* This is possible if op is under delayed unoptimizing */
@@ -124,13 +123,14 @@ static void optimized_callback(struct optimized_kprobe *op,
 
 	local_irq_save(flags);
 	hard_irq_disable();
+	preempt_disable();
 
 	if (kprobe_running()) {
 		kprobes_inc_nmissed_count(&op->kp);
 	} else {
 		__this_cpu_write(current_kprobe, &op->kp);
 		regs->nip = (unsigned long)op->kp.addr;
-		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+		get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
 		opt_pre_handler(&op->kp, regs);
 		__this_cpu_write(current_kprobe, NULL);
 	}
@@ -140,6 +140,7 @@ static void optimized_callback(struct optimized_kprobe *op,
 	 * local_irq_restore() will re-enable interrupts,
 	 * if they were hard disabled.
 	 */
+	preempt_enable_no_resched();
 	local_irq_restore(flags);
 }
 NOKPROBE_SYMBOL(optimized_callback);