Commit 9a556ab9 authored by Masami Hiramatsu, committed by Ingo Molnar

kprobes/x86: Check Interrupt Flag modifier when registering probe

Currently, kprobes checks whether the copied instruction modifies
IF (the interrupt flag) on each probe hit. This not only introduces
overhead, it also pulls inat_get_opcode_attribute() into the
kprobes hot path, which can lead to infinite recursion (and,
eventually, a kernel panic).

Since the copied instruction in the buffer never changes after it
has been copied, there is no need to analyze it on every probe hit.

To fix this, check the instruction only once, when the probe is
registered, and store the result in ainsn->if_modifier.
Reported-by: Timo Juhani Lindfors <timo.lindfors@iki.fi>
Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Acked-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: yrl.pp-manager.tt@hitachi.com
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20130314115242.19690.33573.stgit@mhiramat-M0-7522
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 65c10553
@@ -77,6 +77,7 @@ struct arch_specific_insn {
 	 * a post_handler or break_handler).
 	 */
 	int boostable;
+	bool if_modifier;
 };

 struct arch_optimized_insn {
@@ -375,6 +375,9 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
 	else
 		p->ainsn.boostable = -1;

+	/* Check whether the instruction modifies Interrupt Flag or not */
+	p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);
+
 	/* Also, displacement change doesn't affect the first byte */
 	p->opcode = p->ainsn.insn[0];
 }
@@ -434,7 +437,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 	__this_cpu_write(current_kprobe, p);
 	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
 		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
-	if (is_IF_modifier(p->ainsn.insn))
+	if (p->ainsn.if_modifier)
 		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
 }
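
For reference, the standalone sketch below shows what a check of this kind
boils down to: skip any legacy prefix bytes on the copied instruction, then
test the opcode against the instructions that can change EFLAGS.IF (cli,
sti, popf, iret). It is an illustrative approximation only; the helper
names, the partial prefix table and the demo harness are assumptions made
for the example, not the kernel's actual is_IF_modifier() implementation.

/*
 * Sketch of the "classify once at registration" idea: decide whether an
 * instruction can modify the interrupt flag from its opcode bytes, so the
 * per-hit path only has to read a cached boolean.
 *
 * Illustrative only -- prefix handling is a subset and the helper names
 * are made up for this example.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Skip a subset of x86 legacy prefixes (enough for this demo). */
static const uint8_t *skip_legacy_prefixes(const uint8_t *insn)
{
	for (;;) {
		switch (*insn) {
		case 0xf0: case 0xf2: case 0xf3:	/* lock, repne, rep */
		case 0x2e: case 0x36: case 0x3e:	/* segment overrides */
		case 0x26: case 0x64: case 0x65:
		case 0x66: case 0x67:			/* operand/address size */
			insn++;
			break;
		default:
			return insn;
		}
	}
}

/* True if the copied instruction can change EFLAGS.IF. */
static bool insn_modifies_if(const uint8_t *insn)
{
	insn = skip_legacy_prefixes(insn);

	switch (*insn) {
	case 0xfa:	/* cli */
	case 0xfb:	/* sti */
	case 0xcf:	/* iret */
	case 0x9d:	/* popf */
		return true;
	}
	return false;
}

int main(void)
{
	const uint8_t cli[]  = { 0xfa };
	const uint8_t nop[]  = { 0x90 };
	const uint8_t popf[] = { 0x66, 0x9d };	/* operand-size prefix + popf */

	/* "Registration time": compute once, cache the result. */
	printf("cli:  %d\n", insn_modifies_if(cli));	/* 1 */
	printf("nop:  %d\n", insn_modifies_if(nop));	/* 0 */
	printf("popf: %d\n", insn_modifies_if(popf));	/* 1 */
	return 0;
}

Because this classification depends only on the bytes already copied into
the instruction buffer, it can be computed once in arch_copy_kprobe() and
the cached ainsn->if_modifier simply read on every probe hit.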