Commit db087ef6 authored by Oleg Nesterov, committed by Ingo Molnar

uprobes/x86: Make arch_uretprobe_is_alive(RP_CHECK_CALL) more clever

The previous change documents that cleanup_return_instances()
can't always detect dead frames, since the stack can grow. But
there is one special case which is, imho, worth fixing:
arch_uretprobe_is_alive() can return true when the stack didn't
actually grow, but the next "call" insn reuses the already
invalidated frame.

Test-case:

	#include <stdio.h>
	#include <setjmp.h>

	jmp_buf jmp;
	int nr = 1024;

	void func_2(void)
	{
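		/*
		 * Returning via longjmp() unwinds the stack without a
		 * "ret", so the ret-addr slot recorded for this frame is
		 * reused by the next "call func_2" at the same address.
		 */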
		if (--nr == 0)
			return;
		longjmp(jmp, 1);
	}

	void func_1(void)
	{
		setjmp(jmp);
		func_2();
	}

	int main(void)
	{
		func_1();
		return 0;
	}

If you ret-probe func_1() and func_2(), prepare_uretprobe() hits
the MAX_URETPROBE_DEPTH limit and the "return" from func_2() is
not reported.

When we know that the new call is not chained, we can do a
stricter check. In this case "sp" points to the new ret-addr,
so every frame which uses the same "sp" must be dead. The only
complication is that arch_uretprobe_is_alive() needs to know
whether the call was chained or not, so we add the new
RP_CHECK_CHAIN_CALL enum value and change prepare_uretprobe()
to pass RP_CHECK_CALL only if !chained.
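
For illustration, a minimal user-space model of the two checks
(frame_is_alive() and its flat arguments are made up for this
sketch; in the kernel the values come from regs->sp and
ret->stack, and the stack grows down):

	#include <stdbool.h>

	/*
	 * An old frame is alive while its recorded ret-addr slot still
	 * lies above the current top of stack. On a non-chained call
	 * the "call" insn has just pushed a new return address, so sp
	 * points at that new slot; if an old frame recorded the very
	 * same address, its slot has been reused and the frame must be
	 * dead. Hence "<" instead of "<=".
	 */
	static bool frame_is_alive(unsigned long sp, unsigned long ret_stack,
				   bool chained_call)
	{
		if (!chained_call)	/* sp was just decremented by "call" */
			return sp < ret_stack;
		return sp <= ret_stack;
	}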

Note: arch_uretprobe_is_alive() could also re-read *sp and check
whether this word is still trampoline_vaddr. This could obviously
improve the logic, but I would like to avoid another
copy_from_user(), especially in the case when we can't avoid the
false "alive == T" positives.
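
For reference, a rough sketch of that rejected variant (not part
of this patch; the helper name is invented, and it assumes the
kernel context where copy_from_user() and struct return_instance
are available):

	/*
	 * Rejected idea: re-read the word at ret->stack and test whether
	 * it still holds the trampoline address. This costs an extra
	 * copy_from_user() per frame, and a dead frame whose slot was
	 * hijacked again would still hold trampoline_vaddr, so false
	 * "alive" results remain possible anyway.
	 */
	static bool frame_still_hijacked(struct return_instance *ret,
					 unsigned long trampoline_vaddr)
	{
		unsigned long word;

		if (copy_from_user(&word, (void __user *)ret->stack, sizeof(word)))
			return false;	/* unreadable stack: treat frame as dead */

		return word == trampoline_vaddr;
	}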
Tested-by: Pratyush Anand <panand@redhat.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Acked-by: Anton Arapov <arapov@gmail.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20150721134028.GA4786@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 86dcb702
@@ -989,5 +989,8 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
 bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
 				struct pt_regs *regs)
 {
-	return regs->sp <= ret->stack;
+	if (ctx == RP_CHECK_CALL) /* sp was just decremented by "call" insn */
+		return regs->sp < ret->stack;
+	else
+		return regs->sp <= ret->stack;
 }
...
@@ -104,6 +104,7 @@ struct return_instance {
 
 enum rp_check {
 	RP_CHECK_CALL,
+	RP_CHECK_CHAIN_CALL,
 	RP_CHECK_RET,
 };
...
@@ -1511,10 +1511,11 @@ static unsigned long get_trampoline_vaddr(void)
 	return trampoline_vaddr;
 }
 
-static void cleanup_return_instances(struct uprobe_task *utask, struct pt_regs *regs)
+static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
+					struct pt_regs *regs)
 {
 	struct return_instance *ri = utask->return_instances;
-	enum rp_check ctx = RP_CHECK_CALL;
+	enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;
 
 	while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
 		ri = free_ret_instance(ri);
@@ -1528,7 +1529,7 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
 	struct return_instance *ri;
 	struct uprobe_task *utask;
 	unsigned long orig_ret_vaddr, trampoline_vaddr;
-	bool chained = false;
+	bool chained;
 
 	if (!get_xol_area())
 		return;
@@ -1554,14 +1555,15 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
 		goto fail;
 
 	/* drop the entries invalidated by longjmp() */
-	cleanup_return_instances(utask, regs);
+	chained = (orig_ret_vaddr == trampoline_vaddr);
+	cleanup_return_instances(utask, chained, regs);
 
 	/*
 	 * We don't want to keep trampoline address in stack, rather keep the
 	 * original return address of first caller thru all the consequent
 	 * instances. This also makes breakpoint unwrapping easier.
 	 */
-	if (orig_ret_vaddr == trampoline_vaddr) {
+	if (chained) {
 		if (!utask->return_instances) {
 			/*
 			 * This situation is not possible. Likely we have an
@@ -1570,8 +1572,6 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
 			uprobe_warn(current, "handle tail call");
 			goto fail;
 		}
-
-		chained = true;
 		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
 	}
...