Commit 527aa75b authored by Steven Rostedt (Red Hat), committed by Steven Rostedt

ftrace/x86: Simplify save_mcount_regs on getting RIP

Currently save_mcount_regs is passed a "skip" parameter that tells it how much
stack the caller has already pushed, and it shrinks its own allocation by that
amount so that the saved pt_regs ends up in the same location for all users.
This is rather stupid, especially since whatever the caller pushed then overlaps
pt_regs slots that have nothing to do with what is supposed to be in those
locations.
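
For reference, this is roughly what the old scheme looked like (a condensed
excerpt of the pre-patch macro from the diff below, with the individual
register saves elided):

	.macro save_mcount_regs skip=0
		/* allocate less when the caller already pushed \skip bytes */
		subq $(SS+8-\skip), %rsp
		...
		/* RIP then always sits at the same offset */
		movq SS+8(%rsp), %rdi
		movq %rdi, RIP(%rsp)
	.endm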

Instead of doing that, just pass in an "added" parameter that lets the macro
know how much stack was added before it was called, so that it can still get
to the RIP. The difference is that the pt_regs frame now ends up offset by
that "added" count, and the caller needs to take care of that offset itself.
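
Roughly, the macro now looks like this instead (again a condensed excerpt;
the full change is in the diff below):

	.macro save_mcount_regs added=0
		/* always allocate the full pt_regs frame */
		subq $(SS+8), %rsp
		...
		/* RIP sits above the frame plus whatever the caller added */
		movq SS+8+\added(%rsp), %rdi
		movq %rdi, RIP(%rsp)
	.endm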

This will make it easier to simplify the code later.

Link: http://lkml.kernel.org/r/alpine.DEB.2.11.1411262304010.3961@nanos
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent 094dfc54
@@ -37,12 +37,12 @@
  * be saved in the locations that pt_regs has them in.
  */
 
-/* skip is set if the stack was already partially adjusted */
-.macro save_mcount_regs skip=0
+/* @added: the amount of stack added before calling this */
+.macro save_mcount_regs added=0
 	/*
 	 * We add enough stack to save all regs.
 	 */
-	subq $(SS+8-\skip), %rsp
+	subq $(SS+8), %rsp
 	movq %rax, RAX(%rsp)
 	movq %rcx, RCX(%rsp)
 	movq %rdx, RDX(%rsp)
@@ -51,11 +51,11 @@
 	movq %r8, R8(%rsp)
 	movq %r9, R9(%rsp)
 	/* Move RIP to its proper location */
-	movq SS+8(%rsp), %rdi
+	movq SS+8+\added(%rsp), %rdi
 	movq %rdi, RIP(%rsp)
 .endm
 
-.macro restore_mcount_regs skip=0
+.macro restore_mcount_regs
 	movq R9(%rsp), %r9
 	movq R8(%rsp), %r8
 	movq RDI(%rsp), %rdi
@@ -63,12 +63,12 @@
 	movq RDX(%rsp), %rdx
 	movq RCX(%rsp), %rcx
 	movq RAX(%rsp), %rax
-	addq $(SS+8-\skip), %rsp
+	addq $(SS+8), %rsp
 .endm
 
 /* skip is set if stack has been adjusted */
-.macro ftrace_caller_setup trace_label skip=0
-	save_mcount_regs \skip
+.macro ftrace_caller_setup trace_label added=0
+	save_mcount_regs \added
 
 	/* Save this location */
 GLOBAL(\trace_label)
@@ -79,9 +79,9 @@ GLOBAL(\trace_label)
 	subq $MCOUNT_INSN_SIZE, %rdi
 
 	/* Load the parent_ip into the second parameter */
 #ifdef CC_USING_FENTRY
-	movq SS+16(%rsp), %rsi
+	movq SS+16+\added(%rsp), %rsi
 #else
-	movq 8(%rbp), %rsi
+	movq 8+\added(%rbp), %rsi
 #endif
 .endm
@@ -156,10 +156,10 @@ GLOBAL(ftrace_stub)
 END(ftrace_caller)
 
 ENTRY(ftrace_regs_caller)
-	/* Save the current flags before compare (in SS location)*/
+	/* Save the current flags before any operations that can change them */
 	pushfq
 
-	/* skip=8 to skip flags saved in SS */
+	/* added 8 bytes to save flags */
 	ftrace_caller_setup ftrace_regs_caller_op_ptr 8
 
 	/* Save the rest of pt_regs */
@@ -172,15 +172,15 @@ ENTRY(ftrace_regs_caller)
 	movq %rbp, RBP(%rsp)
 	movq %rbx, RBX(%rsp)
 
 	/* Copy saved flags */
-	movq SS(%rsp), %rcx
+	movq SS+8(%rsp), %rcx
 	movq %rcx, EFLAGS(%rsp)
 	/* Kernel segments */
 	movq $__KERNEL_DS, %rcx
 	movq %rcx, SS(%rsp)
 	movq $__KERNEL_CS, %rcx
 	movq %rcx, CS(%rsp)
-	/* Stack - skipping return address */
-	leaq SS+16(%rsp), %rcx
+	/* Stack - skipping return address and flags */
+	leaq SS+8*3(%rsp), %rcx
 	movq %rcx, RSP(%rsp)
 	/* regs go into 4th parameter */
@@ -195,11 +195,11 @@ GLOBAL(ftrace_regs_call)
 
 	/* Copy flags back to SS, to restore them */
 	movq EFLAGS(%rsp), %rax
-	movq %rax, SS(%rsp)
+	movq %rax, SS+8(%rsp)
 
 	/* Handlers can change the RIP */
 	movq RIP(%rsp), %rax
-	movq %rax, SS+8(%rsp)
+	movq %rax, SS+8*2(%rsp)
 
 	/* restore the rest of pt_regs */
 	movq R15(%rsp), %r15
@@ -210,8 +210,7 @@ GLOBAL(ftrace_regs_call)
 	movq RBP(%rsp), %rbp
 	movq RBX(%rsp), %rbx
 
-	/* skip=8 to skip flags saved in SS */
-	restore_mcount_regs 8
+	restore_mcount_regs
 
 	/* Restore flags */
 	popfq
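
As a sanity check of the new offsets used in ftrace_regs_caller above, this is
a sketch of the stack after "pushfq; ftrace_caller_setup
ftrace_regs_caller_op_ptr 8", pieced together from the hunks above (it assumes
the traced function was compiled with -mfentry, i.e. CC_USING_FENTRY):

	/*
	 * %rsp + SS+8*3 : parent return address (what the traced function
	 *                 will return to); this address is also what gets
	 *                 stored in RSP(%rsp)
	 * %rsp + SS+8*2 : return address back into the traced function,
	 *                 copied to RIP(%rsp) and written back on exit
	 * %rsp + SS+8   : flags saved by pushfq, copied to EFLAGS(%rsp)
	 * %rsp + 0..SS  : the pt_regs save area itself
	 */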