Commit 1871853f authored by Frederic Weisbecker

x86,64: Simplify save_regs()

The save_regs function that saves the registers on low-level
irq entry is complicated by the fact that it switches
stacks in the middle, and also because it manipulates
data allocated in the caller's frame, with those accesses
computed directly from the callee's rsp value while the
return address sits in the middle of the way.

This complicates the calculation of static stack offsets
and requires more dynamic ones. It also needs a save/restore
of the function's return address.

To simplify and optimize this, turn save_regs() into a
macro.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jan Beulich <JBeulich@novell.com>
parent 47ce11a2
...@@ -297,27 +297,22 @@ ENDPROC(native_usergs_sysret64) ...@@ -297,27 +297,22 @@ ENDPROC(native_usergs_sysret64)
.endm .endm
/* save partial stack frame */ /* save partial stack frame */
.pushsection .kprobes.text, "ax" .macro SAVE_ARGS_IRQ
ENTRY(save_args)
XCPT_FRAME
cld cld
/* /* start from rbp in pt_regs and jump over */
* start from rbp in pt_regs and jump over movq_cfi rdi, RDI-RBP
* return address. movq_cfi rsi, RSI-RBP
*/ movq_cfi rdx, RDX-RBP
movq_cfi rdi, RDI+8-RBP movq_cfi rcx, RCX-RBP
movq_cfi rsi, RSI+8-RBP movq_cfi rax, RAX-RBP
movq_cfi rdx, RDX+8-RBP movq_cfi r8, R8-RBP
movq_cfi rcx, RCX+8-RBP movq_cfi r9, R9-RBP
movq_cfi rax, RAX+8-RBP movq_cfi r10, R10-RBP
movq_cfi r8, R8+8-RBP movq_cfi r11, R11-RBP
movq_cfi r9, R9+8-RBP
movq_cfi r10, R10+8-RBP leaq -RBP(%rsp),%rdi /* arg1 for handler */
movq_cfi r11, R11+8-RBP movq_cfi rbp, 0 /* push %rbp */
movq %rsp, %rbp
leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
movq_cfi rbp, 8 /* push %rbp */
leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
testl $3, CS(%rdi) testl $3, CS(%rdi)
je 1f je 1f
SWAPGS SWAPGS
...@@ -329,19 +324,14 @@ ENTRY(save_args) ...@@ -329,19 +324,14 @@ ENTRY(save_args)
*/ */
1: incl PER_CPU_VAR(irq_count) 1: incl PER_CPU_VAR(irq_count)
jne 2f jne 2f
popq_cfi %rax /* move return address... */
mov PER_CPU_VAR(irq_stack_ptr),%rsp mov PER_CPU_VAR(irq_stack_ptr),%rsp
EMPTY_FRAME 0 EMPTY_FRAME 0
pushq_cfi %rbp /* backlink for unwinder */ pushq_cfi %rbp /* backlink for unwinder */
pushq_cfi %rax /* ... to the new stack */
/* /*
* We entered an interrupt context - irqs are off: * We entered an interrupt context - irqs are off:
*/ */
2: TRACE_IRQS_OFF 2: TRACE_IRQS_OFF
ret .endm
CFI_ENDPROC
END(save_args)
.popsection
ENTRY(save_rest) ENTRY(save_rest)
PARTIAL_FRAME 1 REST_SKIP+8 PARTIAL_FRAME 1 REST_SKIP+8
...@@ -791,7 +781,7 @@ END(interrupt) ...@@ -791,7 +781,7 @@ END(interrupt)
/* reserve pt_regs for scratch regs and rbp */ /* reserve pt_regs for scratch regs and rbp */
subq $ORIG_RAX-RBP, %rsp subq $ORIG_RAX-RBP, %rsp
CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
call save_args SAVE_ARGS_IRQ
PARTIAL_FRAME 0 PARTIAL_FRAME 0
call \func call \func
.endm .endm
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment