Commit a268fcfa authored by Borislav Petkov, committed by H. Peter Anvin

x86, asm: Thin down SAVE/RESTORE_* asm macros

Use dwarf2 cfi annotation macros, making SAVE/RESTORE_* marginally more
readable.

No functionality change.
Signed-off-by: Borislav Petkov <bp@alien8.de>
Link: http://lkml.kernel.org/r/1306873314-32523-2-git-send-email-bp@alien8.de
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent 55922c9d
...@@ -46,6 +46,7 @@ For 32-bit we have the following conventions - kernel is built with ...@@ -46,6 +46,7 @@ For 32-bit we have the following conventions - kernel is built with
*/ */
#include "dwarf2.h"
/* /*
* 64-bit system call stack frame layout defines and helpers, for * 64-bit system call stack frame layout defines and helpers, for
...@@ -87,30 +88,25 @@ For 32-bit we have the following conventions - kernel is built with ...@@ -87,30 +88,25 @@ For 32-bit we have the following conventions - kernel is built with
.macro SAVE_ARGS addskip=0, norcx=0, nor891011=0 .macro SAVE_ARGS addskip=0, norcx=0, nor891011=0
subq $9*8+\addskip, %rsp subq $9*8+\addskip, %rsp
CFI_ADJUST_CFA_OFFSET 9*8+\addskip CFI_ADJUST_CFA_OFFSET 9*8+\addskip
movq %rdi, 8*8(%rsp) movq_cfi rdi, 8*8
CFI_REL_OFFSET rdi, 8*8 movq_cfi rsi, 7*8
movq %rsi, 7*8(%rsp) movq_cfi rdx, 6*8
CFI_REL_OFFSET rsi, 7*8
movq %rdx, 6*8(%rsp)
CFI_REL_OFFSET rdx, 6*8
.if \norcx .if \norcx
.else .else
movq %rcx, 5*8(%rsp) movq_cfi rcx, 5*8
CFI_REL_OFFSET rcx, 5*8
.endif .endif
movq %rax, 4*8(%rsp)
CFI_REL_OFFSET rax, 4*8 movq_cfi rax, 4*8
.if \nor891011 .if \nor891011
.else .else
movq %r8, 3*8(%rsp) movq_cfi r8, 3*8
CFI_REL_OFFSET r8, 3*8 movq_cfi r9, 2*8
movq %r9, 2*8(%rsp) movq_cfi r10, 1*8
CFI_REL_OFFSET r9, 2*8 movq_cfi r11, 0*8
movq %r10, 1*8(%rsp)
CFI_REL_OFFSET r10, 1*8
movq %r11, (%rsp)
CFI_REL_OFFSET r11, 0*8
.endif .endif
.endm .endm
#define ARG_SKIP (9*8) #define ARG_SKIP (9*8)
...@@ -119,37 +115,34 @@ For 32-bit we have the following conventions - kernel is built with ...@@ -119,37 +115,34 @@ For 32-bit we have the following conventions - kernel is built with
skipr8910=0, skiprdx=0 skipr8910=0, skiprdx=0
.if \skipr11 .if \skipr11
.else .else
movq (%rsp), %r11 movq_cfi_restore 0*8, r11
CFI_RESTORE r11
.endif .endif
.if \skipr8910 .if \skipr8910
.else .else
movq 1*8(%rsp), %r10 movq_cfi_restore 1*8, r10
CFI_RESTORE r10 movq_cfi_restore 2*8, r9
movq 2*8(%rsp), %r9 movq_cfi_restore 3*8, r8
CFI_RESTORE r9
movq 3*8(%rsp), %r8
CFI_RESTORE r8
.endif .endif
.if \skiprax .if \skiprax
.else .else
movq 4*8(%rsp), %rax movq_cfi_restore 4*8, rax
CFI_RESTORE rax
.endif .endif
.if \skiprcx .if \skiprcx
.else .else
movq 5*8(%rsp), %rcx movq_cfi_restore 5*8, rcx
CFI_RESTORE rcx
.endif .endif
.if \skiprdx .if \skiprdx
.else .else
movq 6*8(%rsp), %rdx movq_cfi_restore 6*8, rdx
CFI_RESTORE rdx
.endif .endif
movq 7*8(%rsp), %rsi
CFI_RESTORE rsi movq_cfi_restore 7*8, rsi
movq 8*8(%rsp), %rdi movq_cfi_restore 8*8, rdi
CFI_RESTORE rdi
.if ARG_SKIP+\addskip > 0 .if ARG_SKIP+\addskip > 0
addq $ARG_SKIP+\addskip, %rsp addq $ARG_SKIP+\addskip, %rsp
CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip) CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
...@@ -176,33 +169,21 @@ For 32-bit we have the following conventions - kernel is built with ...@@ -176,33 +169,21 @@ For 32-bit we have the following conventions - kernel is built with
.macro SAVE_REST .macro SAVE_REST
subq $REST_SKIP, %rsp subq $REST_SKIP, %rsp
CFI_ADJUST_CFA_OFFSET REST_SKIP CFI_ADJUST_CFA_OFFSET REST_SKIP
movq %rbx, 5*8(%rsp) movq_cfi rbx, 5*8
CFI_REL_OFFSET rbx, 5*8 movq_cfi rbp, 4*8
movq %rbp, 4*8(%rsp) movq_cfi r12, 3*8
CFI_REL_OFFSET rbp, 4*8 movq_cfi r13, 2*8
movq %r12, 3*8(%rsp) movq_cfi r14, 1*8
CFI_REL_OFFSET r12, 3*8 movq_cfi r15, 0*8
movq %r13, 2*8(%rsp)
CFI_REL_OFFSET r13, 2*8
movq %r14, 1*8(%rsp)
CFI_REL_OFFSET r14, 1*8
movq %r15, (%rsp)
CFI_REL_OFFSET r15, 0*8
.endm .endm
.macro RESTORE_REST .macro RESTORE_REST
movq (%rsp), %r15 movq_cfi_restore 0*8, r15
CFI_RESTORE r15 movq_cfi_restore 1*8, r14
movq 1*8(%rsp), %r14 movq_cfi_restore 2*8, r13
CFI_RESTORE r14 movq_cfi_restore 3*8, r12
movq 2*8(%rsp), %r13 movq_cfi_restore 4*8, rbp
CFI_RESTORE r13 movq_cfi_restore 5*8, rbx
movq 3*8(%rsp), %r12
CFI_RESTORE r12
movq 4*8(%rsp), %rbp
CFI_RESTORE rbp
movq 5*8(%rsp), %rbx
CFI_RESTORE rbx
addq $REST_SKIP, %rsp addq $REST_SKIP, %rsp
CFI_ADJUST_CFA_OFFSET -(REST_SKIP) CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
.endm .endm
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment