Commit 9dda1658 authored by Ingo Molnar

Merge branch 'x86/asm' into x86/core, to prepare for new patch

Collect all changes to arch/x86/entry/entry_64.S before applying a
patch that changes most of the file.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents b72e7464 a49976d1
...@@ -18,10 +18,10 @@ Some of these entries are: ...@@ -18,10 +18,10 @@ Some of these entries are:
- system_call: syscall instruction from 64-bit code. - system_call: syscall instruction from 64-bit code.
- ia32_syscall: int 0x80 from 32-bit or 64-bit code; compat syscall - entry_INT80_compat: int 0x80 from 32-bit or 64-bit code; compat syscall
either way. either way.
- ia32_syscall, ia32_sysenter: syscall and sysenter from 32-bit - entry_INT80_compat, ia32_sysenter: syscall and sysenter from 32-bit
code code
- interrupt: An array of entries. Every IDT vector that doesn't - interrupt: An array of entries. Every IDT vector that doesn't
......
...@@ -10893,7 +10893,7 @@ M: Andy Lutomirski <luto@amacapital.net> ...@@ -10893,7 +10893,7 @@ M: Andy Lutomirski <luto@amacapital.net>
L: linux-kernel@vger.kernel.org L: linux-kernel@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/vdso T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/vdso
S: Maintained S: Maintained
F: arch/x86/vdso/ F: arch/x86/entry/vdso/
XC2028/3028 TUNER DRIVER XC2028/3028 TUNER DRIVER
M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
......
obj-y += entry/
obj-$(CONFIG_KVM) += kvm/ obj-$(CONFIG_KVM) += kvm/
# Xen paravirtualization support # Xen paravirtualization support
...@@ -11,7 +14,7 @@ obj-y += kernel/ ...@@ -11,7 +14,7 @@ obj-y += kernel/
obj-y += mm/ obj-y += mm/
obj-y += crypto/ obj-y += crypto/
obj-y += vdso/
obj-$(CONFIG_IA32_EMULATION) += ia32/ obj-$(CONFIG_IA32_EMULATION) += ia32/
obj-y += platform/ obj-y += platform/
......
...@@ -149,12 +149,6 @@ endif ...@@ -149,12 +149,6 @@ endif
sp-$(CONFIG_X86_32) := esp sp-$(CONFIG_X86_32) := esp
sp-$(CONFIG_X86_64) := rsp sp-$(CONFIG_X86_64) := rsp
# do binutils support CFI?
cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_endproc,-DCONFIG_AS_CFI=1)
# is .cfi_signal_frame supported too?
cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1)
cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1)
# does binutils support specific instructions? # does binutils support specific instructions?
asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1) asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1)
asinstr += $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1) asinstr += $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1)
...@@ -162,8 +156,8 @@ asinstr += $(call as-instr,crc32l %eax$(comma)%eax,-DCONFIG_AS_CRC32=1) ...@@ -162,8 +156,8 @@ asinstr += $(call as-instr,crc32l %eax$(comma)%eax,-DCONFIG_AS_CRC32=1)
avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1) avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1)
avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1) avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) KBUILD_AFLAGS += $(asinstr) $(avx_instr) $(avx2_instr)
KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) KBUILD_CFLAGS += $(asinstr) $(avx_instr) $(avx2_instr)
LDFLAGS := -m elf_$(UTS_MACHINE) LDFLAGS := -m elf_$(UTS_MACHINE)
...@@ -187,7 +181,7 @@ archscripts: scripts_basic ...@@ -187,7 +181,7 @@ archscripts: scripts_basic
# Syscall table generation # Syscall table generation
archheaders: archheaders:
$(Q)$(MAKE) $(build)=arch/x86/syscalls all $(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all
archprepare: archprepare:
ifeq ($(CONFIG_KEXEC_FILE),y) ifeq ($(CONFIG_KEXEC_FILE),y)
...@@ -250,7 +244,7 @@ install: ...@@ -250,7 +244,7 @@ install:
PHONY += vdso_install PHONY += vdso_install
vdso_install: vdso_install:
$(Q)$(MAKE) $(build)=arch/x86/vdso $@ $(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@
archclean: archclean:
$(Q)rm -rf $(objtree)/arch/i386 $(Q)rm -rf $(objtree)/arch/i386
......
#
# Makefile for the x86 low level entry code
#
obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
obj-y += vdso/
obj-y += vsyscall/
obj-$(CONFIG_IA32_EMULATION) += entry_64_compat.o syscall_32.o
...@@ -46,8 +46,6 @@ For 32-bit we have the following conventions - kernel is built with ...@@ -46,8 +46,6 @@ For 32-bit we have the following conventions - kernel is built with
*/ */
#include <asm/dwarf2.h>
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
/* /*
...@@ -91,28 +89,27 @@ For 32-bit we have the following conventions - kernel is built with ...@@ -91,28 +89,27 @@ For 32-bit we have the following conventions - kernel is built with
#define SIZEOF_PTREGS 21*8 #define SIZEOF_PTREGS 21*8
.macro ALLOC_PT_GPREGS_ON_STACK addskip=0 .macro ALLOC_PT_GPREGS_ON_STACK addskip=0
subq $15*8+\addskip, %rsp addq $-(15*8+\addskip), %rsp
CFI_ADJUST_CFA_OFFSET 15*8+\addskip
.endm .endm
.macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1 .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
.if \r11 .if \r11
movq_cfi r11, 6*8+\offset movq %r11, 6*8+\offset(%rsp)
.endif .endif
.if \r8910 .if \r8910
movq_cfi r10, 7*8+\offset movq %r10, 7*8+\offset(%rsp)
movq_cfi r9, 8*8+\offset movq %r9, 8*8+\offset(%rsp)
movq_cfi r8, 9*8+\offset movq %r8, 9*8+\offset(%rsp)
.endif .endif
.if \rax .if \rax
movq_cfi rax, 10*8+\offset movq %rax, 10*8+\offset(%rsp)
.endif .endif
.if \rcx .if \rcx
movq_cfi rcx, 11*8+\offset movq %rcx, 11*8+\offset(%rsp)
.endif .endif
movq_cfi rdx, 12*8+\offset movq %rdx, 12*8+\offset(%rsp)
movq_cfi rsi, 13*8+\offset movq %rsi, 13*8+\offset(%rsp)
movq_cfi rdi, 14*8+\offset movq %rdi, 14*8+\offset(%rsp)
.endm .endm
.macro SAVE_C_REGS offset=0 .macro SAVE_C_REGS offset=0
SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1 SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
...@@ -131,24 +128,24 @@ For 32-bit we have the following conventions - kernel is built with ...@@ -131,24 +128,24 @@ For 32-bit we have the following conventions - kernel is built with
.endm .endm
.macro SAVE_EXTRA_REGS offset=0 .macro SAVE_EXTRA_REGS offset=0
movq_cfi r15, 0*8+\offset movq %r15, 0*8+\offset(%rsp)
movq_cfi r14, 1*8+\offset movq %r14, 1*8+\offset(%rsp)
movq_cfi r13, 2*8+\offset movq %r13, 2*8+\offset(%rsp)
movq_cfi r12, 3*8+\offset movq %r12, 3*8+\offset(%rsp)
movq_cfi rbp, 4*8+\offset movq %rbp, 4*8+\offset(%rsp)
movq_cfi rbx, 5*8+\offset movq %rbx, 5*8+\offset(%rsp)
.endm .endm
.macro SAVE_EXTRA_REGS_RBP offset=0 .macro SAVE_EXTRA_REGS_RBP offset=0
movq_cfi rbp, 4*8+\offset movq %rbp, 4*8+\offset(%rsp)
.endm .endm
.macro RESTORE_EXTRA_REGS offset=0 .macro RESTORE_EXTRA_REGS offset=0
movq_cfi_restore 0*8+\offset, r15 movq 0*8+\offset(%rsp), %r15
movq_cfi_restore 1*8+\offset, r14 movq 1*8+\offset(%rsp), %r14
movq_cfi_restore 2*8+\offset, r13 movq 2*8+\offset(%rsp), %r13
movq_cfi_restore 3*8+\offset, r12 movq 3*8+\offset(%rsp), %r12
movq_cfi_restore 4*8+\offset, rbp movq 4*8+\offset(%rsp), %rbp
movq_cfi_restore 5*8+\offset, rbx movq 5*8+\offset(%rsp), %rbx
.endm .endm
.macro ZERO_EXTRA_REGS .macro ZERO_EXTRA_REGS
...@@ -162,24 +159,24 @@ For 32-bit we have the following conventions - kernel is built with ...@@ -162,24 +159,24 @@ For 32-bit we have the following conventions - kernel is built with
.macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1 .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
.if \rstor_r11 .if \rstor_r11
movq_cfi_restore 6*8, r11 movq 6*8(%rsp), %r11
.endif .endif
.if \rstor_r8910 .if \rstor_r8910
movq_cfi_restore 7*8, r10 movq 7*8(%rsp), %r10
movq_cfi_restore 8*8, r9 movq 8*8(%rsp), %r9
movq_cfi_restore 9*8, r8 movq 9*8(%rsp), %r8
.endif .endif
.if \rstor_rax .if \rstor_rax
movq_cfi_restore 10*8, rax movq 10*8(%rsp), %rax
.endif .endif
.if \rstor_rcx .if \rstor_rcx
movq_cfi_restore 11*8, rcx movq 11*8(%rsp), %rcx
.endif .endif
.if \rstor_rdx .if \rstor_rdx
movq_cfi_restore 12*8, rdx movq 12*8(%rsp), %rdx
.endif .endif
movq_cfi_restore 13*8, rsi movq 13*8(%rsp), %rsi
movq_cfi_restore 14*8, rdi movq 14*8(%rsp), %rdi
.endm .endm
.macro RESTORE_C_REGS .macro RESTORE_C_REGS
RESTORE_C_REGS_HELPER 1,1,1,1,1 RESTORE_C_REGS_HELPER 1,1,1,1,1
...@@ -204,8 +201,7 @@ For 32-bit we have the following conventions - kernel is built with ...@@ -204,8 +201,7 @@ For 32-bit we have the following conventions - kernel is built with
.endm .endm
.macro REMOVE_PT_GPREGS_FROM_STACK addskip=0 .macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
addq $15*8+\addskip, %rsp subq $-(15*8+\addskip), %rsp
CFI_ADJUST_CFA_OFFSET -(15*8+\addskip)
.endm .endm
.macro icebp .macro icebp
...@@ -224,23 +220,23 @@ For 32-bit we have the following conventions - kernel is built with ...@@ -224,23 +220,23 @@ For 32-bit we have the following conventions - kernel is built with
*/ */
.macro SAVE_ALL .macro SAVE_ALL
pushl_cfi_reg eax pushl %eax
pushl_cfi_reg ebp pushl %ebp
pushl_cfi_reg edi pushl %edi
pushl_cfi_reg esi pushl %esi
pushl_cfi_reg edx pushl %edx
pushl_cfi_reg ecx pushl %ecx
pushl_cfi_reg ebx pushl %ebx
.endm .endm
.macro RESTORE_ALL .macro RESTORE_ALL
popl_cfi_reg ebx popl %ebx
popl_cfi_reg ecx popl %ecx
popl_cfi_reg edx popl %edx
popl_cfi_reg esi popl %esi
popl_cfi_reg edi popl %edi
popl_cfi_reg ebp popl %ebp
popl_cfi_reg eax popl %eax
.endm .endm
#endif /* CONFIG_X86_64 */ #endif /* CONFIG_X86_64 */
......
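For reference, a minimal C sketch of the 8-byte stack slots that the SAVE_* and RESTORE_* macros above hard-code; the slot names are illustrative, only the offsets are taken from the macros themselves.

/* Slot index, in 8-byte units from %rsp after ALLOC_PT_GPREGS_ON_STACK. */
enum gpregs_slot {
	SLOT_R15 = 0,   /* SAVE_EXTRA_REGS:  0*8 */
	SLOT_R14,       /*                   1*8 */
	SLOT_R13,       /*                   2*8 */
	SLOT_R12,       /*                   3*8 */
	SLOT_RBP,       /*                   4*8 */
	SLOT_RBX,       /*                   5*8 */
	SLOT_R11,       /* SAVE_C_REGS:      6*8 */
	SLOT_R10,       /*                   7*8 */
	SLOT_R9,        /*                   8*8 */
	SLOT_R8,        /*                   9*8 */
	SLOT_RAX,       /*                  10*8 */
	SLOT_RCX,       /*                  11*8 */
	SLOT_RDX,       /*                  12*8 */
	SLOT_RSI,       /*                  13*8 */
	SLOT_RDI,       /*                  14*8 */
	GPREGS_SLOTS    /* 15 slots allocated here; SIZEOF_PTREGS is 21*8
	                   because orig_ax/rip/cs/flags/rsp/ss sit above them */
};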
...@@ -10,7 +10,7 @@ ...@@ -10,7 +10,7 @@
#else #else
#define SYM(sym, compat) sym #define SYM(sym, compat) sym
#define ia32_sys_call_table sys_call_table #define ia32_sys_call_table sys_call_table
#define __NR_ia32_syscall_max __NR_syscall_max #define __NR_entry_INT80_compat_max __NR_syscall_max
#endif #endif
#define __SYSCALL_I386(nr, sym, compat) extern asmlinkage void SYM(sym, compat)(void) ; #define __SYSCALL_I386(nr, sym, compat) extern asmlinkage void SYM(sym, compat)(void) ;
...@@ -23,11 +23,11 @@ typedef asmlinkage void (*sys_call_ptr_t)(void); ...@@ -23,11 +23,11 @@ typedef asmlinkage void (*sys_call_ptr_t)(void);
extern asmlinkage void sys_ni_syscall(void); extern asmlinkage void sys_ni_syscall(void);
__visible const sys_call_ptr_t ia32_sys_call_table[__NR_ia32_syscall_max+1] = { __visible const sys_call_ptr_t ia32_sys_call_table[__NR_entry_INT80_compat_max+1] = {
/* /*
* Smells like a compiler bug -- it doesn't work * Smells like a compiler bug -- it doesn't work
* when the & below is removed. * when the & below is removed.
*/ */
[0 ... __NR_ia32_syscall_max] = &sys_ni_syscall, [0 ... __NR_entry_INT80_compat_max] = &sys_ni_syscall,
#include <asm/syscalls_32.h> #include <asm/syscalls_32.h>
}; };
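The "compiler bug" comment refers to the GNU C range-designator initializer used above: every slot is pre-filled with sys_ni_syscall, and the generated entries pulled in by the #include then override individual slots. A standalone illustration of the pattern, with made-up names:

static void nosys(void)        { }
static void do_something(void) { }

#define NR_MAX 7

static void (*const call_table[NR_MAX + 1])(void) = {
	[0 ... NR_MAX] = &nosys,         /* default every slot first      */
	[3]            = &do_something,  /* later designators override it */
};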
out := $(obj)/../include/generated/asm out := $(obj)/../../include/generated/asm
uapi := $(obj)/../include/generated/uapi/asm uapi := $(obj)/../../include/generated/uapi/asm
# Create output directory if not already present # Create output directory if not already present
_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') \ _dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') \
......
...@@ -6,16 +6,14 @@ ...@@ -6,16 +6,14 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/asm.h> #include <asm/asm.h>
#include <asm/dwarf2.h>
/* put return address in eax (arg1) */ /* put return address in eax (arg1) */
.macro THUNK name, func, put_ret_addr_in_eax=0 .macro THUNK name, func, put_ret_addr_in_eax=0
.globl \name .globl \name
\name: \name:
CFI_STARTPROC pushl %eax
pushl_cfi_reg eax pushl %ecx
pushl_cfi_reg ecx pushl %edx
pushl_cfi_reg edx
.if \put_ret_addr_in_eax .if \put_ret_addr_in_eax
/* Place EIP in the arg1 */ /* Place EIP in the arg1 */
...@@ -23,11 +21,10 @@ ...@@ -23,11 +21,10 @@
.endif .endif
call \func call \func
popl_cfi_reg edx popl %edx
popl_cfi_reg ecx popl %ecx
popl_cfi_reg eax popl %eax
ret ret
CFI_ENDPROC
_ASM_NOKPROBE(\name) _ASM_NOKPROBE(\name)
.endm .endm
......
...@@ -6,35 +6,32 @@ ...@@ -6,35 +6,32 @@
* Subject to the GNU public license, v.2. No warranty of any kind. * Subject to the GNU public license, v.2. No warranty of any kind.
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h> #include "calling.h"
#include <asm/calling.h>
#include <asm/asm.h> #include <asm/asm.h>
/* rdi: arg1 ... normal C conventions. rax is saved/restored. */ /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
.macro THUNK name, func, put_ret_addr_in_rdi=0 .macro THUNK name, func, put_ret_addr_in_rdi=0
.globl \name .globl \name
\name: \name:
CFI_STARTPROC
/* this one pushes 9 elems, the next one would be %rIP */ /* this one pushes 9 elems, the next one would be %rIP */
pushq_cfi_reg rdi pushq %rdi
pushq_cfi_reg rsi pushq %rsi
pushq_cfi_reg rdx pushq %rdx
pushq_cfi_reg rcx pushq %rcx
pushq_cfi_reg rax pushq %rax
pushq_cfi_reg r8 pushq %r8
pushq_cfi_reg r9 pushq %r9
pushq_cfi_reg r10 pushq %r10
pushq_cfi_reg r11 pushq %r11
.if \put_ret_addr_in_rdi .if \put_ret_addr_in_rdi
/* 9*8(%rsp) is return addr on stack */ /* 9*8(%rsp) is return addr on stack */
movq_cfi_restore 9*8, rdi movq 9*8(%rsp), %rdi
.endif .endif
call \func call \func
jmp restore jmp restore
CFI_ENDPROC
_ASM_NOKPROBE(\name) _ASM_NOKPROBE(\name)
.endm .endm
...@@ -57,19 +54,16 @@ ...@@ -57,19 +54,16 @@
#if defined(CONFIG_TRACE_IRQFLAGS) \ #if defined(CONFIG_TRACE_IRQFLAGS) \
|| defined(CONFIG_DEBUG_LOCK_ALLOC) \ || defined(CONFIG_DEBUG_LOCK_ALLOC) \
|| defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT)
CFI_STARTPROC
CFI_ADJUST_CFA_OFFSET 9*8
restore: restore:
popq_cfi_reg r11 popq %r11
popq_cfi_reg r10 popq %r10
popq_cfi_reg r9 popq %r9
popq_cfi_reg r8 popq %r8
popq_cfi_reg rax popq %rax
popq_cfi_reg rcx popq %rcx
popq_cfi_reg rdx popq %rdx
popq_cfi_reg rsi popq %rsi
popq_cfi_reg rdi popq %rdi
ret ret
CFI_ENDPROC
_ASM_NOKPROBE(restore) _ASM_NOKPROBE(restore)
#endif #endif
#
# Makefile for the x86 low level vsyscall code
#
obj-y := vsyscall_gtod.o
obj-$(CONFIG_X86_VSYSCALL_EMULATION) += vsyscall_64.o vsyscall_emu_64.o
...@@ -24,6 +24,6 @@ TRACE_EVENT(emulate_vsyscall, ...@@ -24,6 +24,6 @@ TRACE_EVENT(emulate_vsyscall,
#endif #endif
#undef TRACE_INCLUDE_PATH #undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../arch/x86/kernel #define TRACE_INCLUDE_PATH ../../arch/x86/entry/vsyscall/
#define TRACE_INCLUDE_FILE vsyscall_trace #define TRACE_INCLUDE_FILE vsyscall_trace
#include <trace/define_trace.h> #include <trace/define_trace.h>
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
# Makefile for the ia32 kernel emulation subsystem. # Makefile for the ia32 kernel emulation subsystem.
# #
obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_signal.o obj-$(CONFIG_IA32_EMULATION) := sys_ia32.o ia32_signal.o
obj-$(CONFIG_IA32_AOUT) += ia32_aout.o obj-$(CONFIG_IA32_AOUT) += ia32_aout.o
......
#ifndef _ASM_X86_DWARF2_H
#define _ASM_X86_DWARF2_H
#ifndef __ASSEMBLY__
#warning "asm/dwarf2.h should be only included in pure assembly files"
#endif
/*
* Macros for dwarf2 CFI unwind table entries.
* See "as.info" for details on these pseudo ops. Unfortunately
* they are only supported in very new binutils, so define them
* away for older version.
*/
#ifdef CONFIG_AS_CFI
#define CFI_STARTPROC .cfi_startproc
#define CFI_ENDPROC .cfi_endproc
#define CFI_DEF_CFA .cfi_def_cfa
#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
#define CFI_OFFSET .cfi_offset
#define CFI_REL_OFFSET .cfi_rel_offset
#define CFI_REGISTER .cfi_register
#define CFI_RESTORE .cfi_restore
#define CFI_REMEMBER_STATE .cfi_remember_state
#define CFI_RESTORE_STATE .cfi_restore_state
#define CFI_UNDEFINED .cfi_undefined
#define CFI_ESCAPE .cfi_escape
#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
#define CFI_SIGNAL_FRAME .cfi_signal_frame
#else
#define CFI_SIGNAL_FRAME
#endif
#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__)
/*
* Emit CFI data in .debug_frame sections, not .eh_frame sections.
* The latter we currently just discard since we don't do DWARF
* unwinding at runtime. So only the offline DWARF information is
* useful to anyone. Note we should not use this directive if this
* file is used in the vDSO assembly, or if vmlinux.lds.S gets
* changed so it doesn't discard .eh_frame.
*/
.cfi_sections .debug_frame
#endif
#else
/*
* Due to the structure of pre-exisiting code, don't use assembler line
* comment character # to ignore the arguments. Instead, use a dummy macro.
*/
.macro cfi_ignore a=0, b=0, c=0, d=0
.endm
#define CFI_STARTPROC cfi_ignore
#define CFI_ENDPROC cfi_ignore
#define CFI_DEF_CFA cfi_ignore
#define CFI_DEF_CFA_REGISTER cfi_ignore
#define CFI_DEF_CFA_OFFSET cfi_ignore
#define CFI_ADJUST_CFA_OFFSET cfi_ignore
#define CFI_OFFSET cfi_ignore
#define CFI_REL_OFFSET cfi_ignore
#define CFI_REGISTER cfi_ignore
#define CFI_RESTORE cfi_ignore
#define CFI_REMEMBER_STATE cfi_ignore
#define CFI_RESTORE_STATE cfi_ignore
#define CFI_UNDEFINED cfi_ignore
#define CFI_ESCAPE cfi_ignore
#define CFI_SIGNAL_FRAME cfi_ignore
#endif
/*
* An attempt to make CFI annotations more or less
* correct and shorter. It is implied that you know
* what you're doing if you use them.
*/
#ifdef __ASSEMBLY__
#ifdef CONFIG_X86_64
.macro pushq_cfi reg
pushq \reg
CFI_ADJUST_CFA_OFFSET 8
.endm
.macro pushq_cfi_reg reg
pushq %\reg
CFI_ADJUST_CFA_OFFSET 8
CFI_REL_OFFSET \reg, 0
.endm
.macro popq_cfi reg
popq \reg
CFI_ADJUST_CFA_OFFSET -8
.endm
.macro popq_cfi_reg reg
popq %\reg
CFI_ADJUST_CFA_OFFSET -8
CFI_RESTORE \reg
.endm
.macro pushfq_cfi
pushfq
CFI_ADJUST_CFA_OFFSET 8
.endm
.macro popfq_cfi
popfq
CFI_ADJUST_CFA_OFFSET -8
.endm
.macro movq_cfi reg offset=0
movq %\reg, \offset(%rsp)
CFI_REL_OFFSET \reg, \offset
.endm
.macro movq_cfi_restore offset reg
movq \offset(%rsp), %\reg
CFI_RESTORE \reg
.endm
#else /*!CONFIG_X86_64*/
.macro pushl_cfi reg
pushl \reg
CFI_ADJUST_CFA_OFFSET 4
.endm
.macro pushl_cfi_reg reg
pushl %\reg
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET \reg, 0
.endm
.macro popl_cfi reg
popl \reg
CFI_ADJUST_CFA_OFFSET -4
.endm
.macro popl_cfi_reg reg
popl %\reg
CFI_ADJUST_CFA_OFFSET -4
CFI_RESTORE \reg
.endm
.macro pushfl_cfi
pushfl
CFI_ADJUST_CFA_OFFSET 4
.endm
.macro popfl_cfi
popfl
CFI_ADJUST_CFA_OFFSET -4
.endm
.macro movl_cfi reg offset=0
movl %\reg, \offset(%esp)
CFI_REL_OFFSET \reg, \offset
.endm
.macro movl_cfi_restore offset reg
movl \offset(%esp), %\reg
CFI_RESTORE \reg
.endm
#endif /*!CONFIG_X86_64*/
#endif /*__ASSEMBLY__*/
#endif /* _ASM_X86_DWARF2_H */
#ifdef __ASSEMBLY__ #ifdef __ASSEMBLY__
#include <asm/asm.h> #include <asm/asm.h>
#include <asm/dwarf2.h>
/* The annotation hides the frame from the unwinder and makes it look /* The annotation hides the frame from the unwinder and makes it look
like a ordinary ebp save/restore. This avoids some special cases for like a ordinary ebp save/restore. This avoids some special cases for
frame pointer later */ frame pointer later */
#ifdef CONFIG_FRAME_POINTER #ifdef CONFIG_FRAME_POINTER
.macro FRAME .macro FRAME
__ASM_SIZE(push,_cfi) %__ASM_REG(bp) __ASM_SIZE(push,) %__ASM_REG(bp)
CFI_REL_OFFSET __ASM_REG(bp), 0
__ASM_SIZE(mov) %__ASM_REG(sp), %__ASM_REG(bp) __ASM_SIZE(mov) %__ASM_REG(sp), %__ASM_REG(bp)
.endm .endm
.macro ENDFRAME .macro ENDFRAME
__ASM_SIZE(pop,_cfi) %__ASM_REG(bp) __ASM_SIZE(pop,) %__ASM_REG(bp)
CFI_RESTORE __ASM_REG(bp)
.endm .endm
#else #else
.macro FRAME .macro FRAME
......
...@@ -206,8 +206,13 @@ do { \ ...@@ -206,8 +206,13 @@ do { \
#endif /* !CONFIG_PARAVIRT */ #endif /* !CONFIG_PARAVIRT */
#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val), \ /*
(u32)((val) >> 32)) * 64-bit version of wrmsr_safe():
*/
static inline int wrmsrl_safe(u32 msr, u64 val)
{
return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
}
#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high)) #define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))
......
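The macro-to-inline conversion above keeps the same split of the 64-bit value into two 32-bit halves before handing them to wrmsr_safe(). A standalone sketch of that split, in plain C with no kernel headers:

#include <stdint.h>

static void split_u64(uint64_t val, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)val;          /* low  32 bits -> wrmsr_safe() arg 2 */
	*hi = (uint32_t)(val >> 32);  /* high 32 bits -> wrmsr_safe() arg 3 */
}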
...@@ -5,12 +5,14 @@ ...@@ -5,12 +5,14 @@
/* misc architecture specific prototypes */ /* misc architecture specific prototypes */
void system_call(void);
void syscall_init(void); void syscall_init(void);
void ia32_syscall(void); void entry_SYSCALL_64(void);
void ia32_cstar_target(void); void entry_SYSCALL_compat(void);
void ia32_sysenter_target(void); void entry_INT80_32(void);
void entry_INT80_compat(void);
void entry_SYSENTER_32(void);
void entry_SYSENTER_compat(void);
void x86_configure_nx(void); void x86_configure_nx(void);
void x86_report_nx(void); void x86_report_nx(void);
......
...@@ -231,11 +231,21 @@ ...@@ -231,11 +231,21 @@
#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES* 8) #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES* 8)
#ifdef __KERNEL__ #ifdef __KERNEL__
/*
* early_idt_handler_array is an array of entry points referenced in the
* early IDT. For simplicity, it's a real array with one entry point
* every nine bytes. That leaves room for an optional 'push $0' if the
* vector has no error code (two bytes), a 'push $vector_number' (two
* bytes), and a jump to the common entry code (up to five bytes).
*/
#define EARLY_IDT_HANDLER_SIZE 9
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5]; extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
#ifdef CONFIG_TRACING #ifdef CONFIG_TRACING
# define trace_early_idt_handlers early_idt_handlers # define trace_early_idt_handler_array early_idt_handler_array
#endif #endif
/* /*
......
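The arithmetic behind EARLY_IDT_HANDLER_SIZE, following only what the new comment states (a sketch, not kernel code):

#include <assert.h>

enum {
	PUSH_ZERO_BYTES = 2, /* optional "push $0" when the vector has no error code */
	PUSH_VEC_BYTES  = 2, /* "push $vector_number"                                */
	JMP_BYTES       = 5, /* jump to the common entry code, worst case            */
};

static_assert(PUSH_ZERO_BYTES + PUSH_VEC_BYTES + JMP_BYTES == 9,
	      "matches EARLY_IDT_HANDLER_SIZE");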
...@@ -22,7 +22,7 @@ KASAN_SANITIZE_dumpstack_$(BITS).o := n ...@@ -22,7 +22,7 @@ KASAN_SANITIZE_dumpstack_$(BITS).o := n
CFLAGS_irq.o := -I$(src)/../include/asm/trace CFLAGS_irq.o := -I$(src)/../include/asm/trace
obj-y := process_$(BITS).o signal.o entry_$(BITS).o obj-y := process_$(BITS).o signal.o
obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
...@@ -31,9 +31,6 @@ obj-y += probe_roms.o ...@@ -31,9 +31,6 @@ obj-y += probe_roms.o
obj-$(CONFIG_X86_32) += i386_ksyms_32.o obj-$(CONFIG_X86_32) += i386_ksyms_32.o
obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
obj-$(CONFIG_X86_64) += mcount_64.o obj-$(CONFIG_X86_64) += mcount_64.o
obj-y += syscall_$(BITS).o vsyscall_gtod.o
obj-$(CONFIG_IA32_EMULATION) += syscall_32.o
obj-$(CONFIG_X86_VSYSCALL_EMULATION) += vsyscall_64.o vsyscall_emu_64.o
obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o
obj-$(CONFIG_SYSFS) += ksysfs.o obj-$(CONFIG_SYSFS) += ksysfs.o
obj-y += bootflag.o e820.o obj-y += bootflag.o e820.o
......
...@@ -66,7 +66,7 @@ int main(void) ...@@ -66,7 +66,7 @@ int main(void)
DEFINE(__NR_syscall_max, sizeof(syscalls_64) - 1); DEFINE(__NR_syscall_max, sizeof(syscalls_64) - 1);
DEFINE(NR_syscalls, sizeof(syscalls_64)); DEFINE(NR_syscalls, sizeof(syscalls_64));
DEFINE(__NR_ia32_syscall_max, sizeof(syscalls_ia32) - 1); DEFINE(__NR_entry_INT80_compat_max, sizeof(syscalls_ia32) - 1);
DEFINE(IA32_NR_syscalls, sizeof(syscalls_ia32)); DEFINE(IA32_NR_syscalls, sizeof(syscalls_ia32));
return 0; return 0;
......
...@@ -1026,7 +1026,7 @@ void enable_sep_cpu(void) ...@@ -1026,7 +1026,7 @@ void enable_sep_cpu(void)
(unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack), (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack),
0); 0);
wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)ia32_sysenter_target, 0); wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
out: out:
put_cpu(); put_cpu();
...@@ -1204,10 +1204,10 @@ void syscall_init(void) ...@@ -1204,10 +1204,10 @@ void syscall_init(void)
* set CS/DS but only a 32bit target. LSTAR sets the 64bit rip. * set CS/DS but only a 32bit target. LSTAR sets the 64bit rip.
*/ */
wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32); wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
wrmsrl(MSR_LSTAR, system_call); wrmsrl(MSR_LSTAR, entry_SYSCALL_64);
#ifdef CONFIG_IA32_EMULATION #ifdef CONFIG_IA32_EMULATION
wrmsrl(MSR_CSTAR, ia32_cstar_target); wrmsrl(MSR_CSTAR, entry_SYSCALL_compat);
/* /*
* This only works on Intel CPUs. * This only works on Intel CPUs.
* On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP. * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP.
...@@ -1216,7 +1216,7 @@ void syscall_init(void) ...@@ -1216,7 +1216,7 @@ void syscall_init(void)
*/ */
wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target); wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
#else #else
wrmsrl(MSR_CSTAR, ignore_sysret); wrmsrl(MSR_CSTAR, ignore_sysret);
wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG); wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
......
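A sketch of the selector packing done by the wrmsrl(MSR_STAR, ...) line above. The field meanings (SYSRET base selector in bits 63:48, SYSCALL CS/SS in bits 47:32) are the architectural layout and are stated here as background, not taken from this diff:

#include <stdint.h>

static uint64_t pack_star(uint16_t kernel_cs, uint16_t user32_cs)
{
	return ((uint64_t)user32_cs << 48) |  /* base for SYSRET CS/SS  */
	       ((uint64_t)kernel_cs << 32);   /* CS/SS used on SYSCALL  */
}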
...@@ -167,7 +167,7 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data) ...@@ -167,7 +167,7 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
clear_bss(); clear_bss();
for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
set_intr_gate(i, early_idt_handlers[i]); set_intr_gate(i, early_idt_handler_array[i]);
load_idt((const struct desc_ptr *)&idt_descr); load_idt((const struct desc_ptr *)&idt_descr);
copy_bootdata(__va(real_mode_data)); copy_bootdata(__va(real_mode_data));
......
...@@ -478,21 +478,22 @@ is486: ...@@ -478,21 +478,22 @@ is486:
__INIT __INIT
setup_once: setup_once:
/* /*
* Set up a idt with 256 entries pointing to ignore_int, * Set up a idt with 256 interrupt gates that push zero if there
* interrupt gates. It doesn't actually load idt - that needs * is no error code and then jump to early_idt_handler_common.
* to be done on each CPU. Interrupts are enabled elsewhere, * It doesn't actually load the idt - that needs to be done on
* when we can be relatively sure everything is ok. * each CPU. Interrupts are enabled elsewhere, when we can be
* relatively sure everything is ok.
*/ */
movl $idt_table,%edi movl $idt_table,%edi
movl $early_idt_handlers,%eax movl $early_idt_handler_array,%eax
movl $NUM_EXCEPTION_VECTORS,%ecx movl $NUM_EXCEPTION_VECTORS,%ecx
1: 1:
movl %eax,(%edi) movl %eax,(%edi)
movl %eax,4(%edi) movl %eax,4(%edi)
/* interrupt gate, dpl=0, present */ /* interrupt gate, dpl=0, present */
movl $(0x8E000000 + __KERNEL_CS),2(%edi) movl $(0x8E000000 + __KERNEL_CS),2(%edi)
addl $9,%eax addl $EARLY_IDT_HANDLER_SIZE,%eax
addl $8,%edi addl $8,%edi
loop 1b loop 1b
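The loop above hand-assembles one 8-byte interrupt gate per vector. A plain-C sketch of the resulting layout; the struct and helper names are made up, while the 0x8e00 "present, DPL 0, 32-bit interrupt gate" value comes from the 0x8E000000 constant in the code:

#include <stdint.h>

struct idt_gate {
	uint16_t offset_low;   /* handler bits  0..15 (first movl to (%edi))     */
	uint16_t selector;     /* __KERNEL_CS (low half of the 2(%edi) store)    */
	uint16_t flags;        /* 0x8e00: present, DPL 0, 32-bit interrupt gate  */
	uint16_t offset_high;  /* handler bits 16..31 (movl to 4(%edi))          */
};

static struct idt_gate make_gate(uint32_t handler, uint16_t kernel_cs)
{
	struct idt_gate g = {
		.offset_low  = (uint16_t)(handler & 0xffff),
		.selector    = kernel_cs,
		.flags       = 0x8e00,
		.offset_high = (uint16_t)(handler >> 16),
	};
	return g;
}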
...@@ -524,26 +525,28 @@ setup_once: ...@@ -524,26 +525,28 @@ setup_once:
andl $0,setup_once_ref /* Once is enough, thanks */ andl $0,setup_once_ref /* Once is enough, thanks */
ret ret
ENTRY(early_idt_handlers) ENTRY(early_idt_handler_array)
# 36(%esp) %eflags # 36(%esp) %eflags
# 32(%esp) %cs # 32(%esp) %cs
# 28(%esp) %eip # 28(%esp) %eip
# 24(%rsp) error code # 24(%rsp) error code
i = 0 i = 0
.rept NUM_EXCEPTION_VECTORS .rept NUM_EXCEPTION_VECTORS
.if (EXCEPTION_ERRCODE_MASK >> i) & 1 .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
ASM_NOP2
.else
pushl $0 # Dummy error code, to make stack frame uniform pushl $0 # Dummy error code, to make stack frame uniform
.endif .endif
pushl $i # 20(%esp) Vector number pushl $i # 20(%esp) Vector number
jmp early_idt_handler jmp early_idt_handler_common
i = i + 1 i = i + 1
.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
.endr .endr
ENDPROC(early_idt_handlers) ENDPROC(early_idt_handler_array)
/* This is global to keep gas from relaxing the jumps */ early_idt_handler_common:
ENTRY(early_idt_handler) /*
* The stack is the hardware frame, an error code or zero, and the
* vector number.
*/
cld cld
cmpl $2,(%esp) # X86_TRAP_NMI cmpl $2,(%esp) # X86_TRAP_NMI
...@@ -603,7 +606,7 @@ ex_entry: ...@@ -603,7 +606,7 @@ ex_entry:
.Lis_nmi: .Lis_nmi:
addl $8,%esp /* drop vector number and error code */ addl $8,%esp /* drop vector number and error code */
iret iret
ENDPROC(early_idt_handler) ENDPROC(early_idt_handler_common)
/* This is the default interrupt "handler" :-) */ /* This is the default interrupt "handler" :-) */
ALIGN ALIGN
......
...@@ -321,26 +321,28 @@ bad_address: ...@@ -321,26 +321,28 @@ bad_address:
jmp bad_address jmp bad_address
__INIT __INIT
.globl early_idt_handlers ENTRY(early_idt_handler_array)
early_idt_handlers:
# 104(%rsp) %rflags # 104(%rsp) %rflags
# 96(%rsp) %cs # 96(%rsp) %cs
# 88(%rsp) %rip # 88(%rsp) %rip
# 80(%rsp) error code # 80(%rsp) error code
i = 0 i = 0
.rept NUM_EXCEPTION_VECTORS .rept NUM_EXCEPTION_VECTORS
.if (EXCEPTION_ERRCODE_MASK >> i) & 1 .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
ASM_NOP2
.else
pushq $0 # Dummy error code, to make stack frame uniform pushq $0 # Dummy error code, to make stack frame uniform
.endif .endif
pushq $i # 72(%rsp) Vector number pushq $i # 72(%rsp) Vector number
jmp early_idt_handler jmp early_idt_handler_common
i = i + 1 i = i + 1
.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
.endr .endr
ENDPROC(early_idt_handler_array)
/* This is global to keep gas from relaxing the jumps */ early_idt_handler_common:
ENTRY(early_idt_handler) /*
* The stack is the hardware frame, an error code or zero, and the
* vector number.
*/
cld cld
cmpl $2,(%rsp) # X86_TRAP_NMI cmpl $2,(%rsp) # X86_TRAP_NMI
...@@ -412,7 +414,7 @@ ENTRY(early_idt_handler) ...@@ -412,7 +414,7 @@ ENTRY(early_idt_handler)
.Lis_nmi: .Lis_nmi:
addq $16,%rsp # drop vector number and error code addq $16,%rsp # drop vector number and error code
INTERRUPT_RETURN INTERRUPT_RETURN
ENDPROC(early_idt_handler) ENDPROC(early_idt_handler_common)
__INITDATA __INITDATA
......
...@@ -72,8 +72,7 @@ gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss; ...@@ -72,8 +72,7 @@ gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
#else #else
#include <asm/processor-flags.h> #include <asm/processor-flags.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/proto.h>
asmlinkage int system_call(void);
#endif #endif
/* Must be page-aligned because the real IDT is used in a fixmap. */ /* Must be page-aligned because the real IDT is used in a fixmap. */
...@@ -980,12 +979,12 @@ void __init trap_init(void) ...@@ -980,12 +979,12 @@ void __init trap_init(void)
set_bit(i, used_vectors); set_bit(i, used_vectors);
#ifdef CONFIG_IA32_EMULATION #ifdef CONFIG_IA32_EMULATION
set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall); set_system_intr_gate(IA32_SYSCALL_VECTOR, entry_INT80_compat);
set_bit(IA32_SYSCALL_VECTOR, used_vectors); set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif #endif
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
set_system_trap_gate(IA32_SYSCALL_VECTOR, &system_call); set_system_trap_gate(IA32_SYSCALL_VECTOR, entry_INT80_32);
set_bit(IA32_SYSCALL_VECTOR, used_vectors); set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif #endif
......
...@@ -17,7 +17,6 @@ clean-files := inat-tables.c ...@@ -17,7 +17,6 @@ clean-files := inat-tables.c
obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
lib-y := delay.o misc.o cmdline.o lib-y := delay.o misc.o cmdline.o
lib-y += thunk_$(BITS).o
lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
lib-y += memcpy_$(BITS).o lib-y += memcpy_$(BITS).o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
......
...@@ -11,26 +11,23 @@ ...@@ -11,26 +11,23 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
#include <asm/dwarf2.h>
/* if you want SMP support, implement these with real spinlocks */ /* if you want SMP support, implement these with real spinlocks */
.macro LOCK reg .macro LOCK reg
pushfl_cfi pushfl
cli cli
.endm .endm
.macro UNLOCK reg .macro UNLOCK reg
popfl_cfi popfl
.endm .endm
#define BEGIN(op) \ #define BEGIN(op) \
.macro endp; \ .macro endp; \
CFI_ENDPROC; \
ENDPROC(atomic64_##op##_386); \ ENDPROC(atomic64_##op##_386); \
.purgem endp; \ .purgem endp; \
.endm; \ .endm; \
ENTRY(atomic64_##op##_386); \ ENTRY(atomic64_##op##_386); \
CFI_STARTPROC; \
LOCK v; LOCK v;
#define ENDP endp #define ENDP endp
......
...@@ -11,7 +11,6 @@ ...@@ -11,7 +11,6 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
#include <asm/dwarf2.h>
.macro read64 reg .macro read64 reg
movl %ebx, %eax movl %ebx, %eax
...@@ -22,16 +21,11 @@ ...@@ -22,16 +21,11 @@
.endm .endm
ENTRY(atomic64_read_cx8) ENTRY(atomic64_read_cx8)
CFI_STARTPROC
read64 %ecx read64 %ecx
ret ret
CFI_ENDPROC
ENDPROC(atomic64_read_cx8) ENDPROC(atomic64_read_cx8)
ENTRY(atomic64_set_cx8) ENTRY(atomic64_set_cx8)
CFI_STARTPROC
1: 1:
/* we don't need LOCK_PREFIX since aligned 64-bit writes /* we don't need LOCK_PREFIX since aligned 64-bit writes
* are atomic on 586 and newer */ * are atomic on 586 and newer */
...@@ -39,28 +33,23 @@ ENTRY(atomic64_set_cx8) ...@@ -39,28 +33,23 @@ ENTRY(atomic64_set_cx8)
jne 1b jne 1b
ret ret
CFI_ENDPROC
ENDPROC(atomic64_set_cx8) ENDPROC(atomic64_set_cx8)
ENTRY(atomic64_xchg_cx8) ENTRY(atomic64_xchg_cx8)
CFI_STARTPROC
1: 1:
LOCK_PREFIX LOCK_PREFIX
cmpxchg8b (%esi) cmpxchg8b (%esi)
jne 1b jne 1b
ret ret
CFI_ENDPROC
ENDPROC(atomic64_xchg_cx8) ENDPROC(atomic64_xchg_cx8)
.macro addsub_return func ins insc .macro addsub_return func ins insc
ENTRY(atomic64_\func\()_return_cx8) ENTRY(atomic64_\func\()_return_cx8)
CFI_STARTPROC pushl %ebp
pushl_cfi_reg ebp pushl %ebx
pushl_cfi_reg ebx pushl %esi
pushl_cfi_reg esi pushl %edi
pushl_cfi_reg edi
movl %eax, %esi movl %eax, %esi
movl %edx, %edi movl %edx, %edi
...@@ -79,12 +68,11 @@ ENTRY(atomic64_\func\()_return_cx8) ...@@ -79,12 +68,11 @@ ENTRY(atomic64_\func\()_return_cx8)
10: 10:
movl %ebx, %eax movl %ebx, %eax
movl %ecx, %edx movl %ecx, %edx
popl_cfi_reg edi popl %edi
popl_cfi_reg esi popl %esi
popl_cfi_reg ebx popl %ebx
popl_cfi_reg ebp popl %ebp
ret ret
CFI_ENDPROC
ENDPROC(atomic64_\func\()_return_cx8) ENDPROC(atomic64_\func\()_return_cx8)
.endm .endm
...@@ -93,8 +81,7 @@ addsub_return sub sub sbb ...@@ -93,8 +81,7 @@ addsub_return sub sub sbb
.macro incdec_return func ins insc .macro incdec_return func ins insc
ENTRY(atomic64_\func\()_return_cx8) ENTRY(atomic64_\func\()_return_cx8)
CFI_STARTPROC pushl %ebx
pushl_cfi_reg ebx
read64 %esi read64 %esi
1: 1:
...@@ -109,9 +96,8 @@ ENTRY(atomic64_\func\()_return_cx8) ...@@ -109,9 +96,8 @@ ENTRY(atomic64_\func\()_return_cx8)
10: 10:
movl %ebx, %eax movl %ebx, %eax
movl %ecx, %edx movl %ecx, %edx
popl_cfi_reg ebx popl %ebx
ret ret
CFI_ENDPROC
ENDPROC(atomic64_\func\()_return_cx8) ENDPROC(atomic64_\func\()_return_cx8)
.endm .endm
...@@ -119,8 +105,7 @@ incdec_return inc add adc ...@@ -119,8 +105,7 @@ incdec_return inc add adc
incdec_return dec sub sbb incdec_return dec sub sbb
ENTRY(atomic64_dec_if_positive_cx8) ENTRY(atomic64_dec_if_positive_cx8)
CFI_STARTPROC pushl %ebx
pushl_cfi_reg ebx
read64 %esi read64 %esi
1: 1:
...@@ -136,18 +121,16 @@ ENTRY(atomic64_dec_if_positive_cx8) ...@@ -136,18 +121,16 @@ ENTRY(atomic64_dec_if_positive_cx8)
2: 2:
movl %ebx, %eax movl %ebx, %eax
movl %ecx, %edx movl %ecx, %edx
popl_cfi_reg ebx popl %ebx
ret ret
CFI_ENDPROC
ENDPROC(atomic64_dec_if_positive_cx8) ENDPROC(atomic64_dec_if_positive_cx8)
ENTRY(atomic64_add_unless_cx8) ENTRY(atomic64_add_unless_cx8)
CFI_STARTPROC pushl %ebp
pushl_cfi_reg ebp pushl %ebx
pushl_cfi_reg ebx
/* these just push these two parameters on the stack */ /* these just push these two parameters on the stack */
pushl_cfi_reg edi pushl %edi
pushl_cfi_reg ecx pushl %ecx
movl %eax, %ebp movl %eax, %ebp
movl %edx, %edi movl %edx, %edi
...@@ -168,21 +151,18 @@ ENTRY(atomic64_add_unless_cx8) ...@@ -168,21 +151,18 @@ ENTRY(atomic64_add_unless_cx8)
movl $1, %eax movl $1, %eax
3: 3:
addl $8, %esp addl $8, %esp
CFI_ADJUST_CFA_OFFSET -8 popl %ebx
popl_cfi_reg ebx popl %ebp
popl_cfi_reg ebp
ret ret
4: 4:
cmpl %edx, 4(%esp) cmpl %edx, 4(%esp)
jne 2b jne 2b
xorl %eax, %eax xorl %eax, %eax
jmp 3b jmp 3b
CFI_ENDPROC
ENDPROC(atomic64_add_unless_cx8) ENDPROC(atomic64_add_unless_cx8)
ENTRY(atomic64_inc_not_zero_cx8) ENTRY(atomic64_inc_not_zero_cx8)
CFI_STARTPROC pushl %ebx
pushl_cfi_reg ebx
read64 %esi read64 %esi
1: 1:
...@@ -199,7 +179,6 @@ ENTRY(atomic64_inc_not_zero_cx8) ...@@ -199,7 +179,6 @@ ENTRY(atomic64_inc_not_zero_cx8)
movl $1, %eax movl $1, %eax
3: 3:
popl_cfi_reg ebx popl %ebx
ret ret
CFI_ENDPROC
ENDPROC(atomic64_inc_not_zero_cx8) ENDPROC(atomic64_inc_not_zero_cx8)
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/asm.h> #include <asm/asm.h>
...@@ -50,9 +49,8 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) ...@@ -50,9 +49,8 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
* alignment for the unrolled loop. * alignment for the unrolled loop.
*/ */
ENTRY(csum_partial) ENTRY(csum_partial)
CFI_STARTPROC pushl %esi
pushl_cfi_reg esi pushl %ebx
pushl_cfi_reg ebx
movl 20(%esp),%eax # Function arg: unsigned int sum movl 20(%esp),%eax # Function arg: unsigned int sum
movl 16(%esp),%ecx # Function arg: int len movl 16(%esp),%ecx # Function arg: int len
movl 12(%esp),%esi # Function arg: unsigned char *buff movl 12(%esp),%esi # Function arg: unsigned char *buff
...@@ -129,10 +127,9 @@ ENTRY(csum_partial) ...@@ -129,10 +127,9 @@ ENTRY(csum_partial)
jz 8f jz 8f
roll $8, %eax roll $8, %eax
8: 8:
popl_cfi_reg ebx popl %ebx
popl_cfi_reg esi popl %esi
ret ret
CFI_ENDPROC
ENDPROC(csum_partial) ENDPROC(csum_partial)
#else #else
...@@ -140,9 +137,8 @@ ENDPROC(csum_partial) ...@@ -140,9 +137,8 @@ ENDPROC(csum_partial)
/* Version for PentiumII/PPro */ /* Version for PentiumII/PPro */
ENTRY(csum_partial) ENTRY(csum_partial)
CFI_STARTPROC pushl %esi
pushl_cfi_reg esi pushl %ebx
pushl_cfi_reg ebx
movl 20(%esp),%eax # Function arg: unsigned int sum movl 20(%esp),%eax # Function arg: unsigned int sum
movl 16(%esp),%ecx # Function arg: int len movl 16(%esp),%ecx # Function arg: int len
movl 12(%esp),%esi # Function arg: const unsigned char *buf movl 12(%esp),%esi # Function arg: const unsigned char *buf
...@@ -249,10 +245,9 @@ ENTRY(csum_partial) ...@@ -249,10 +245,9 @@ ENTRY(csum_partial)
jz 90f jz 90f
roll $8, %eax roll $8, %eax
90: 90:
popl_cfi_reg ebx popl %ebx
popl_cfi_reg esi popl %esi
ret ret
CFI_ENDPROC
ENDPROC(csum_partial) ENDPROC(csum_partial)
#endif #endif
...@@ -287,12 +282,10 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, ...@@ -287,12 +282,10 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
#define FP 12 #define FP 12
ENTRY(csum_partial_copy_generic) ENTRY(csum_partial_copy_generic)
CFI_STARTPROC
subl $4,%esp subl $4,%esp
CFI_ADJUST_CFA_OFFSET 4 pushl %edi
pushl_cfi_reg edi pushl %esi
pushl_cfi_reg esi pushl %ebx
pushl_cfi_reg ebx
movl ARGBASE+16(%esp),%eax # sum movl ARGBASE+16(%esp),%eax # sum
movl ARGBASE+12(%esp),%ecx # len movl ARGBASE+12(%esp),%ecx # len
movl ARGBASE+4(%esp),%esi # src movl ARGBASE+4(%esp),%esi # src
...@@ -401,12 +394,11 @@ DST( movb %cl, (%edi) ) ...@@ -401,12 +394,11 @@ DST( movb %cl, (%edi) )
.previous .previous
popl_cfi_reg ebx popl %ebx
popl_cfi_reg esi popl %esi
popl_cfi_reg edi popl %edi
popl_cfi %ecx # equivalent to addl $4,%esp popl %ecx # equivalent to addl $4,%esp
ret ret
CFI_ENDPROC
ENDPROC(csum_partial_copy_generic) ENDPROC(csum_partial_copy_generic)
#else #else
...@@ -426,10 +418,9 @@ ENDPROC(csum_partial_copy_generic) ...@@ -426,10 +418,9 @@ ENDPROC(csum_partial_copy_generic)
#define ARGBASE 12 #define ARGBASE 12
ENTRY(csum_partial_copy_generic) ENTRY(csum_partial_copy_generic)
CFI_STARTPROC pushl %ebx
pushl_cfi_reg ebx pushl %edi
pushl_cfi_reg edi pushl %esi
pushl_cfi_reg esi
movl ARGBASE+4(%esp),%esi #src movl ARGBASE+4(%esp),%esi #src
movl ARGBASE+8(%esp),%edi #dst movl ARGBASE+8(%esp),%edi #dst
movl ARGBASE+12(%esp),%ecx #len movl ARGBASE+12(%esp),%ecx #len
...@@ -489,11 +480,10 @@ DST( movb %dl, (%edi) ) ...@@ -489,11 +480,10 @@ DST( movb %dl, (%edi) )
jmp 7b jmp 7b
.previous .previous
popl_cfi_reg esi popl %esi
popl_cfi_reg edi popl %edi
popl_cfi_reg ebx popl %ebx
ret ret
CFI_ENDPROC
ENDPROC(csum_partial_copy_generic) ENDPROC(csum_partial_copy_generic)
#undef ROUND #undef ROUND
......
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
...@@ -15,7 +14,6 @@ ...@@ -15,7 +14,6 @@
* %rdi - page * %rdi - page
*/ */
ENTRY(clear_page) ENTRY(clear_page)
CFI_STARTPROC
ALTERNATIVE_2 "jmp clear_page_orig", "", X86_FEATURE_REP_GOOD, \ ALTERNATIVE_2 "jmp clear_page_orig", "", X86_FEATURE_REP_GOOD, \
"jmp clear_page_c_e", X86_FEATURE_ERMS "jmp clear_page_c_e", X86_FEATURE_ERMS
...@@ -24,11 +22,9 @@ ENTRY(clear_page) ...@@ -24,11 +22,9 @@ ENTRY(clear_page)
xorl %eax,%eax xorl %eax,%eax
rep stosq rep stosq
ret ret
CFI_ENDPROC
ENDPROC(clear_page) ENDPROC(clear_page)
ENTRY(clear_page_orig) ENTRY(clear_page_orig)
CFI_STARTPROC
xorl %eax,%eax xorl %eax,%eax
movl $4096/64,%ecx movl $4096/64,%ecx
...@@ -48,14 +44,11 @@ ENTRY(clear_page_orig) ...@@ -48,14 +44,11 @@ ENTRY(clear_page_orig)
jnz .Lloop jnz .Lloop
nop nop
ret ret
CFI_ENDPROC
ENDPROC(clear_page_orig) ENDPROC(clear_page_orig)
ENTRY(clear_page_c_e) ENTRY(clear_page_c_e)
CFI_STARTPROC
movl $4096,%ecx movl $4096,%ecx
xorl %eax,%eax xorl %eax,%eax
rep stosb rep stosb
ret ret
CFI_ENDPROC
ENDPROC(clear_page_c_e) ENDPROC(clear_page_c_e)
...@@ -6,7 +6,6 @@ ...@@ -6,7 +6,6 @@
* *
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/percpu.h> #include <asm/percpu.h>
.text .text
...@@ -21,7 +20,6 @@ ...@@ -21,7 +20,6 @@
* %al : Operation successful * %al : Operation successful
*/ */
ENTRY(this_cpu_cmpxchg16b_emu) ENTRY(this_cpu_cmpxchg16b_emu)
CFI_STARTPROC
# #
# Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not # Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not
...@@ -32,7 +30,7 @@ CFI_STARTPROC ...@@ -32,7 +30,7 @@ CFI_STARTPROC
# *atomic* on a single cpu (as provided by the this_cpu_xx class of # *atomic* on a single cpu (as provided by the this_cpu_xx class of
# macros). # macros).
# #
pushfq_cfi pushfq
cli cli
cmpq PER_CPU_VAR((%rsi)), %rax cmpq PER_CPU_VAR((%rsi)), %rax
...@@ -43,17 +41,13 @@ CFI_STARTPROC ...@@ -43,17 +41,13 @@ CFI_STARTPROC
movq %rbx, PER_CPU_VAR((%rsi)) movq %rbx, PER_CPU_VAR((%rsi))
movq %rcx, PER_CPU_VAR(8(%rsi)) movq %rcx, PER_CPU_VAR(8(%rsi))
CFI_REMEMBER_STATE popfq
popfq_cfi
mov $1, %al mov $1, %al
ret ret
CFI_RESTORE_STATE
.Lnot_same: .Lnot_same:
popfq_cfi popfq
xor %al,%al xor %al,%al
ret ret
CFI_ENDPROC
ENDPROC(this_cpu_cmpxchg16b_emu) ENDPROC(this_cpu_cmpxchg16b_emu)
...@@ -7,7 +7,6 @@ ...@@ -7,7 +7,6 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
.text .text
...@@ -20,14 +19,13 @@ ...@@ -20,14 +19,13 @@
* %ecx : high 32 bits of new value * %ecx : high 32 bits of new value
*/ */
ENTRY(cmpxchg8b_emu) ENTRY(cmpxchg8b_emu)
CFI_STARTPROC
# #
# Emulate 'cmpxchg8b (%esi)' on UP except we don't # Emulate 'cmpxchg8b (%esi)' on UP except we don't
# set the whole ZF thing (caller will just compare # set the whole ZF thing (caller will just compare
# eax:edx with the expected value) # eax:edx with the expected value)
# #
pushfl_cfi pushfl
cli cli
cmpl (%esi), %eax cmpl (%esi), %eax
...@@ -38,18 +36,15 @@ CFI_STARTPROC ...@@ -38,18 +36,15 @@ CFI_STARTPROC
movl %ebx, (%esi) movl %ebx, (%esi)
movl %ecx, 4(%esi) movl %ecx, 4(%esi)
CFI_REMEMBER_STATE popfl
popfl_cfi
ret ret
CFI_RESTORE_STATE
.Lnot_same: .Lnot_same:
movl (%esi), %eax movl (%esi), %eax
.Lhalf_same: .Lhalf_same:
movl 4(%esi), %edx movl 4(%esi), %edx
popfl_cfi popfl
ret ret
CFI_ENDPROC
ENDPROC(cmpxchg8b_emu) ENDPROC(cmpxchg8b_emu)
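What the emulation above does, restated as C: on a UP kernel, masking interrupts is enough to make the compare-and-store atomic, and the caller compares the returned old value against what it expected instead of relying on ZF. The helper below is a sketch, not the kernel's interface:

#include <stdint.h>

static uint64_t cmpxchg8b_up(uint64_t *ptr, uint64_t expected, uint64_t newval)
{
	uint64_t cur;

	/* local_irq_save() in the real code: nothing else runs on a UP CPU */
	cur = *ptr;
	if (cur == expected)
		*ptr = newval;
	/* local_irq_restore() */
	return cur;              /* caller compares this against 'expected' */
}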
/* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */ /* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
...@@ -13,22 +12,16 @@ ...@@ -13,22 +12,16 @@
*/ */
ALIGN ALIGN
ENTRY(copy_page) ENTRY(copy_page)
CFI_STARTPROC
ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD
movl $4096/8, %ecx movl $4096/8, %ecx
rep movsq rep movsq
ret ret
CFI_ENDPROC
ENDPROC(copy_page) ENDPROC(copy_page)
ENTRY(copy_page_regs) ENTRY(copy_page_regs)
CFI_STARTPROC
subq $2*8, %rsp subq $2*8, %rsp
CFI_ADJUST_CFA_OFFSET 2*8
movq %rbx, (%rsp) movq %rbx, (%rsp)
CFI_REL_OFFSET rbx, 0
movq %r12, 1*8(%rsp) movq %r12, 1*8(%rsp)
CFI_REL_OFFSET r12, 1*8
movl $(4096/64)-5, %ecx movl $(4096/64)-5, %ecx
.p2align 4 .p2align 4
...@@ -87,11 +80,7 @@ ENTRY(copy_page_regs) ...@@ -87,11 +80,7 @@ ENTRY(copy_page_regs)
jnz .Loop2 jnz .Loop2
movq (%rsp), %rbx movq (%rsp), %rbx
CFI_RESTORE rbx
movq 1*8(%rsp), %r12 movq 1*8(%rsp), %r12
CFI_RESTORE r12
addq $2*8, %rsp addq $2*8, %rsp
CFI_ADJUST_CFA_OFFSET -2*8
ret ret
CFI_ENDPROC
ENDPROC(copy_page_regs) ENDPROC(copy_page_regs)
...@@ -7,7 +7,6 @@ ...@@ -7,7 +7,6 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/current.h> #include <asm/current.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
...@@ -18,7 +17,6 @@ ...@@ -18,7 +17,6 @@
/* Standard copy_to_user with segment limit checking */ /* Standard copy_to_user with segment limit checking */
ENTRY(_copy_to_user) ENTRY(_copy_to_user)
CFI_STARTPROC
GET_THREAD_INFO(%rax) GET_THREAD_INFO(%rax)
movq %rdi,%rcx movq %rdi,%rcx
addq %rdx,%rcx addq %rdx,%rcx
...@@ -30,12 +28,10 @@ ENTRY(_copy_to_user) ...@@ -30,12 +28,10 @@ ENTRY(_copy_to_user)
X86_FEATURE_REP_GOOD, \ X86_FEATURE_REP_GOOD, \
"jmp copy_user_enhanced_fast_string", \ "jmp copy_user_enhanced_fast_string", \
X86_FEATURE_ERMS X86_FEATURE_ERMS
CFI_ENDPROC
ENDPROC(_copy_to_user) ENDPROC(_copy_to_user)
/* Standard copy_from_user with segment limit checking */ /* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user) ENTRY(_copy_from_user)
CFI_STARTPROC
GET_THREAD_INFO(%rax) GET_THREAD_INFO(%rax)
movq %rsi,%rcx movq %rsi,%rcx
addq %rdx,%rcx addq %rdx,%rcx
...@@ -47,14 +43,12 @@ ENTRY(_copy_from_user) ...@@ -47,14 +43,12 @@ ENTRY(_copy_from_user)
X86_FEATURE_REP_GOOD, \ X86_FEATURE_REP_GOOD, \
"jmp copy_user_enhanced_fast_string", \ "jmp copy_user_enhanced_fast_string", \
X86_FEATURE_ERMS X86_FEATURE_ERMS
CFI_ENDPROC
ENDPROC(_copy_from_user) ENDPROC(_copy_from_user)
.section .fixup,"ax" .section .fixup,"ax"
/* must zero dest */ /* must zero dest */
ENTRY(bad_from_user) ENTRY(bad_from_user)
bad_from_user: bad_from_user:
CFI_STARTPROC
movl %edx,%ecx movl %edx,%ecx
xorl %eax,%eax xorl %eax,%eax
rep rep
...@@ -62,7 +56,6 @@ bad_from_user: ...@@ -62,7 +56,6 @@ bad_from_user:
bad_to_user: bad_to_user:
movl %edx,%eax movl %edx,%eax
ret ret
CFI_ENDPROC
ENDPROC(bad_from_user) ENDPROC(bad_from_user)
.previous .previous
...@@ -80,7 +73,6 @@ ENDPROC(bad_from_user) ...@@ -80,7 +73,6 @@ ENDPROC(bad_from_user)
* eax uncopied bytes or 0 if successful. * eax uncopied bytes or 0 if successful.
*/ */
ENTRY(copy_user_generic_unrolled) ENTRY(copy_user_generic_unrolled)
CFI_STARTPROC
ASM_STAC ASM_STAC
cmpl $8,%edx cmpl $8,%edx
jb 20f /* less then 8 bytes, go to byte copy loop */ jb 20f /* less then 8 bytes, go to byte copy loop */
...@@ -162,7 +154,6 @@ ENTRY(copy_user_generic_unrolled) ...@@ -162,7 +154,6 @@ ENTRY(copy_user_generic_unrolled)
_ASM_EXTABLE(19b,40b) _ASM_EXTABLE(19b,40b)
_ASM_EXTABLE(21b,50b) _ASM_EXTABLE(21b,50b)
_ASM_EXTABLE(22b,50b) _ASM_EXTABLE(22b,50b)
CFI_ENDPROC
ENDPROC(copy_user_generic_unrolled) ENDPROC(copy_user_generic_unrolled)
/* Some CPUs run faster using the string copy instructions. /* Some CPUs run faster using the string copy instructions.
...@@ -184,7 +175,6 @@ ENDPROC(copy_user_generic_unrolled) ...@@ -184,7 +175,6 @@ ENDPROC(copy_user_generic_unrolled)
* eax uncopied bytes or 0 if successful. * eax uncopied bytes or 0 if successful.
*/ */
ENTRY(copy_user_generic_string) ENTRY(copy_user_generic_string)
CFI_STARTPROC
ASM_STAC ASM_STAC
cmpl $8,%edx cmpl $8,%edx
jb 2f /* less than 8 bytes, go to byte copy loop */ jb 2f /* less than 8 bytes, go to byte copy loop */
...@@ -209,7 +199,6 @@ ENTRY(copy_user_generic_string) ...@@ -209,7 +199,6 @@ ENTRY(copy_user_generic_string)
_ASM_EXTABLE(1b,11b) _ASM_EXTABLE(1b,11b)
_ASM_EXTABLE(3b,12b) _ASM_EXTABLE(3b,12b)
CFI_ENDPROC
ENDPROC(copy_user_generic_string) ENDPROC(copy_user_generic_string)
/* /*
...@@ -225,7 +214,6 @@ ENDPROC(copy_user_generic_string) ...@@ -225,7 +214,6 @@ ENDPROC(copy_user_generic_string)
* eax uncopied bytes or 0 if successful. * eax uncopied bytes or 0 if successful.
*/ */
ENTRY(copy_user_enhanced_fast_string) ENTRY(copy_user_enhanced_fast_string)
CFI_STARTPROC
ASM_STAC ASM_STAC
movl %edx,%ecx movl %edx,%ecx
1: rep 1: rep
...@@ -240,7 +228,6 @@ ENTRY(copy_user_enhanced_fast_string) ...@@ -240,7 +228,6 @@ ENTRY(copy_user_enhanced_fast_string)
.previous .previous
_ASM_EXTABLE(1b,12b) _ASM_EXTABLE(1b,12b)
CFI_ENDPROC
ENDPROC(copy_user_enhanced_fast_string) ENDPROC(copy_user_enhanced_fast_string)
/* /*
...@@ -248,7 +235,6 @@ ENDPROC(copy_user_enhanced_fast_string) ...@@ -248,7 +235,6 @@ ENDPROC(copy_user_enhanced_fast_string)
* This will force destination/source out of cache for more performance. * This will force destination/source out of cache for more performance.
*/ */
ENTRY(__copy_user_nocache) ENTRY(__copy_user_nocache)
CFI_STARTPROC
ASM_STAC ASM_STAC
cmpl $8,%edx cmpl $8,%edx
jb 20f /* less then 8 bytes, go to byte copy loop */ jb 20f /* less then 8 bytes, go to byte copy loop */
...@@ -332,5 +318,4 @@ ENTRY(__copy_user_nocache) ...@@ -332,5 +318,4 @@ ENTRY(__copy_user_nocache)
_ASM_EXTABLE(19b,40b) _ASM_EXTABLE(19b,40b)
_ASM_EXTABLE(21b,50b) _ASM_EXTABLE(21b,50b)
_ASM_EXTABLE(22b,50b) _ASM_EXTABLE(22b,50b)
CFI_ENDPROC
ENDPROC(__copy_user_nocache) ENDPROC(__copy_user_nocache)
...@@ -6,7 +6,6 @@ ...@@ -6,7 +6,6 @@
* for more details. No warranty for anything given at all. * for more details. No warranty for anything given at all.
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/asm.h> #include <asm/asm.h>
...@@ -47,23 +46,16 @@ ...@@ -47,23 +46,16 @@
ENTRY(csum_partial_copy_generic) ENTRY(csum_partial_copy_generic)
CFI_STARTPROC
cmpl $3*64, %edx cmpl $3*64, %edx
jle .Lignore jle .Lignore
.Lignore: .Lignore:
subq $7*8, %rsp subq $7*8, %rsp
CFI_ADJUST_CFA_OFFSET 7*8
movq %rbx, 2*8(%rsp) movq %rbx, 2*8(%rsp)
CFI_REL_OFFSET rbx, 2*8
movq %r12, 3*8(%rsp) movq %r12, 3*8(%rsp)
CFI_REL_OFFSET r12, 3*8
movq %r14, 4*8(%rsp) movq %r14, 4*8(%rsp)
CFI_REL_OFFSET r14, 4*8
movq %r13, 5*8(%rsp) movq %r13, 5*8(%rsp)
CFI_REL_OFFSET r13, 5*8
movq %rbp, 6*8(%rsp) movq %rbp, 6*8(%rsp)
CFI_REL_OFFSET rbp, 6*8
movq %r8, (%rsp) movq %r8, (%rsp)
movq %r9, 1*8(%rsp) movq %r9, 1*8(%rsp)
...@@ -206,22 +198,14 @@ ENTRY(csum_partial_copy_generic) ...@@ -206,22 +198,14 @@ ENTRY(csum_partial_copy_generic)
addl %ebx, %eax addl %ebx, %eax
adcl %r9d, %eax /* carry */ adcl %r9d, %eax /* carry */
CFI_REMEMBER_STATE
.Lende: .Lende:
movq 2*8(%rsp), %rbx movq 2*8(%rsp), %rbx
CFI_RESTORE rbx
movq 3*8(%rsp), %r12 movq 3*8(%rsp), %r12
CFI_RESTORE r12
movq 4*8(%rsp), %r14 movq 4*8(%rsp), %r14
CFI_RESTORE r14
movq 5*8(%rsp), %r13 movq 5*8(%rsp), %r13
CFI_RESTORE r13
movq 6*8(%rsp), %rbp movq 6*8(%rsp), %rbp
CFI_RESTORE rbp
addq $7*8, %rsp addq $7*8, %rsp
CFI_ADJUST_CFA_OFFSET -7*8
ret ret
CFI_RESTORE_STATE
/* Exception handlers. Very simple, zeroing is done in the wrappers */ /* Exception handlers. Very simple, zeroing is done in the wrappers */
.Lbad_source: .Lbad_source:
...@@ -237,5 +221,4 @@ ENTRY(csum_partial_copy_generic) ...@@ -237,5 +221,4 @@ ENTRY(csum_partial_copy_generic)
jz .Lende jz .Lende
movl $-EFAULT, (%rax) movl $-EFAULT, (%rax)
jmp .Lende jmp .Lende
CFI_ENDPROC
ENDPROC(csum_partial_copy_generic) ENDPROC(csum_partial_copy_generic)
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/page_types.h> #include <asm/page_types.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
...@@ -36,7 +35,6 @@ ...@@ -36,7 +35,6 @@
.text .text
ENTRY(__get_user_1) ENTRY(__get_user_1)
CFI_STARTPROC
GET_THREAD_INFO(%_ASM_DX) GET_THREAD_INFO(%_ASM_DX)
cmp TI_addr_limit(%_ASM_DX),%_ASM_AX cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user jae bad_get_user
...@@ -45,11 +43,9 @@ ENTRY(__get_user_1) ...@@ -45,11 +43,9 @@ ENTRY(__get_user_1)
xor %eax,%eax xor %eax,%eax
ASM_CLAC ASM_CLAC
ret ret
CFI_ENDPROC
ENDPROC(__get_user_1) ENDPROC(__get_user_1)
ENTRY(__get_user_2) ENTRY(__get_user_2)
CFI_STARTPROC
add $1,%_ASM_AX add $1,%_ASM_AX
jc bad_get_user jc bad_get_user
GET_THREAD_INFO(%_ASM_DX) GET_THREAD_INFO(%_ASM_DX)
...@@ -60,11 +56,9 @@ ENTRY(__get_user_2) ...@@ -60,11 +56,9 @@ ENTRY(__get_user_2)
xor %eax,%eax xor %eax,%eax
ASM_CLAC ASM_CLAC
ret ret
CFI_ENDPROC
ENDPROC(__get_user_2) ENDPROC(__get_user_2)
ENTRY(__get_user_4) ENTRY(__get_user_4)
CFI_STARTPROC
add $3,%_ASM_AX add $3,%_ASM_AX
jc bad_get_user jc bad_get_user
GET_THREAD_INFO(%_ASM_DX) GET_THREAD_INFO(%_ASM_DX)
...@@ -75,11 +69,9 @@ ENTRY(__get_user_4) ...@@ -75,11 +69,9 @@ ENTRY(__get_user_4)
xor %eax,%eax xor %eax,%eax
ASM_CLAC ASM_CLAC
ret ret
CFI_ENDPROC
ENDPROC(__get_user_4) ENDPROC(__get_user_4)
ENTRY(__get_user_8) ENTRY(__get_user_8)
CFI_STARTPROC
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
add $7,%_ASM_AX add $7,%_ASM_AX
jc bad_get_user jc bad_get_user
...@@ -104,28 +96,23 @@ ENTRY(__get_user_8) ...@@ -104,28 +96,23 @@ ENTRY(__get_user_8)
ASM_CLAC ASM_CLAC
ret ret
#endif #endif
CFI_ENDPROC
ENDPROC(__get_user_8) ENDPROC(__get_user_8)
bad_get_user: bad_get_user:
CFI_STARTPROC
xor %edx,%edx xor %edx,%edx
mov $(-EFAULT),%_ASM_AX mov $(-EFAULT),%_ASM_AX
ASM_CLAC ASM_CLAC
ret ret
CFI_ENDPROC
END(bad_get_user) END(bad_get_user)
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
bad_get_user_8: bad_get_user_8:
CFI_STARTPROC
xor %edx,%edx xor %edx,%edx
xor %ecx,%ecx xor %ecx,%ecx
mov $(-EFAULT),%_ASM_AX mov $(-EFAULT),%_ASM_AX
ASM_CLAC ASM_CLAC
ret ret
CFI_ENDPROC
END(bad_get_user_8) END(bad_get_user_8)
#endif #endif
......
...@@ -16,15 +16,12 @@ ...@@ -16,15 +16,12 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
/* /*
* override generic version in lib/iomap_copy.c * override generic version in lib/iomap_copy.c
*/ */
ENTRY(__iowrite32_copy) ENTRY(__iowrite32_copy)
CFI_STARTPROC
movl %edx,%ecx movl %edx,%ecx
rep movsd rep movsd
ret ret
CFI_ENDPROC
ENDPROC(__iowrite32_copy) ENDPROC(__iowrite32_copy)
...@@ -2,7 +2,6 @@ ...@@ -2,7 +2,6 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/dwarf2.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
/* /*
...@@ -53,7 +52,6 @@ ENTRY(memcpy_erms) ...@@ -53,7 +52,6 @@ ENTRY(memcpy_erms)
ENDPROC(memcpy_erms) ENDPROC(memcpy_erms)
ENTRY(memcpy_orig) ENTRY(memcpy_orig)
CFI_STARTPROC
movq %rdi, %rax movq %rdi, %rax
cmpq $0x20, %rdx cmpq $0x20, %rdx
...@@ -178,5 +176,4 @@ ENTRY(memcpy_orig) ...@@ -178,5 +176,4 @@ ENTRY(memcpy_orig)
.Lend: .Lend:
retq retq
CFI_ENDPROC
ENDPROC(memcpy_orig) ENDPROC(memcpy_orig)
...@@ -8,7 +8,6 @@ ...@@ -8,7 +8,6 @@
* of the License. * of the License.
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
/* /*
* Calling convention : * Calling convention :
......
...@@ -212,5 +212,5 @@ EOF ...@@ -212,5 +212,5 @@ EOF
) )
} }
(ignore_list && syscall_list $(dirname $0)/../arch/x86/syscalls/syscall_32.tbl) | \ (ignore_list && syscall_list $(dirname $0)/../arch/x86/entry/syscalls/syscall_32.tbl) | \
$* -E -x c - > /dev/null $* -E -x c - > /dev/null