Commit 784d5699 authored by Al Viro

x86: move exports to actual definitions

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 22823ab4
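
The diff below removes the dedicated export files (i386_ksyms_32.c, x8664_ksyms_64.c, and the UML ksyms.c whose object is dropped from the um Makefile) and instead places each EXPORT_SYMBOL() next to the symbol's definition, using the new <asm/export.h> wrapper so assembly sources can export symbols directly. A minimal sketch of the resulting pattern in a .S file (my_asm_helper is a hypothetical name, standing in for csum_partial, clear_page, and the rest):

	#include <linux/linkage.h>
	#include <asm/export.h>

	ENTRY(my_asm_helper)		/* hypothetical routine */
		ret
	ENDPROC(my_asm_helper)
	EXPORT_SYMBOL(my_asm_helper)	/* export sits at the definition */

This keeps each export next to the code it refers to, so adding, renaming, or removing an assembly routine no longer requires touching a separate *_ksyms_*.c file.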
@@ -44,6 +44,7 @@
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
+#include <asm/export.h>

 	.section .entry.text, "ax"
@@ -955,6 +956,7 @@ trace:
 	jmp ftrace_stub
 END(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
+EXPORT_SYMBOL(mcount)
 #endif /* CONFIG_FUNCTION_TRACER */

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
......
@@ -35,6 +35,7 @@
 #include <asm/asm.h>
 #include <asm/smap.h>
 #include <asm/pgtable_types.h>
+#include <asm/export.h>
 #include <linux/err.h>

 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
@@ -785,6 +786,7 @@ ENTRY(native_load_gs_index)
 	popfq
 	ret
 END(native_load_gs_index)
+EXPORT_SYMBOL(native_load_gs_index)

 _ASM_EXTABLE(.Lgs_change, bad_gs)
 	.section .fixup, "ax"
......
@@ -6,6 +6,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/asm.h>
+#include <asm/export.h>

 	/* put return address in eax (arg1) */
 	.macro THUNK name, func, put_ret_addr_in_eax=0
@@ -36,5 +37,7 @@
 #ifdef CONFIG_PREEMPT
 	THUNK ___preempt_schedule, preempt_schedule
 	THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
+	EXPORT_SYMBOL(___preempt_schedule)
+	EXPORT_SYMBOL(___preempt_schedule_notrace)
 #endif
@@ -8,6 +8,7 @@
 #include <linux/linkage.h>
 #include "calling.h"
 #include <asm/asm.h>
+#include <asm/export.h>

 	/* rdi: arg1 ... normal C conventions. rax is saved/restored. */
 	.macro THUNK name, func, put_ret_addr_in_rdi=0
@@ -49,6 +50,8 @@
 #ifdef CONFIG_PREEMPT
 	THUNK ___preempt_schedule, preempt_schedule
 	THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
+	EXPORT_SYMBOL(___preempt_schedule)
+	EXPORT_SYMBOL(___preempt_schedule_notrace)
 #endif

 #if defined(CONFIG_TRACE_IRQFLAGS) \
......
@@ -0,0 +1,4 @@
+#ifdef CONFIG_64BIT
+#define KSYM_ALIGN 16
+#endif
+#include <asm-generic/export.h>
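
For reference, the EXPORT_SYMBOL() used from the .S files above is an assembler macro supplied by <asm-generic/export.h>, which emits the same __ksymtab records that the C macro does. A simplified schematic of the expansion follows (not the verbatim header; section names and directives are approximate):

	/* schematic of what EXPORT_SYMBOL(sym) emits from assembly */
	.globl __ksymtab_sym
	.section ___ksymtab+sym, "a"
	.balign KSYM_ALIGN		/* 16 on 64-bit x86, per the #define above */
__ksymtab_sym:
	.quad sym			/* address of the exported symbol */
	.quad __kstrtab_sym		/* pointer to the symbol's name */
	.previous
	.section __ksymtab_strings, "a"
__kstrtab_sym:
	.asciz "sym"
	.previous

Module loading then resolves undefined references against these __ksymtab entries exactly as it does for C-side exports.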
@@ -46,9 +46,7 @@ obj-$(CONFIG_MODIFY_LDT_SYSCALL) += ldt.o
 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
 obj-$(CONFIG_IRQ_WORK) += irq_work.o
 obj-y += probe_roms.o
-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
-obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
-obj-$(CONFIG_X86_64) += mcount_64.o
+obj-$(CONFIG_X86_64) += sys_x86_64.o mcount_64.o
 obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o
 obj-$(CONFIG_SYSFS) += ksysfs.o
 obj-y += bootflag.o e820.o
......
@@ -23,6 +23,7 @@
 #include <asm/percpu.h>
 #include <asm/nops.h>
 #include <asm/bootparam.h>
+#include <asm/export.h>

 /* Physical address */
 #define pa(X) ((X) - __PAGE_OFFSET)
@@ -673,6 +674,7 @@ ENTRY(empty_zero_page)
 	.fill 4096,1,0
 ENTRY(swapper_pg_dir)
 	.fill 1024,4,0
+EXPORT_SYMBOL(empty_zero_page)

 /*
  * This starts the data section.
......
@@ -21,6 +21,7 @@
 #include <asm/percpu.h>
 #include <asm/nops.h>
 #include "../entry/calling.h"
+#include <asm/export.h>

 #ifdef CONFIG_PARAVIRT
 #include <asm/asm-offsets.h>
@@ -488,10 +489,12 @@ early_gdt_descr_base:
 ENTRY(phys_base)
 	/* This must match the first entry in level2_kernel_pgt */
 	.quad 0x0000000000000000
+EXPORT_SYMBOL(phys_base)

 #include "../../x86/xen/xen-head.S"

 	__PAGE_ALIGNED_BSS
 NEXT_PAGE(empty_zero_page)
 	.skip PAGE_SIZE
+EXPORT_SYMBOL(empty_zero_page)
-#include <linux/export.h>
-#include <linux/spinlock_types.h>
-
-#include <asm/checksum.h>
-#include <asm/pgtable.h>
-#include <asm/desc.h>
-#include <asm/ftrace.h>
-
-#ifdef CONFIG_FUNCTION_TRACER
-/* mcount is defined in assembly */
-EXPORT_SYMBOL(mcount);
-#endif
-
-/*
- * Note, this is a prototype to get at the symbol for
- * the export, but dont use it from C code, it is used
- * by assembly code and is not using C calling convention!
- */
-#ifndef CONFIG_X86_CMPXCHG64
-extern void cmpxchg8b_emu(void);
-EXPORT_SYMBOL(cmpxchg8b_emu);
-#endif
-
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy_generic);
-
-EXPORT_SYMBOL(__get_user_1);
-EXPORT_SYMBOL(__get_user_2);
-EXPORT_SYMBOL(__get_user_4);
-EXPORT_SYMBOL(__get_user_8);
-
-EXPORT_SYMBOL(__put_user_1);
-EXPORT_SYMBOL(__put_user_2);
-EXPORT_SYMBOL(__put_user_4);
-EXPORT_SYMBOL(__put_user_8);
-
-EXPORT_SYMBOL(strstr);
-
-EXPORT_SYMBOL(csum_partial);
-EXPORT_SYMBOL(empty_zero_page);
-
-#ifdef CONFIG_PREEMPT
-EXPORT_SYMBOL(___preempt_schedule);
-EXPORT_SYMBOL(___preempt_schedule_notrace);
-#endif
-
-EXPORT_SYMBOL(__sw_hweight32);
@@ -7,6 +7,7 @@
 #include <linux/linkage.h>
 #include <asm/ptrace.h>
 #include <asm/ftrace.h>
+#include <asm/export.h>

 	.code64
@@ -294,6 +295,7 @@ trace:
 	jmp fgraph_trace
 END(function_hook)
 #endif /* CONFIG_DYNAMIC_FTRACE */
+EXPORT_SYMBOL(function_hook)
 #endif /* CONFIG_FUNCTION_TRACER */

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
......
-/* Exports for assembly files.
-   All C exports should go in the respective C files. */
-
-#include <linux/export.h>
-#include <linux/spinlock_types.h>
-#include <linux/smp.h>
-
-#include <net/checksum.h>
-
-#include <asm/processor.h>
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/desc.h>
-#include <asm/ftrace.h>
-
-#ifdef CONFIG_FUNCTION_TRACER
-/* mcount and __fentry__ are defined in assembly */
-#ifdef CC_USING_FENTRY
-EXPORT_SYMBOL(__fentry__);
-#else
-EXPORT_SYMBOL(mcount);
-#endif
-#endif
-
-EXPORT_SYMBOL(__get_user_1);
-EXPORT_SYMBOL(__get_user_2);
-EXPORT_SYMBOL(__get_user_4);
-EXPORT_SYMBOL(__get_user_8);
-EXPORT_SYMBOL(__put_user_1);
-EXPORT_SYMBOL(__put_user_2);
-EXPORT_SYMBOL(__put_user_4);
-EXPORT_SYMBOL(__put_user_8);
-
-EXPORT_SYMBOL(copy_user_generic_string);
-EXPORT_SYMBOL(copy_user_generic_unrolled);
-EXPORT_SYMBOL(copy_user_enhanced_fast_string);
-EXPORT_SYMBOL(__copy_user_nocache);
-EXPORT_SYMBOL(_copy_from_user);
-EXPORT_SYMBOL(_copy_to_user);
-
-EXPORT_SYMBOL_GPL(memcpy_mcsafe);
-
-EXPORT_SYMBOL(copy_page);
-EXPORT_SYMBOL(clear_page);
-
-EXPORT_SYMBOL(csum_partial);
-
-EXPORT_SYMBOL(__sw_hweight32);
-EXPORT_SYMBOL(__sw_hweight64);
-
-/*
- * Export string functions. We normally rely on gcc builtin for most of these,
- * but gcc sometimes decides not to inline them.
- */
-#undef memcpy
-#undef memset
-#undef memmove
-
-extern void *__memset(void *, int, __kernel_size_t);
-extern void *__memcpy(void *, const void *, __kernel_size_t);
-extern void *__memmove(void *, const void *, __kernel_size_t);
-extern void *memset(void *, int, __kernel_size_t);
-extern void *memcpy(void *, const void *, __kernel_size_t);
-extern void *memmove(void *, const void *, __kernel_size_t);
-
-EXPORT_SYMBOL(__memset);
-EXPORT_SYMBOL(__memcpy);
-EXPORT_SYMBOL(__memmove);
-
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memmove);
-
-#ifndef CONFIG_DEBUG_VIRTUAL
-EXPORT_SYMBOL(phys_base);
-#endif
-EXPORT_SYMBOL(empty_zero_page);
-#ifndef CONFIG_PARAVIRT
-EXPORT_SYMBOL(native_load_gs_index);
-#endif
-
-#ifdef CONFIG_PREEMPT
-EXPORT_SYMBOL(___preempt_schedule);
-EXPORT_SYMBOL(___preempt_schedule_notrace);
-#endif
@@ -28,6 +28,7 @@
 #include <linux/linkage.h>
 #include <asm/errno.h>
 #include <asm/asm.h>
+#include <asm/export.h>

 /*
  * computes a partial checksum, e.g. for TCP/UDP fragments
@@ -251,6 +252,7 @@ ENTRY(csum_partial)
 ENDPROC(csum_partial)
 #endif
+EXPORT_SYMBOL(csum_partial)

 /*
 unsigned int csum_partial_copy_generic (const char *src, char *dst,
@@ -490,3 +492,4 @@ ENDPROC(csum_partial_copy_generic)
 #undef ROUND1

 #endif
+EXPORT_SYMBOL(csum_partial_copy_generic)
 #include <linux/linkage.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
+#include <asm/export.h>

 /*
  * Most CPUs support enhanced REP MOVSB/STOSB instructions. It is
@@ -23,6 +24,7 @@ ENTRY(clear_page)
 	rep stosq
 	ret
 ENDPROC(clear_page)
+EXPORT_SYMBOL(clear_page)

 ENTRY(clear_page_orig)
......
@@ -7,6 +7,7 @@
  */

 #include <linux/linkage.h>
+#include <asm/export.h>

 .text
@@ -48,3 +49,4 @@ ENTRY(cmpxchg8b_emu)
 	ret
 ENDPROC(cmpxchg8b_emu)
+EXPORT_SYMBOL(cmpxchg8b_emu)
@@ -3,6 +3,7 @@
 #include <linux/linkage.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
+#include <asm/export.h>

 /*
  * Some CPUs run faster using the string copy instructions (sane microcode).
@@ -17,6 +18,7 @@ ENTRY(copy_page)
 	rep movsq
 	ret
 ENDPROC(copy_page)
+EXPORT_SYMBOL(copy_page)

 ENTRY(copy_page_regs)
 	subq $2*8, %rsp
......
@@ -14,6 +14,7 @@
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
+#include <asm/export.h>

 /* Standard copy_to_user with segment limit checking */
 ENTRY(_copy_to_user)
@@ -29,6 +30,7 @@ ENTRY(_copy_to_user)
 		      "jmp copy_user_enhanced_fast_string",	\
 		      X86_FEATURE_ERMS
 ENDPROC(_copy_to_user)
+EXPORT_SYMBOL(_copy_to_user)

 /* Standard copy_from_user with segment limit checking */
 ENTRY(_copy_from_user)
@@ -44,6 +46,8 @@ ENTRY(_copy_from_user)
 		      "jmp copy_user_enhanced_fast_string",	\
 		      X86_FEATURE_ERMS
 ENDPROC(_copy_from_user)
+EXPORT_SYMBOL(_copy_from_user)
+
 	.section .fixup,"ax"
 	/* must zero dest */
@@ -155,6 +159,7 @@ ENTRY(copy_user_generic_unrolled)
 	_ASM_EXTABLE(21b,50b)
 	_ASM_EXTABLE(22b,50b)
 ENDPROC(copy_user_generic_unrolled)
+EXPORT_SYMBOL(copy_user_generic_unrolled)

 /* Some CPUs run faster using the string copy instructions.
  * This is also a lot simpler. Use them when possible.
@@ -200,6 +205,7 @@ ENTRY(copy_user_generic_string)
 	_ASM_EXTABLE(1b,11b)
 	_ASM_EXTABLE(3b,12b)
 ENDPROC(copy_user_generic_string)
+EXPORT_SYMBOL(copy_user_generic_string)

 /*
  * Some CPUs are adding enhanced REP MOVSB/STOSB instructions.
@@ -229,6 +235,7 @@ ENTRY(copy_user_enhanced_fast_string)
 	_ASM_EXTABLE(1b,12b)
 ENDPROC(copy_user_enhanced_fast_string)
+EXPORT_SYMBOL(copy_user_enhanced_fast_string)

 /*
  * copy_user_nocache - Uncached memory copy with exception handling
@@ -379,3 +386,4 @@ ENTRY(__copy_user_nocache)
 	_ASM_EXTABLE(40b,.L_fixup_1b_copy)
 	_ASM_EXTABLE(41b,.L_fixup_1b_copy)
 ENDPROC(__copy_user_nocache)
+EXPORT_SYMBOL(__copy_user_nocache)
@@ -135,6 +135,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
 	return (__force __wsum)add32_with_carry(do_csum(buff, len),
 						(__force u32)sum);
 }
+EXPORT_SYMBOL(csum_partial);

 /*
  * this routine is used for miscellaneous IP-like checksums, mainly
......
@@ -32,6 +32,7 @@
 #include <asm/thread_info.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
+#include <asm/export.h>

 	.text
 ENTRY(__get_user_1)
@@ -44,6 +45,7 @@ ENTRY(__get_user_1)
 	ASM_CLAC
 	ret
 ENDPROC(__get_user_1)
+EXPORT_SYMBOL(__get_user_1)

 ENTRY(__get_user_2)
 	add $1,%_ASM_AX
@@ -57,6 +59,7 @@ ENTRY(__get_user_2)
 	ASM_CLAC
 	ret
 ENDPROC(__get_user_2)
+EXPORT_SYMBOL(__get_user_2)

 ENTRY(__get_user_4)
 	add $3,%_ASM_AX
@@ -70,6 +73,7 @@ ENTRY(__get_user_4)
 	ASM_CLAC
 	ret
 ENDPROC(__get_user_4)
+EXPORT_SYMBOL(__get_user_4)

 ENTRY(__get_user_8)
 #ifdef CONFIG_X86_64
@@ -97,6 +101,7 @@ ENTRY(__get_user_8)
 	ret
 #endif
 ENDPROC(__get_user_8)
+EXPORT_SYMBOL(__get_user_8)

 bad_get_user:
......
 #include <linux/linkage.h>
+#include <asm/export.h>

 #include <asm/asm.h>
@@ -32,6 +33,7 @@ ENTRY(__sw_hweight32)
 	__ASM_SIZE(pop,) %__ASM_REG(dx)
 	ret
 ENDPROC(__sw_hweight32)
+EXPORT_SYMBOL(__sw_hweight32)

 ENTRY(__sw_hweight64)
 #ifdef CONFIG_X86_64
@@ -75,3 +77,4 @@ ENTRY(__sw_hweight64)
 	ret
 #endif
 ENDPROC(__sw_hweight64)
+EXPORT_SYMBOL(__sw_hweight64)
@@ -4,6 +4,7 @@
 #include <asm/errno.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
+#include <asm/export.h>

 /*
  * We build a jump to memcpy_orig by default which gets NOPped out on
@@ -40,6 +41,8 @@ ENTRY(memcpy)
 	ret
 ENDPROC(memcpy)
 ENDPROC(__memcpy)
+EXPORT_SYMBOL(memcpy)
+EXPORT_SYMBOL(__memcpy)

 /*
  * memcpy_erms() - enhanced fast string memcpy. This is faster and
@@ -274,6 +277,7 @@ ENTRY(memcpy_mcsafe)
 	xorq %rax, %rax
 	ret
 ENDPROC(memcpy_mcsafe)
+EXPORT_SYMBOL_GPL(memcpy_mcsafe)

 	.section .fixup, "ax"
 	/* Return -EFAULT for any failure */
......
@@ -8,6 +8,7 @@
 #include <linux/linkage.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
+#include <asm/export.h>

 #undef memmove
@@ -207,3 +208,5 @@ ENTRY(__memmove)
 	retq
 ENDPROC(__memmove)
 ENDPROC(memmove)
+EXPORT_SYMBOL(__memmove)
+EXPORT_SYMBOL(memmove)
@@ -3,6 +3,7 @@
 #include <linux/linkage.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
+#include <asm/export.h>

 .weak memset
@@ -43,6 +44,8 @@ ENTRY(__memset)
 	ret
 ENDPROC(memset)
 ENDPROC(__memset)
+EXPORT_SYMBOL(memset)
+EXPORT_SYMBOL(__memset)

 /*
  * ISO C memset - set a memory block to a byte value. This function uses
......
@@ -15,6 +15,7 @@
 #include <asm/errno.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
+#include <asm/export.h>

 /*
@@ -43,6 +44,7 @@ ENTRY(__put_user_1)
 	xor %eax,%eax
 	EXIT
 ENDPROC(__put_user_1)
+EXPORT_SYMBOL(__put_user_1)

 ENTRY(__put_user_2)
 	ENTER
@@ -55,6 +57,7 @@ ENTRY(__put_user_2)
 	xor %eax,%eax
 	EXIT
 ENDPROC(__put_user_2)
+EXPORT_SYMBOL(__put_user_2)

 ENTRY(__put_user_4)
 	ENTER
@@ -67,6 +70,7 @@ ENTRY(__put_user_4)
 	xor %eax,%eax
 	EXIT
 ENDPROC(__put_user_4)
+EXPORT_SYMBOL(__put_user_4)

 ENTRY(__put_user_8)
 	ENTER
@@ -82,6 +86,7 @@ ENTRY(__put_user_8)
 	xor %eax,%eax
 	EXIT
 ENDPROC(__put_user_8)
+EXPORT_SYMBOL(__put_user_8)

 bad_put_user:
 	movl $-EFAULT,%eax
......
 #include <linux/string.h>
+#include <linux/export.h>

 char *strstr(const char *cs, const char *ct)
 {
@@ -28,4 +29,4 @@ __asm__ __volatile__(
 	: "dx", "di");
 return __res;
 }
-
+EXPORT_SYMBOL(strstr);
@@ -8,7 +8,7 @@ else
 BITS := 64
 endif

-obj-y = bug.o bugs_$(BITS).o delay.o fault.o ksyms.o ldt.o \
+obj-y = bug.o bugs_$(BITS).o delay.o fault.o ldt.o \
 	ptrace_$(BITS).o ptrace_user.o setjmp_$(BITS).o signal.o \
 	stub_$(BITS).o stub_segv.o \
 	sys_call_table_$(BITS).o sysrq_$(BITS).o tls_$(BITS).o \
......
@@ -27,6 +27,7 @@
 #include <asm/errno.h>
 #include <asm/asm.h>
+#include <asm/export.h>

 /*
  * computes a partial checksum, e.g. for TCP/UDP fragments
@@ -214,3 +215,4 @@ csum_partial:
 	ret
 #endif
+EXPORT_SYMBOL(csum_partial)
-#include <linux/module.h>
-#include <asm/string.h>
-#include <asm/checksum.h>
-
-#ifndef CONFIG_X86_32
-/*XXX: we need them because they would be exported by x86_64 */
-#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
-EXPORT_SYMBOL(memcpy);
-#else
-EXPORT_SYMBOL(__memcpy);
-#endif
-#endif
-
-EXPORT_SYMBOL(csum_partial);
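
Behavior is unchanged for module authors: the symbols are still exported, only the location of the EXPORT_SYMBOL() annotations moved. A hypothetical out-of-tree module using one of the exported routines (sketch only; Kbuild glue omitted):

	#include <linux/module.h>
	#include <net/checksum.h>	/* declares csum_partial() */

	static int __init csum_demo_init(void)
	{
		u8 buf[64] = { 0 };
		/* csum_partial is exported at its definition site now */
		__wsum sum = csum_partial(buf, sizeof(buf), 0);
		pr_info("csum_demo: sum=%x\n", (__force u32)sum);
		return 0;
	}

	static void __exit csum_demo_exit(void)
	{
	}

	module_init(csum_demo_init);
	module_exit(csum_demo_exit);
	MODULE_LICENSE("GPL");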