Commit d3867f04 authored by Al Viro

sparc: move exports to definitions

Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 9445aa1a
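
What the change does, in brief: the architecture now picks up the generic <asm/export.h> (hence the "generic-y += export.h" line below), assembly sources include it, and EXPORT_SYMBOL() is written directly after the symbol it exports, so the old arch/sparc/kernel/sparc_ksyms_*.c and arch/sparc/lib/ksyms.c collections of exports can go away. A minimal sketch of the before/after pattern, using a hypothetical symbol my_asm_helper that is not part of this commit:

    /* old style: the .S file only defines the symbol ...        */
            .globl  my_asm_helper
    my_asm_helper:
            retl
             nop

    /* ... and a separate *_ksyms.c file carries the export      */
    EXPORT_SYMBOL(my_asm_helper);

    /* new style: the export sits next to the definition in .S   */
    #include <asm/export.h>

            .globl  my_asm_helper
    my_asm_helper:
            retl
             nop
    EXPORT_SYMBOL(my_asm_helper)

Note that the assembler form of EXPORT_SYMBOL() takes no trailing semicolon; the macro from asm-generic/export.h emits the __ksymtab entry that the module loader expects.
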
@@ -6,6 +6,7 @@ generic-y += cputime.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += exec.h
+generic-y += export.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += linkage.h
...
@@ -86,7 +86,7 @@ obj-y += auxio_$(BITS).o
obj-$(CONFIG_SUN_PM) += apc.o pmc.o
obj-$(CONFIG_MODULES) += module.o
-obj-$(CONFIG_MODULES) += sparc_ksyms_$(BITS).o
+obj-$(CONFIG_MODULES) += sparc_ksyms.o
obj-$(CONFIG_SPARC_LED) += led.o
obj-$(CONFIG_KGDB) += kgdb_$(BITS).o
...
@@ -29,6 +29,7 @@
#include <asm/unistd.h>
#include <asm/asmmacro.h>
+#include <asm/export.h>
#define curptr g6
@@ -1207,6 +1208,8 @@ delay_continue:
ret
restore
+EXPORT_SYMBOL(__udelay)
+EXPORT_SYMBOL(__ndelay)
/* Handle a software breakpoint */
/* We have to inform parent that child has stopped */
...
@@ -24,6 +24,7 @@
#include <asm/thread_info.h> /* TI_UWINMASK */
#include <asm/errno.h>
#include <asm/pgtsrmmu.h> /* SRMMU_PGDIR_SHIFT */
+#include <asm/export.h>
.data
/* The following are used with the prom_vector node-ops to figure out
@@ -60,6 +61,7 @@ sun4e_notsup:
*/
.globl empty_zero_page
empty_zero_page: .skip PAGE_SIZE
+EXPORT_SYMBOL(empty_zero_page)
.global root_flags
.global ram_flags
@@ -813,3 +815,4 @@ lvl14_save:
__ret_efault:
ret
restore %g0, -EFAULT, %o0
+EXPORT_SYMBOL(__ret_efault)
@@ -32,6 +32,7 @@
#include <asm/estate.h>
#include <asm/sfafsr.h>
#include <asm/unistd.h>
+#include <asm/export.h>
/* This section from from _start to sparc64_boot_end should fit into
 * 0x0000000000404000 to 0x0000000000408000.
@@ -143,6 +144,7 @@ prom_cpu_compatible:
.skip 64
prom_root_node:
.word 0
+EXPORT_SYMBOL(prom_root_node)
prom_mmu_ihandle_cache:
.word 0
prom_boot_mapped_pc:
@@ -158,6 +160,7 @@ is_sun4v:
.word 0
sun4v_chip_type:
.word SUN4V_CHIP_INVALID
+EXPORT_SYMBOL(sun4v_chip_type)
1:
rd %pc, %l0
@@ -920,6 +923,7 @@ swapper_4m_tsb:
.globl prom_tba, tlb_type
prom_tba: .xword 0
tlb_type: .word 0 /* Must NOT end up in BSS */
+EXPORT_SYMBOL(tlb_type)
.section ".fixup",#alloc,#execinstr
.globl __ret_efault, __retl_efault, __ret_one, __retl_one
@@ -927,6 +931,7 @@ ENTRY(__ret_efault)
ret
restore %g0, -EFAULT, %o0
ENDPROC(__ret_efault)
+EXPORT_SYMBOL(__ret_efault)
ENTRY(__retl_efault)
retl
...
@@ -15,6 +15,7 @@ __flushw_user:
2: retl
nop
.size __flushw_user,.-__flushw_user
+EXPORT_SYMBOL(__flushw_user)
/* Flush %fp and %i7 to the stack for all register
 * windows active inside of the cpu. This allows
@@ -61,3 +62,4 @@ real_hard_smp_processor_id:
.size hard_smp_processor_id,.-hard_smp_processor_id
#endif
.size real_hard_smp_processor_id,.-real_hard_smp_processor_id
+EXPORT_SYMBOL_GPL(real_hard_smp_processor_id)
@@ -343,6 +343,7 @@ ENTRY(sun4v_mach_set_watchdog)
0: retl
nop
ENDPROC(sun4v_mach_set_watchdog)
+EXPORT_SYMBOL(sun4v_mach_set_watchdog)
/* No inputs and does not return. */
ENTRY(sun4v_mach_sir)
@@ -776,6 +777,7 @@ ENTRY(sun4v_niagara_getperf)
retl
nop
ENDPROC(sun4v_niagara_getperf)
+EXPORT_SYMBOL(sun4v_niagara_getperf)
ENTRY(sun4v_niagara_setperf)
mov HV_FAST_SET_PERFREG, %o5
@@ -783,6 +785,7 @@ ENTRY(sun4v_niagara_setperf)
retl
nop
ENDPROC(sun4v_niagara_setperf)
+EXPORT_SYMBOL(sun4v_niagara_setperf)
ENTRY(sun4v_niagara2_getperf)
mov %o0, %o4
@@ -792,6 +795,7 @@ ENTRY(sun4v_niagara2_getperf)
retl
nop
ENDPROC(sun4v_niagara2_getperf)
+EXPORT_SYMBOL(sun4v_niagara2_getperf)
ENTRY(sun4v_niagara2_setperf)
mov HV_FAST_N2_SET_PERFREG, %o5
@@ -799,6 +803,7 @@ ENTRY(sun4v_niagara2_setperf)
retl
nop
ENDPROC(sun4v_niagara2_setperf)
+EXPORT_SYMBOL(sun4v_niagara2_setperf)
ENTRY(sun4v_reboot_data_set)
mov HV_FAST_REBOOT_DATA_SET, %o5
...
@@ -5,27 +5,8 @@
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
*/
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/delay.h>
-#include <asm/head.h>
-#include <asm/dma.h>
-struct poll {
-	int fd;
-	short events;
-	short revents;
-};
-/* from entry.S */
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__ndelay);
-/* from head_32.S */
-EXPORT_SYMBOL(__ret_efault);
-EXPORT_SYMBOL(empty_zero_page);
-/* Exporting a symbol from /init/main.c */
+/* This is needed only for drivers/sbus/char/openprom.c */
EXPORT_SYMBOL(saved_command_line);
-/* arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support.
- *
- * Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net)
- * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
- * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
- */
-#include <linux/export.h>
-#include <linux/pci.h>
-#include <linux/bitops.h>
-#include <asm/cpudata.h>
-#include <asm/uaccess.h>
-#include <asm/spitfire.h>
-#include <asm/oplib.h>
-#include <asm/hypervisor.h>
-#include <asm/cacheflush.h>
-struct poll {
-	int fd;
-	short events;
-	short revents;
-};
-/* from helpers.S */
-EXPORT_SYMBOL(__flushw_user);
-EXPORT_SYMBOL_GPL(real_hard_smp_processor_id);
-/* from head_64.S */
-EXPORT_SYMBOL(__ret_efault);
-EXPORT_SYMBOL(tlb_type);
-EXPORT_SYMBOL(sun4v_chip_type);
-EXPORT_SYMBOL(prom_root_node);
-/* from hvcalls.S */
-EXPORT_SYMBOL(sun4v_niagara_getperf);
-EXPORT_SYMBOL(sun4v_niagara_setperf);
-EXPORT_SYMBOL(sun4v_niagara2_getperf);
-EXPORT_SYMBOL(sun4v_niagara2_setperf);
-EXPORT_SYMBOL(sun4v_mach_set_watchdog);
-/* from hweight.S */
-EXPORT_SYMBOL(__arch_hweight8);
-EXPORT_SYMBOL(__arch_hweight16);
-EXPORT_SYMBOL(__arch_hweight32);
-EXPORT_SYMBOL(__arch_hweight64);
-/* from ffs_ffz.S */
-EXPORT_SYMBOL(ffs);
-EXPORT_SYMBOL(__ffs);
-/* Exporting a symbol from /init/main.c */
-EXPORT_SYMBOL(saved_command_line);
@@ -43,5 +43,4 @@ lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o
obj-$(CONFIG_SPARC64) += iomap.o
obj-$(CONFIG_SPARC32) += atomic32.o ucmpdi2.o
-obj-y += ksyms.o
obj-$(CONFIG_SPARC64) += PeeCeeI.o
@@ -7,6 +7,7 @@
#ifdef __KERNEL__
#include <asm/visasm.h>
#include <asm/asi.h>
+#include <asm/export.h>
#define GLOBAL_SPARE g7
#else
#define GLOBAL_SPARE g5
@@ -567,3 +568,4 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
mov EX_RETVAL(%o4), %o0
.size FUNC_NAME, .-FUNC_NAME
+EXPORT_SYMBOL(FUNC_NAME)
@@ -13,6 +13,7 @@
#include <asm/ptrace.h>
#include <asm/visasm.h>
#include <asm/thread_info.h>
+#include <asm/export.h>
/* On entry: %o5=current FPRS value, %g7 is callers address */
/* May clobber %o5, %g1, %g2, %g3, %g7, %icc, %xcc */
@@ -79,3 +80,4 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
80: jmpl %g7 + %g0, %g0
nop
ENDPROC(VISenter)
+EXPORT_SYMBOL(VISenter)
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
+#include <asm/export.h>
.text
ENTRY(__ashldi3)
@@ -33,3 +34,4 @@ ENTRY(__ashldi3)
retl
nop
ENDPROC(__ashldi3)
+EXPORT_SYMBOL(__ashldi3)
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
+#include <asm/export.h>
.text
ENTRY(__ashrdi3)
@@ -35,3 +36,4 @@ ENTRY(__ashrdi3)
jmpl %o7 + 8, %g0
nop
ENDPROC(__ashrdi3)
+EXPORT_SYMBOL(__ashrdi3)
@@ -6,6 +6,7 @@
#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/backoff.h>
+#include <asm/export.h>
.text
@@ -29,6 +30,7 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
nop; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
ENDPROC(atomic_##op); \
+EXPORT_SYMBOL(atomic_##op);
#define ATOMIC_OP_RETURN(op) \
ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
@@ -42,7 +44,8 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
sra %g1, 0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic_##op##_return);
+ENDPROC(atomic_##op##_return); \
+EXPORT_SYMBOL(atomic_##op##_return);
#define ATOMIC_FETCH_OP(op) \
ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
@@ -56,7 +59,8 @@ ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
sra %g1, 0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic_fetch_##op);
+ENDPROC(atomic_fetch_##op); \
+EXPORT_SYMBOL(atomic_fetch_##op);
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
@@ -88,6 +92,7 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
nop; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
ENDPROC(atomic64_##op); \
+EXPORT_SYMBOL(atomic64_##op);
#define ATOMIC64_OP_RETURN(op) \
ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
@@ -101,7 +106,8 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
op %g1, %o0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic64_##op##_return);
+ENDPROC(atomic64_##op##_return); \
+EXPORT_SYMBOL(atomic64_##op##_return);
#define ATOMIC64_FETCH_OP(op) \
ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
@@ -115,7 +121,8 @@ ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
mov %g1, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic64_fetch_##op);
+ENDPROC(atomic64_fetch_##op); \
+EXPORT_SYMBOL(atomic64_fetch_##op);
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
@@ -147,3 +154,4 @@ ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
sub %g1, 1, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b)
ENDPROC(atomic64_dec_if_positive)
+EXPORT_SYMBOL(atomic64_dec_if_positive)
@@ -6,6 +6,7 @@
#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/backoff.h>
+#include <asm/export.h>
.text
@@ -29,6 +30,7 @@ ENTRY(test_and_set_bit) /* %o0=nr, %o1=addr */
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
ENDPROC(test_and_set_bit)
+EXPORT_SYMBOL(test_and_set_bit)
ENTRY(test_and_clear_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
@@ -50,6 +52,7 @@ ENTRY(test_and_clear_bit) /* %o0=nr, %o1=addr */
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
ENDPROC(test_and_clear_bit)
+EXPORT_SYMBOL(test_and_clear_bit)
ENTRY(test_and_change_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
@@ -71,6 +74,7 @@ ENTRY(test_and_change_bit) /* %o0=nr, %o1=addr */
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
ENDPROC(test_and_change_bit)
+EXPORT_SYMBOL(test_and_change_bit)
ENTRY(set_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
@@ -90,6 +94,7 @@ ENTRY(set_bit) /* %o0=nr, %o1=addr */
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
ENDPROC(set_bit)
+EXPORT_SYMBOL(set_bit)
ENTRY(clear_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
@@ -109,6 +114,7 @@ ENTRY(clear_bit) /* %o0=nr, %o1=addr */
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
ENDPROC(clear_bit)
+EXPORT_SYMBOL(clear_bit)
ENTRY(change_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
@@ -128,3 +134,4 @@ ENTRY(change_bit) /* %o0=nr, %o1=addr */
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
ENDPROC(change_bit)
+EXPORT_SYMBOL(change_bit)
@@ -6,6 +6,7 @@
#include <linux/linkage.h>
#include <asm/page.h>
+#include <asm/export.h>
/* Zero out 64 bytes of memory at (buf + offset).
 * Assumes %g1 contains zero.
@@ -64,6 +65,7 @@ ENTRY(bzero_1page)
retl
nop
ENDPROC(bzero_1page)
+EXPORT_SYMBOL(bzero_1page)
ENTRY(__copy_1page)
/* NOTE: If you change the number of insns of this routine, please check
@@ -87,3 +89,4 @@ ENTRY(__copy_1page)
retl
nop
ENDPROC(__copy_1page)
+EXPORT_SYMBOL(__copy_1page)
@@ -5,6 +5,7 @@
*/
#include <linux/linkage.h>
+#include <asm/export.h>
.text
@@ -78,6 +79,8 @@ __bzero_done:
mov %o3, %o0
ENDPROC(__bzero)
ENDPROC(memset)
+EXPORT_SYMBOL(__bzero)
+EXPORT_SYMBOL(memset)
#define EX_ST(x,y) \
98: x,y; \
@@ -143,3 +146,4 @@ __clear_user_done:
retl
clr %o0
ENDPROC(__clear_user)
+EXPORT_SYMBOL(__clear_user)
@@ -14,6 +14,7 @@
*/
#include <asm/errno.h>
+#include <asm/export.h>
#define CSUM_BIGCHUNK(buf, offset, sum, t0, t1, t2, t3, t4, t5) \
ldd [buf + offset + 0x00], t0; \
@@ -104,6 +105,7 @@ csum_partial_fix_alignment:
 * buffer of size 0x20. Follow the code path for that case.
 */
.globl csum_partial
+EXPORT_SYMBOL(csum_partial)
csum_partial: /* %o0=buf, %o1=len, %o2=sum */
andcc %o0, 0x7, %g0 ! alignment problems?
bne csum_partial_fix_alignment ! yep, handle it
@@ -335,6 +337,7 @@ cc_dword_align:
*/
.align 8
.globl __csum_partial_copy_sparc_generic
+EXPORT_SYMBOL(__csum_partial_copy_sparc_generic)
__csum_partial_copy_sparc_generic:
/* %o0=src, %o1=dest, %g1=len, %g7=sum */
xor %o0, %o1, %o4 ! get changing bits
...
@@ -13,6 +13,7 @@
 * BSD4.4 portable checksum routine
 */
+#include <asm/export.h>
.text
csum_partial_fix_alignment:
@@ -37,6 +38,7 @@ csum_partial_fix_alignment:
.align 32
.globl csum_partial
+EXPORT_SYMBOL(csum_partial)
csum_partial: /* %o0=buff, %o1=len, %o2=sum */
prefetch [%o0 + 0x000], #n_reads
clr %o4
...
@@ -10,6 +10,7 @@
#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/head.h>
+#include <asm/export.h>
/* What we used to do was lock a TLB entry into a specific
 * TLB slot, clear the page with interrupts disabled, then
@@ -26,6 +27,7 @@
.text
.globl _clear_page
+EXPORT_SYMBOL(_clear_page)
_clear_page: /* %o0=dest */
ba,pt %xcc, clear_page_common
clr %o4
@@ -35,6 +37,7 @@ _clear_page: /* %o0=dest */
*/
.align 32
.globl clear_user_page
+EXPORT_SYMBOL(clear_user_page)
clear_user_page: /* %o0=dest, %o1=vaddr */
lduw [%g6 + TI_PRE_COUNT], %o2
sethi %hi(PAGE_OFFSET), %g2
...
@@ -5,6 +5,7 @@
#include <linux/linkage.h>
#include <asm/asi.h>
+#include <asm/export.h>
#define XCC xcc
@@ -90,3 +91,4 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
retl
clr %o0
ENDPROC(___copy_in_user)
+EXPORT_SYMBOL(___copy_in_user)
@@ -10,6 +10,7 @@
#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/head.h>
+#include <asm/export.h>
/* What we used to do was lock a TLB entry into a specific
 * TLB slot, clear the page with interrupts disabled, then
@@ -44,6 +45,7 @@
.align 32
.globl copy_user_page
.type copy_user_page,#function
+EXPORT_SYMBOL(copy_user_page)
copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */
lduw [%g6 + TI_PRE_COUNT], %o4
sethi %hi(PAGE_OFFSET), %g2
...
@@ -15,6 +15,7 @@
#include <asm/asmmacro.h>
#include <asm/page.h>
#include <asm/thread_info.h>
+#include <asm/export.h>
/* Work around cpp -rob */
#define ALLOC #alloc
@@ -119,6 +120,7 @@
__copy_user_begin:
.globl __copy_user
+EXPORT_SYMBOL(__copy_user)
dword_align:
andcc %o1, 1, %g0
be 4f
...
@@ -3,6 +3,8 @@
 * Copyright (C) 2005 David S. Miller <davem@davemloft.net>
 */
+#include <asm/export.h>
#ifdef __KERNEL__
#define GLOBAL_SPARE %g7
#else
@@ -63,6 +65,7 @@
add %o5, %o4, %o4
.globl FUNC_NAME
+EXPORT_SYMBOL(FUNC_NAME)
FUNC_NAME: /* %o0=src, %o1=dst, %o2=len, %o3=sum */
LOAD(prefetch, %o0 + 0x000, #n_reads)
xor %o0, %o1, %g1
...
@@ -17,6 +17,7 @@ along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
+#include <asm/export.h>
.text
.align 4
.globl __divdi3
@@ -279,3 +280,4 @@ __divdi3:
.LL81:
ret
restore
+EXPORT_SYMBOL(__divdi3)
#include <linux/linkage.h>
+#include <asm/export.h>
.register %g2,#scratch
@@ -65,6 +66,8 @@ ENTRY(__ffs)
add %o2, %g1, %o0
ENDPROC(ffs)
ENDPROC(__ffs)
+EXPORT_SYMBOL(__ffs)
+EXPORT_SYMBOL(ffs)
.section .popc_6insn_patch, "ax"
.word ffs
...
#include <linux/linkage.h>
+#include <asm/export.h>
.text
.align 32
@@ -7,6 +8,7 @@ ENTRY(__arch_hweight8)
nop
nop
ENDPROC(__arch_hweight8)
+EXPORT_SYMBOL(__arch_hweight8)
.section .popc_3insn_patch, "ax"
.word __arch_hweight8
sllx %o0, 64-8, %g1
@@ -19,6 +21,7 @@ ENTRY(__arch_hweight16)
nop
nop
ENDPROC(__arch_hweight16)
+EXPORT_SYMBOL(__arch_hweight16)
.section .popc_3insn_patch, "ax"
.word __arch_hweight16
sllx %o0, 64-16, %g1
@@ -31,6 +34,7 @@ ENTRY(__arch_hweight32)
nop
nop
ENDPROC(__arch_hweight32)
+EXPORT_SYMBOL(__arch_hweight32)
.section .popc_3insn_patch, "ax"
.word __arch_hweight32
sllx %o0, 64-32, %g1
@@ -43,6 +47,7 @@ ENTRY(__arch_hweight64)
nop
nop
ENDPROC(__arch_hweight64)
+EXPORT_SYMBOL(__arch_hweight64)
.section .popc_3insn_patch, "ax"
.word __arch_hweight64
retl
...
#include <linux/linkage.h>
+#include <asm/export.h>
.text
ENTRY(ip_fast_csum) /* %o0 = iph, %o1 = ihl */
@@ -31,3 +32,4 @@ ENTRY(ip_fast_csum) /* %o0 = iph, %o1 = ihl */
retl
and %o2, %o1, %o0
ENDPROC(ip_fast_csum)
+EXPORT_SYMBOL(ip_fast_csum)
-/*
- * Export of symbols defined in assembler
- */
-/* Tell string.h we don't want memcpy etc. as cpp defines */
-#define EXPORT_SYMTAB_STROPS
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <asm/checksum.h>
-#include <asm/uaccess.h>
-#include <asm/ftrace.h>
-/* string functions */
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strncmp);
-/* mem* functions */
-extern void *__memscan_zero(void *, size_t);
-extern void *__memscan_generic(void *, int, size_t);
-extern void *__bzero(void *, size_t);
-EXPORT_SYMBOL(memscan);
-EXPORT_SYMBOL(__memscan_zero);
-EXPORT_SYMBOL(__memscan_generic);
-EXPORT_SYMBOL(memcmp);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(__bzero);
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial);
-#ifdef CONFIG_MCOUNT
-EXPORT_SYMBOL(_mcount);
-#endif
-/*
- * sparc
- */
-#ifdef CONFIG_SPARC32
-extern int __ashrdi3(int, int);
-extern int __ashldi3(int, int);
-extern int __lshrdi3(int, int);
-extern int __muldi3(int, int);
-extern int __divdi3(int, int);
-extern void (*__copy_1page)(void *, const void *);
-extern void (*bzero_1page)(void *);
-extern void ___rw_read_enter(void);
-extern void ___rw_read_try(void);
-extern void ___rw_read_exit(void);
-extern void ___rw_write_enter(void);
-/* Networking helper routines. */
-EXPORT_SYMBOL(__csum_partial_copy_sparc_generic);
-/* Special internal versions of library functions. */
-EXPORT_SYMBOL(__copy_1page);
-EXPORT_SYMBOL(__memmove);
-EXPORT_SYMBOL(bzero_1page);
-/* Moving data to/from/in userspace. */
-EXPORT_SYMBOL(__copy_user);
-/* Used by asm/spinlock.h */
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(___rw_read_enter);
-EXPORT_SYMBOL(___rw_read_try);
-EXPORT_SYMBOL(___rw_read_exit);
-EXPORT_SYMBOL(___rw_write_enter);
-#endif
-EXPORT_SYMBOL(__ashrdi3);
-EXPORT_SYMBOL(__ashldi3);
-EXPORT_SYMBOL(__lshrdi3);
-EXPORT_SYMBOL(__muldi3);
-EXPORT_SYMBOL(__divdi3);
-#endif
-/*
- * sparc64
- */
-#ifdef CONFIG_SPARC64
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
-EXPORT_SYMBOL(__csum_partial_copy_from_user);
-EXPORT_SYMBOL(__csum_partial_copy_to_user);
-EXPORT_SYMBOL(ip_fast_csum);
-/* Moving data to/from/in userspace. */
-EXPORT_SYMBOL(___copy_to_user);
-EXPORT_SYMBOL(___copy_from_user);
-EXPORT_SYMBOL(___copy_in_user);
-EXPORT_SYMBOL(__clear_user);
-/* Atomic counter implementation. */
-#define ATOMIC_OP(op) \
-EXPORT_SYMBOL(atomic_##op); \
-EXPORT_SYMBOL(atomic64_##op);
-#define ATOMIC_OP_RETURN(op) \
-EXPORT_SYMBOL(atomic_##op##_return); \
-EXPORT_SYMBOL(atomic64_##op##_return);
-#define ATOMIC_FETCH_OP(op) \
-EXPORT_SYMBOL(atomic_fetch_##op); \
-EXPORT_SYMBOL(atomic64_fetch_##op);
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
-ATOMIC_OPS(add)
-ATOMIC_OPS(sub)
-#undef ATOMIC_OPS
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
-ATOMIC_OPS(and)
-ATOMIC_OPS(or)
-ATOMIC_OPS(xor)
-#undef ATOMIC_OPS
-#undef ATOMIC_FETCH_OP
-#undef ATOMIC_OP_RETURN
-#undef ATOMIC_OP
-EXPORT_SYMBOL(atomic64_dec_if_positive);
-/* Atomic bit operations. */
-EXPORT_SYMBOL(test_and_set_bit);
-EXPORT_SYMBOL(test_and_clear_bit);
-EXPORT_SYMBOL(test_and_change_bit);
-EXPORT_SYMBOL(set_bit);
-EXPORT_SYMBOL(clear_bit);
-EXPORT_SYMBOL(change_bit);
-/* Special internal versions of library functions. */
-EXPORT_SYMBOL(_clear_page);
-EXPORT_SYMBOL(clear_user_page);
-EXPORT_SYMBOL(copy_user_page);
-/* RAID code needs this */
-void VISenter(void);
-EXPORT_SYMBOL(VISenter);
-extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
-extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
-		unsigned long *);
-extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *,
-		unsigned long *, unsigned long *);
-extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *,
-		unsigned long *, unsigned long *, unsigned long *);
-EXPORT_SYMBOL(xor_vis_2);
-EXPORT_SYMBOL(xor_vis_3);
-EXPORT_SYMBOL(xor_vis_4);
-EXPORT_SYMBOL(xor_vis_5);
-extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *);
-extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *,
-		unsigned long *);
-extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *,
-		unsigned long *, unsigned long *);
-extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *,
-		unsigned long *, unsigned long *, unsigned long *);
-EXPORT_SYMBOL(xor_niagara_2);
-EXPORT_SYMBOL(xor_niagara_3);
-EXPORT_SYMBOL(xor_niagara_4);
-EXPORT_SYMBOL(xor_niagara_5);
-#endif
@@ -10,6 +10,7 @@
#include <asm/psr.h>
#include <asm/smp.h>
#include <asm/spinlock.h>
+#include <asm/export.h>
.text
.align 4
@@ -48,6 +49,7 @@ ___rw_write_enter_spin_on_wlock:
ld [%g1], %g2
.globl ___rw_read_enter
+EXPORT_SYMBOL(___rw_read_enter)
___rw_read_enter:
orcc %g2, 0x0, %g0
bne,a ___rw_read_enter_spin_on_wlock
@@ -59,6 +61,7 @@ ___rw_read_enter:
mov %g4, %o7
.globl ___rw_read_exit
+EXPORT_SYMBOL(___rw_read_exit)
___rw_read_exit:
orcc %g2, 0x0, %g0
bne,a ___rw_read_exit_spin_on_wlock
@@ -70,6 +73,7 @@ ___rw_read_exit:
mov %g4, %o7
.globl ___rw_read_try
+EXPORT_SYMBOL(___rw_read_try)
___rw_read_try:
orcc %g2, 0x0, %g0
bne ___rw_read_try_spin_on_wlock
@@ -81,6 +85,7 @@ ___rw_read_try:
mov %g4, %o7
.globl ___rw_write_enter
+EXPORT_SYMBOL(___rw_write_enter)
___rw_write_enter:
orcc %g2, 0x0, %g0
bne ___rw_write_enter_spin_on_wlock
...
#include <linux/linkage.h>
+#include <asm/export.h>
ENTRY(__lshrdi3)
cmp %o2, 0
@@ -25,3 +26,4 @@ ENTRY(__lshrdi3)
retl
nop
ENDPROC(__lshrdi3)
+EXPORT_SYMBOL(__lshrdi3)
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
+#include <asm/export.h>
/*
 * This is the main variant and is called by C code. GCC's -pg option
@@ -16,6 +17,7 @@
.align 32
.globl _mcount
.type _mcount,#function
+EXPORT_SYMBOL(_mcount)
.globl mcount
.type mcount,#function
_mcount:
...
@@ -6,6 +6,7 @@
#include <linux/linkage.h>
#include <asm/asm.h>
+#include <asm/export.h>
.text
ENTRY(memcmp)
@@ -25,3 +26,4 @@ ENTRY(memcmp)
2: retl
mov 0, %o0
ENDPROC(memcmp)
+EXPORT_SYMBOL(memcmp)
@@ -7,6 +7,7 @@
 * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
+#include <asm/export.h>
#define FUNC(x) \
.globl x; \
.type x,@function; \
@@ -143,8 +144,10 @@ x:
#ifdef __KERNEL__
FUNC(amemmove)
FUNC(__memmove)
+EXPORT_SYMBOL(__memmove)
#endif
FUNC(memmove)
+EXPORT_SYMBOL(memmove)
cmp %o0, %o1
mov %o0, %g7
bleu 9f
@@ -202,6 +205,7 @@ FUNC(memmove)
add %o0, 2, %o0
FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
+EXPORT_SYMBOL(memcpy)
sub %o0, %o1, %o4
mov %o0, %g7
...
@@ -5,6 +5,7 @@
*/
#include <linux/linkage.h>
+#include <asm/export.h>
.text
ENTRY(memmove) /* o0=dst o1=src o2=len */
@@ -57,3 +58,4 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */
stb %g7, [%o0 - 0x1]
ba,a,pt %xcc, 99b
ENDPROC(memmove)
+EXPORT_SYMBOL(memmove)
@@ -4,6 +4,8 @@
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */
+#include <asm/export.h>
/* In essence, this is just a fancy strlen. */
#define LO_MAGIC 0x01010101
@@ -13,6 +15,8 @@
.align 4
.globl __memscan_zero, __memscan_generic
.globl memscan
+EXPORT_SYMBOL(__memscan_zero)
+EXPORT_SYMBOL(__memscan_generic)
__memscan_zero:
/* %o0 = addr, %o1 = size */
cmp %o1, 0
...
@@ -5,6 +5,8 @@
 * Copyright (C) 1998 David S. Miller (davem@redhat.com)
 */
+#include <asm/export.h>
#define HI_MAGIC 0x8080808080808080
#define LO_MAGIC 0x0101010101010101
#define ASI_PL 0x88
@@ -13,6 +15,8 @@
.align 32
.globl __memscan_zero, __memscan_generic
.globl memscan
+EXPORT_SYMBOL(__memscan_zero)
+EXPORT_SYMBOL(__memscan_generic)
__memscan_zero:
/* %o0 = bufp, %o1 = size */
...
@@ -9,6 +9,7 @@
 */
#include <asm/ptrace.h>
+#include <asm/export.h>
/* Work around cpp -rob */
#define ALLOC #alloc
@@ -63,6 +64,8 @@ __bzero_begin:
.globl __bzero
.globl memset
+EXPORT_SYMBOL(__bzero)
+EXPORT_SYMBOL(memset)
.globl __memset_start, __memset_end
__memset_start:
memset:
...
@@ -17,6 +17,7 @@ along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
+#include <asm/export.h>
.text
.align 4
.globl __muldi3
@@ -74,3 +75,4 @@ __muldi3:
add %l2, %l0, %i0
ret
restore %g0, %l3, %o1
+EXPORT_SYMBOL(__muldi3)
@@ -7,6 +7,7 @@
#include <linux/linkage.h>
#include <asm/asm.h>
+#include <asm/export.h>
#define LO_MAGIC 0x01010101
#define HI_MAGIC 0x80808080
@@ -78,3 +79,4 @@ ENTRY(strlen)
retl
mov 2, %o0
ENDPROC(strlen)
+EXPORT_SYMBOL(strlen)
@@ -4,6 +4,7 @@
 */
#include <linux/linkage.h>
+#include <asm/export.h>
.text
ENTRY(strncmp)
@@ -116,3 +117,4 @@ ENTRY(strncmp)
retl
sub %o3, %o0, %o0
ENDPROC(strncmp)
+EXPORT_SYMBOL(strncmp)
@@ -6,6 +6,7 @@
#include <linux/linkage.h>
#include <asm/asi.h>
+#include <asm/export.h>
.text
ENTRY(strncmp)
@@ -28,3 +29,4 @@ ENTRY(strncmp)
retl
clr %o0
ENDPROC(strncmp)
+EXPORT_SYMBOL(strncmp)
@@ -13,6 +13,7 @@
#include <asm/asi.h>
#include <asm/dcu.h>
#include <asm/spitfire.h>
+#include <asm/export.h>
/*
 * Requirements:
@@ -90,6 +91,7 @@ ENTRY(xor_vis_2)
retl
wr %g0, 0, %fprs
ENDPROC(xor_vis_2)
+EXPORT_SYMBOL(xor_vis_2)
ENTRY(xor_vis_3)
rd %fprs, %o5
@@ -156,6 +158,7 @@ ENTRY(xor_vis_3)
retl
wr %g0, 0, %fprs
ENDPROC(xor_vis_3)
+EXPORT_SYMBOL(xor_vis_3)
ENTRY(xor_vis_4)
rd %fprs, %o5
@@ -241,6 +244,7 @@ ENTRY(xor_vis_4)
retl
wr %g0, 0, %fprs
ENDPROC(xor_vis_4)
+EXPORT_SYMBOL(xor_vis_4)
ENTRY(xor_vis_5)
save %sp, -192, %sp
@@ -347,6 +351,7 @@ ENTRY(xor_vis_5)
ret
restore
ENDPROC(xor_vis_5)
+EXPORT_SYMBOL(xor_vis_5)
/* Niagara versions. */
ENTRY(xor_niagara_2) /* %o0=bytes, %o1=dest, %o2=src */
@@ -393,6 +398,7 @@ ENTRY(xor_niagara_2) /* %o0=bytes, %o1=dest, %o2=src */
ret
restore
ENDPROC(xor_niagara_2)
+EXPORT_SYMBOL(xor_niagara_2)
ENTRY(xor_niagara_3) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */
save %sp, -192, %sp
@@ -454,6 +460,7 @@ ENTRY(xor_niagara_3) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */
ret
restore
ENDPROC(xor_niagara_3)
+EXPORT_SYMBOL(xor_niagara_3)
ENTRY(xor_niagara_4) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
save %sp, -192, %sp
@@ -536,6 +543,7 @@ ENTRY(xor_niagara_4) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
ret
restore
ENDPROC(xor_niagara_4)
+EXPORT_SYMBOL(xor_niagara_4)
ENTRY(xor_niagara_5) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=src4 */
save %sp, -192, %sp
@@ -634,3 +642,4 @@ ENTRY(xor_niagara_5) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=s
ret
restore
ENDPROC(xor_niagara_5)
+EXPORT_SYMBOL(xor_niagara_5)