Commit 9e8bd663 authored by Keith M. Wesolowski

Merge foobazco.org:/sources/2.5-bk
into foobazco.org:/sources/2.5-sparc-todave
parents 8e764e0d 16253e07
@@ -350,21 +350,15 @@ smp4m_ticker:
  * for cross calls. That has a separate entry point below.
  */
 maybe_smp4m_msg:
-        GET_PROCESSOR_MID(o3)
+        GET_PROCESSOR4M_ID(o3)
         set sun4m_interrupts, %l5
         ld [%l5], %o5
-        sethi %hi(0x60000000), %o4
+        sethi %hi(0x40000000), %o2
         sll %o3, 12, %o3
         ld [%o5 + %o3], %o1
-        andcc %o1, %o4, %g0
+        andcc %o1, %o2, %g0
         be,a smp4m_ticker
         cmp %l7, 14
-        sethi %hi(0x40000000), %o2
-        add %o5, %o3, %o5
-        andcc %o1, %o2, %g0
-        be,a 1f
-        sethi %hi(0x20000000), %o2
-1:
         st %o2, [%o5 + 0x4]
         WRITE_PAUSE
         ld [%o5], %g0
@@ -374,15 +368,9 @@ maybe_smp4m_msg:
         WRITE_PAUSE
         wr %l4, PSR_ET, %psr
         WRITE_PAUSE
-        srl %o2, (16+14), %o2
-        tst %o2
-        bne 2f
-        nop
         call smp_reschedule_irq
-        add %o7, 8, %o7
-2:
-        call smp_stop_cpu_irq
         nop
         RESTORE_ALL

         .align 4
@@ -390,7 +378,7 @@ maybe_smp4m_msg:
 linux_trap_ipi15_sun4m:
         SAVE_ALL
         sethi %hi(0x80000000), %o2
-        GET_PROCESSOR_MID(o0)
+        GET_PROCESSOR4M_ID(o0)
         set sun4m_interrupts, %l5
         ld [%l5], %o5
         sll %o0, 12, %o0
......
@@ -746,9 +746,6 @@ go_to_highmem:
         jmpl %g1, %g0
         nop

-/* This is to align init_thread_union properly, be careful. -DaveM */
-        .align 8192
-
 /* The code above should be at beginning and we have to take care about
  * short jumps, as branching to .text.init section from .text is usually
  * impossible */
......
@@ -21,5 +21,7 @@ EXPORT_SYMBOL(init_task);
  * If this is not aligned on a 8k boundry, then you should change code
  * in etrap.S which assumes it.
  */
-__asm__(".section \".text\",#alloc\n");
-union thread_union init_thread_union = { INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union
+        __attribute__((section (".text")))
+        __attribute__((aligned (THREAD_SIZE)))
+        = { INIT_THREAD_INFO(init_task) };
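The new declaration folds two separate pieces into one: the old __asm__(".section ...") line placed init_thread_union in .text, and the .align 8192 removed from head.S above supplied its alignment; the GCC section and aligned attributes now express both at the definition itself. The alignment matters because trap-entry code conventionally finds the current thread_info by masking the kernel stack pointer down to a THREAD_SIZE boundary. The sketch below only illustrates that convention; the helper name and the hard-coded 8192 are assumptions for illustration, not code from this diff or from etrap.S.

    #include <stdint.h>

    #define THREAD_SIZE 8192UL  /* the 8k boundary the comment refers to */

    /* Hypothetical helper: round a kernel stack pointer down to the base of
     * its thread_union.  The mask only lands on the right address because
     * the union is THREAD_SIZE-aligned, which is what the removed
     * ".align 8192" used to guarantee and the aligned attribute now does. */
    static inline uintptr_t thread_union_base(uintptr_t sp)
    {
            return sp & ~(THREAD_SIZE - 1);
    }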
@@ -180,21 +180,20 @@ EXPORT_SYMBOL(io_remap_page_range);
 /* EXPORT_SYMBOL(iounit_map_dma_init); */
 /* EXPORT_SYMBOL(iounit_map_dma_page); */
-/* Btfixup stuff cannot have versions, it would be complicated too much */
 #ifndef CONFIG_SMP
-EXPORT_SYMBOL_NOVERS(BTFIXUP_CALL(___xchg32));
+EXPORT_SYMBOL(BTFIXUP_CALL(___xchg32));
 #else
-EXPORT_SYMBOL_NOVERS(BTFIXUP_CALL(__smp_processor_id));
+EXPORT_SYMBOL(BTFIXUP_CALL(__hard_smp_processor_id));
 #endif

-EXPORT_SYMBOL_NOVERS(BTFIXUP_CALL(enable_irq));
-EXPORT_SYMBOL_NOVERS(BTFIXUP_CALL(disable_irq));
-EXPORT_SYMBOL_NOVERS(BTFIXUP_CALL(__irq_itoa));
-EXPORT_SYMBOL_NOVERS(BTFIXUP_CALL(mmu_unlockarea));
-EXPORT_SYMBOL_NOVERS(BTFIXUP_CALL(mmu_lockarea));
-EXPORT_SYMBOL_NOVERS(BTFIXUP_CALL(mmu_get_scsi_sgl));
-EXPORT_SYMBOL_NOVERS(BTFIXUP_CALL(mmu_get_scsi_one));
-EXPORT_SYMBOL_NOVERS(BTFIXUP_CALL(mmu_release_scsi_sgl));
-EXPORT_SYMBOL_NOVERS(BTFIXUP_CALL(mmu_release_scsi_one));
+EXPORT_SYMBOL(BTFIXUP_CALL(enable_irq));
+EXPORT_SYMBOL(BTFIXUP_CALL(disable_irq));
+EXPORT_SYMBOL(BTFIXUP_CALL(__irq_itoa));
+EXPORT_SYMBOL(BTFIXUP_CALL(mmu_unlockarea));
+EXPORT_SYMBOL(BTFIXUP_CALL(mmu_lockarea));
+EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_sgl));
+EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_one));
+EXPORT_SYMBOL(BTFIXUP_CALL(mmu_release_scsi_sgl));
+EXPORT_SYMBOL(BTFIXUP_CALL(mmu_release_scsi_one));

 #ifdef CONFIG_SBUS
 EXPORT_SYMBOL(sbus_root);
@@ -271,15 +270,15 @@ EXPORT_SYMBOL(__prom_getsibling);
 /* sparc library symbols */
 EXPORT_SYMBOL(bcopy);
 EXPORT_SYMBOL(memchr);
-EXPORT_SYMBOL_NOVERS(memscan);
-EXPORT_SYMBOL_NOVERS(strlen);
+EXPORT_SYMBOL(memscan);
+EXPORT_SYMBOL(strlen);
 EXPORT_SYMBOL(strnlen);
 EXPORT_SYMBOL(strcpy);
 EXPORT_SYMBOL(strncpy);
 EXPORT_SYMBOL(strcat);
 EXPORT_SYMBOL(strncat);
 EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL_NOVERS(strncmp);
+EXPORT_SYMBOL(strncmp);
 EXPORT_SYMBOL(strchr);
 EXPORT_SYMBOL(strrchr);
 EXPORT_SYMBOL(strpbrk);
@@ -309,21 +308,17 @@ EXPORT_SYMBOL(csum_partial);
 /* Cache flushing. */
 EXPORT_SYMBOL(sparc_flush_page_to_ram);

-/* No version information on this, heavily used in inline asm,
- * and will always be 'void __ret_efault(void)'.
- */
-EXPORT_SYMBOL_NOVERS(__ret_efault);
-
-/* No version information on these, as gcc produces such symbols. */
-EXPORT_SYMBOL_NOVERS(memcmp);
-EXPORT_SYMBOL_NOVERS(memcpy);
-EXPORT_SYMBOL_NOVERS(memset);
-EXPORT_SYMBOL_NOVERS(memmove);
-EXPORT_SYMBOL_NOVERS(__ashrdi3);
-EXPORT_SYMBOL_NOVERS(__ashldi3);
-EXPORT_SYMBOL_NOVERS(__lshrdi3);
-EXPORT_SYMBOL_NOVERS(__muldi3);
-EXPORT_SYMBOL_NOVERS(__divdi3);
+EXPORT_SYMBOL(__ret_efault);
+
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__divdi3);

 EXPORT_SYMBOL_DOT(rem);
 EXPORT_SYMBOL_DOT(urem);
......
@@ -343,7 +343,7 @@ static int sunos_filldir(void * __buf, const char * name, int namlen,
         put_user(reclen, &dirent->d_reclen);
         copy_to_user(dirent->d_name, name, namlen);
         put_user(0, dirent->d_name + namlen);
-        ((char *) dirent) += reclen;
+        dirent = (void *)dirent + reclen;
         buf->curr = dirent;
         buf->count -= reclen;
         return 0;
@@ -422,7 +422,7 @@ static int sunos_filldirentry(void * __buf, const char * name, int namlen,
         put_user(reclen, &dirent->d_reclen);
         copy_to_user(dirent->d_name, name, namlen);
         put_user(0, dirent->d_name + namlen);
-        ((char *) dirent) += reclen;
+        dirent = (void *)dirent + reclen;
         buf->curr = dirent;
         buf->count -= reclen;
         return 0;
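Both sunos_filldir and sunos_filldirentry advance the record pointer the same way. The old form, ((char *) dirent) += reclen, uses a cast as an lvalue, a GNU C extension that later GCC releases reject; the replacement steps the pointer forward by reclen bytes through GCC's byte-granular void-pointer arithmetic. A small self-contained sketch of the equivalent, strictly-ISO way to write the same step (advance_dirent and the opaque struct are illustrative names, not taken from the kernel):

    #include <stddef.h>

    struct sunos_dirent;  /* layout does not matter here, only the byte offset */

    /* Step to the next variable-length directory record: same effect as
     * "dirent = (void *)dirent + reclen" in the diff, written with a char *
     * cast so it is plain ISO C rather than a GNU extension. */
    static struct sunos_dirent *advance_dirent(struct sunos_dirent *d, size_t reclen)
    {
            return (struct sunos_dirent *)((char *)d + reclen);
    }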
......
@@ -7,5 +7,7 @@ EXTRA_AFLAGS := -ansi -DST_DIV0=0x02
 lib-y := mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o memcpy.o memset.o \
          strlen.o checksum.o blockops.o memscan.o memcmp.o strncmp.o \
          strncpy_from_user.o divdi3.o udivdi3.o strlen_user.o \
-         copy_user.o locks.o atomic.o atomic32.o bitops.o debuglocks.o \
+         copy_user.o locks.o atomic.o atomic32.o bitops.o \
          lshrdi3.o ashldi3.o rwsem.o muldi3.o bitext.o
+
+lib-$(CONFIG_DEBUG_SPINLOCK) += debuglocks.o
@@ -12,8 +12,7 @@
 #include <asm/psr.h>
 #include <asm/system.h>

-/* To enable this code, just define SPIN_LOCK_DEBUG in asm/spinlock.h */
-#ifdef SPIN_LOCK_DEBUG
+#ifdef CONFIG_SMP

 /* Some notes on how these debugging routines work. When a lock is acquired
  * an extra debugging member lock->owner_pc is set to the caller of the lock
@@ -200,4 +199,4 @@ void _do_write_unlock(rwlock_t *rw)
         rw->lock = 0;
 }

-#endif /* SPIN_LOCK_DEBUG */
+#endif /* SMP */
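With this pair of changes the debugging lock routines are no longer enabled by defining SPIN_LOCK_DEBUG in asm/spinlock.h: the file is built only when the Makefile rule above selects it via CONFIG_DEBUG_SPINLOCK, and its contents are guarded by CONFIG_SMP. The comment kept in the file describes the mechanism: each acquisition records the caller in lock->owner_pc so a stuck lock can be reported together with the place it was taken. Below is a toy, self-contained rendering of that owner_pc idea; the type and function names are illustrative and this is not the kernel's _do_spin_lock family.

    #include <stddef.h>

    struct debug_spinlock {
            volatile unsigned char lock;   /* 0 = free, 1 = held */
            void *owner_pc;                /* where the lock was taken */
    };

    static void debug_spin_lock(struct debug_spinlock *lp)
    {
            while (__sync_lock_test_and_set(&lp->lock, 1))
                    ;                      /* spin until the holder releases */
            lp->owner_pc = __builtin_return_address(0);
    }

    static void debug_spin_unlock(struct debug_spinlock *lp)
    {
            lp->owner_pc = NULL;
            __sync_lock_release(&lp->lock);
    }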
@@ -327,6 +327,7 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
 #ifdef DEBUG_MATHEMU
                 printk("unknown FPop1: %03lx\n",(insn>>5)&0x1ff);
 #endif
+                break;
         }
         } else if ((insn & 0xc1f80000) == 0x81a80000) /* FPOP2 */ {
                 switch ((insn >> 5) & 0x1ff) {
@@ -340,6 +341,7 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
 #ifdef DEBUG_MATHEMU
                 printk("unknown FPop2: %03lx\n",(insn>>5)&0x1ff);
 #endif
+                break;
         }
         }
......
@@ -18,17 +18,6 @@
 #define GET_PROCESSOR4D_ID(reg) \
         lda [%g0] ASI_M_VIKING_TMP1, %reg;

-/* Blackbox */
-#define GET_PROCESSOR_ID(reg) \
-        sethi %hi(___b_smp_processor_id), %reg; \
-        sethi %hi(boot_cpu_id), %reg; \
-        ldub [%reg + %lo(boot_cpu_id)], %reg;
-
-#define GET_PROCESSOR_MID(reg) \
-        rd %tbr, %reg; \
-        srl %reg, 12, %reg; \
-        and %reg, 3, %reg;
-
 /* All trap entry points _must_ begin with this macro or else you
  * lose. It makes sure the kernel has a proper window so that
  * c-code can be called.
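Of the two macros deleted here, GET_PROCESSOR_MID is the one the entry.S hunks above stopped using, in favour of GET_PROCESSOR4M_ID (defined elsewhere in this header and not shown in the diff). What the removed macro computed is visible in its body: it read the trap base register and kept bits 13:12, so it could only name CPUs 0 through 3. In C terms (an illustrative helper, not kernel code):

    /* Equivalent of the removed macro body:  rd %tbr; srl 12; and 3  */
    static inline unsigned int mid_from_tbr(unsigned long tbr)
    {
            return (unsigned int)((tbr >> 12) & 3);
    }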
......
@@ -53,8 +53,8 @@ void smp_info(struct seq_file *);
 BTFIXUPDEF_CALL(void, smp_cross_call, smpfunc_t, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long)
 BTFIXUPDEF_CALL(void, smp_message_pass, int, int, unsigned long, int)
-BTFIXUPDEF_CALL(int, __smp_processor_id, void)
-BTFIXUPDEF_BLACKBOX(smp_processor_id)
+BTFIXUPDEF_CALL(int, __hard_smp_processor_id, void)
+BTFIXUPDEF_BLACKBOX(hard_smp_processor_id)
 BTFIXUPDEF_BLACKBOX(load_current)

 #define smp_cross_call(func,arg1,arg2,arg3,arg4,arg5) BTFIXUP_CALL(smp_cross_call)(func,arg1,arg2,arg3,arg4,arg5)
@@ -129,7 +129,7 @@ extern __inline__ int hard_smp_processor_id(void)
                              "=&r" (cpuid));
            See btfixup.h and btfixupprep.c to understand how a blackbox works.
          */
-        __asm__ __volatile__("sethi %%hi(___b_smp_processor_id), %0\n\t"
+        __asm__ __volatile__("sethi %%hi(___b_hard_smp_processor_id), %0\n\t"
                              "sethi %%hi(boot_cpu_id), %0\n\t"
                              "ldub [%0 + %%lo(boot_cpu_id)], %0\n\t" :
                              "=&r" (cpuid));
@@ -141,7 +141,7 @@ extern __inline__ int hard_smp_processor_id(void)
         int cpuid;

         __asm__ __volatile__("mov %%o7, %%g1\n\t"
-                             "call ___f___smp_processor_id\n\t"
+                             "call ___f___hard_smp_processor_id\n\t"
                              " nop\n\t"
                              "mov %%g2, %0\n\t" : "=r"(cpuid) : : "g1", "g2");
         return cpuid;
......