Commit a662c5a3 authored by Pete Zaitcev, committed by David S. Miller

SPARC32: First pass at getting this platform functional under 2.5.x

parent d893a438
@@ -41,15 +41,16 @@ SUBDIRS += arch/sparc/kernel arch/sparc/lib arch/sparc/prom \
 CORE_FILES := arch/sparc/kernel/kernel.o arch/sparc/mm/mm.o $(CORE_FILES) \
	arch/sparc/math-emu/math-emu.o
-LIBS := $(TOPDIR)/lib/lib.a $(LIBS) $(TOPDIR)/arch/sparc/prom/promlib.a \
-	$(TOPDIR)/arch/sparc/lib/lib.a
+LIBS := $(LIBS) arch/sparc/prom/promlib.a arch/sparc/lib/lib.a
 # This one has to come last
 SUBDIRS += arch/sparc/boot
 CORE_FILES_NO_BTFIX := $(CORE_FILES)
 CORE_FILES += arch/sparc/boot/btfix.o
+# Export what is needed by arch/sparc/boot/Makefile
 export CORE_FILES_NO_BTFIX
+export INIT
 archclean:
	rm -f $(TOPDIR)/vmlinux.aout
@@ -60,7 +61,7 @@ archmrproper:
 prepare: check_asm
-check_asm: include/linux/version.h include/linux/asm include/config/MARKER
+check_asm: include/linux/version.h include/asm include/config/MARKER
	$(MAKE) -C arch/sparc/kernel check_asm
 tftpboot.img:
...
@@ -22,9 +22,12 @@ btfixupprep: btfixupprep.c
 clean:
	rm -f btfixupprep piggyback tftpboot.img btfix.o btfix.s
-BTOBJS := $(HEAD) init/main.o init/version.o
-BTLIBS := $(CORE_FILES_NO_BTFIX) $(FILESYSTEMS) \
-	$(DRIVERS) $(NETWORKS)
+#BTOBJS := $(HEAD) init/main.o init/version.o
+BTOBJS := $(HEAD) $(INIT)
+#BTLIBS := $(CORE_FILES_NO_BTFIX) $(FILESYSTEMS) \
+#	$(DRIVERS) $(NETWORKS)
+# Threw away drivers because they must not have btfixup definitions.
+BTLIBS := $(CORE_FILES_NO_BTFIX) $(LIBS)
 # I wanted to make this depend upon BTOBJS so that a parallel
 # build would work, but this fails because $(HEAD) cannot work
@@ -34,7 +37,6 @@ vmlinux.o: FORCE
	$(LD) $(LDFLAGS) -r $(patsubst %,$(TOPDIR)/%,$(BTOBJS)) \
		--start-group \
		$(patsubst %,$(TOPDIR)/%,$(BTLIBS)) \
-		$(LIBS) \
		--end-group -o vmlinux.o
 btfix.s: btfixupprep vmlinux.o
...
@@ -239,7 +239,10 @@ endmenu
 mainmenu_option next_comment
 comment 'Kernel hacking'
+bool 'Debug memory allocations' CONFIG_DEBUG_SLAB
 bool 'Magic SysRq key' CONFIG_MAGIC_SYSRQ
+bool 'Spinlock debugging' CONFIG_DEBUG_SPINLOCK
 endmenu
 source lib/Config.in
@@ -31,6 +31,7 @@
 #include <asm/signal.h>
 #include <asm/obio.h>
 #include <asm/mxcc.h>
+#include <asm/thread_info.h>
 #include <asm/asmmacro.h>
@@ -1234,7 +1235,8 @@ C_LABEL(sys_ptrace):
	call C_LABEL(do_ptrace)
	add %sp, REGWIN_SZ, %o0
-	ld [%curptr + AOFF_task_ptrace], %l5
+	ld [%curptr + TI_TASK], %l5
+	ld [%l5 + AOFF_task_ptrace], %l5
	andcc %l5, 0x02, %g0
	be 1f
	nop
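This two-load pattern recurs through the rest of entry.S: in 2.5, %curptr (%g6) points at the thread_info rather than the task_struct, so reaching a task field costs one extra dependent load through TI_TASK. A minimal C sketch of what the old and new sequences compute, using stand-in types rather than the real kernel definitions:

/* Stand-ins for illustration only; the real layouts live in
 * <asm/thread_info.h> and <linux/sched.h>. */
struct task_struct { unsigned long ptrace; };
struct thread_info { struct task_struct *task; };

static unsigned long task_ptrace(struct thread_info *curptr)
{
	/* 2.4: %curptr was the task_struct itself, one load sufficed.
	 * 2.5: curptr->task (the TI_TASK load), then task->ptrace. */
	return curptr->task->ptrace;
}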
@@ -1284,7 +1286,8 @@ C_LABEL(sys_sigpause):
	call C_LABEL(do_sigpause)
	add %sp, REGWIN_SZ, %o1
-	ld [%curptr + AOFF_task_ptrace], %l5
+	ld [%curptr + TI_TASK], %l5
+	ld [%l5 + AOFF_task_ptrace], %l5
	andcc %l5, 0x02, %g0
	be 1f
	nop
@@ -1302,7 +1305,8 @@ C_LABEL(sys_sigsuspend):
	call C_LABEL(do_sigsuspend)
	add %sp, REGWIN_SZ, %o0
-	ld [%curptr + AOFF_task_ptrace], %l5
+	ld [%curptr + TI_TASK], %l5
+	ld [%l5 + AOFF_task_ptrace], %l5
	andcc %l5, 0x02, %g0
	be 1f
	nop
@@ -1321,7 +1325,8 @@ C_LABEL(sys_rt_sigsuspend):
	call C_LABEL(do_rt_sigsuspend)
	add %sp, REGWIN_SZ, %o2
-	ld [%curptr + AOFF_task_ptrace], %l5
+	ld [%curptr + TI_TASK], %l5
+	ld [%l5 + AOFF_task_ptrace], %l5
	andcc %l5, 0x02, %g0
	be 1f
	nop
@@ -1339,7 +1344,8 @@ C_LABEL(sys_sigreturn):
	call C_LABEL(do_sigreturn)
	add %sp, REGWIN_SZ, %o0
-	ld [%curptr + AOFF_task_ptrace], %l5
+	ld [%curptr + TI_TASK], %l5
+	ld [%l5 + AOFF_task_ptrace], %l5
	andcc %l5, 0x02, %g0
	be 1f
	nop
@@ -1359,7 +1365,8 @@ C_LABEL(sys_rt_sigreturn):
	call C_LABEL(do_rt_sigreturn)
	add %sp, REGWIN_SZ, %o0
-	ld [%curptr + AOFF_task_ptrace], %l5
+	ld [%curptr + TI_TASK], %l5
+	ld [%l5 + AOFF_task_ptrace], %l5
	andcc %l5, 0x02, %g0
	be 1f
	nop
@@ -1384,16 +1391,17 @@ C_LABEL(sys_fork):
	mov %o7, %l5
 flush_patch_two:
	FLUSH_ALL_KERNEL_WINDOWS;
+	ld [%curptr + TI_TASK], %o4
	rd %psr, %g4
	WRITE_PAUSE
	mov SIGCHLD, %o0	! arg0: clone flags
	rd %wim, %g5
	WRITE_PAUSE
	mov %fp, %o1		! arg1: usp
-	std %g4, [%curptr + AOFF_task_thread + AOFF_thread_fork_kpsr]
+	std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
	add %sp, REGWIN_SZ, %o2	! arg2: pt_regs ptr
	mov 0, %o3
-	call C_LABEL(do_fork_FIXME_NOW_RETURNS_TASK_STRUCT)
+	call C_LABEL(sparc_do_fork)
	mov %l5, %o7
 /* Whee, kernel threads! */
@@ -1402,6 +1410,7 @@ C_LABEL(sys_clone):
	mov %o7, %l5
 flush_patch_three:
	FLUSH_ALL_KERNEL_WINDOWS;
+	ld [%curptr + TI_TASK], %o4
	rd %psr, %g4
	WRITE_PAUSE
@@ -1413,11 +1422,10 @@ flush_patch_three:
	mov %fp, %o1		! yes, use callers usp
	andn %o1, 7, %o1	! no, align to 8 bytes
 1:
-	std %g4, [%curptr + AOFF_task_thread + AOFF_thread_fork_kpsr]
+	std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
	add %sp, REGWIN_SZ, %o2	! arg2: pt_regs ptr
	mov 0, %o3
-	/* FIXME: remove CLONE_IDLETASK from flags first */
-	call C_LABEL(do_fork_WITHOUT_CLONE_IDLETASK)
+	call C_LABEL(sparc_do_fork)
	mov %l5, %o7
 /* Whee, real vfork! */
@@ -1425,17 +1433,18 @@ flush_patch_three:
 C_LABEL(sys_vfork):
 flush_patch_four:
	FLUSH_ALL_KERNEL_WINDOWS;
+	ld [%curptr + TI_TASK], %o4
	rd %psr, %g4
	WRITE_PAUSE
	rd %wim, %g5
	WRITE_PAUSE
-	std %g4, [%curptr + AOFF_task_thread + AOFF_thread_fork_kpsr]
+	std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
	sethi %hi(0x4000 | 0x0100 | SIGCHLD), %o0
	mov %fp, %o1
	or %o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
-	sethi %hi(C_LABEL(do_fork_FIXME_NOW_RETURNS_TASK_STRUCT)), %l1
+	sethi %hi(C_LABEL(sparc_do_fork)), %l1
	mov 0, %o3
-	jmpl %l1 + %lo(C_LABEL(do_fork_FIXME_NOW_RETURNS_TASK_STRUCT)), %g0
+	jmpl %l1 + %lo(C_LABEL(sparc_do_fork)), %g0
	add %sp, REGWIN_SZ, %o2
	.align 4
@@ -1464,8 +1473,11 @@ linux_syscall_trace:
	.globl C_LABEL(ret_from_fork)
 C_LABEL(ret_from_fork):
+#if CONFIG_SMP || CONFIG_PREEMPT
+	/* XXX Wrong location: call schedule_tail in every ret_sys_call. */
	call schedule_tail
	mov %g3, %o0
+#endif
	b C_LABEL(ret_sys_call)
	ld [%sp + REGWIN_SZ + PT_I0], %o0
@@ -1492,7 +1504,8 @@ syscall_is_too_hard:
	mov %i1, %o1
	mov %i2, %o2
-	ld [%curptr + AOFF_task_ptrace], %l5
+	ld [%curptr + TI_TASK], %l5
+	ld [%l5 + AOFF_task_ptrace], %l5
	mov %i3, %o3
	andcc %l5, 0x02, %g0
	mov %i4, %o4
@@ -1506,7 +1519,8 @@ syscall_is_too_hard:
	.globl C_LABEL(ret_sys_call)
 C_LABEL(ret_sys_call):
-	ld [%curptr + AOFF_task_ptrace], %l6
+	ld [%curptr + TI_TASK], %l6
+	ld [%l6 + AOFF_task_ptrace], %l6
	cmp %o0, -ENOIOCTLCMD
	ld [%sp + REGWIN_SZ + PT_PSR], %g3
	set PSR_C, %g2
@@ -1858,7 +1872,7 @@ kuw_patch1_7win: sll %o3, 6, %o3
 * traps with the old method of just doing flush_user_windows().
 */
 C_LABEL(kill_user_windows):
-	ld [%g6 + AOFF_task_thread + AOFF_thread_uwinmask], %o0	! get current umask
+	ld [%g6 + TI_UWINMASK], %o0	! get current umask
	orcc %g0, %o0, %g0	! if no bits set, we are done
	be 3f			! nothing to do
	rd %psr, %o5		! must clear interrupts
@@ -1866,7 +1880,7 @@ C_LABEL(kill_user_windows):
	wr %o4, 0x0, %psr	! the uwinmask state
	WRITE_PAUSE		! burn them cycles
 1:
-	ld [%g6 + AOFF_task_thread + AOFF_thread_uwinmask], %o0	! get consistent state
+	ld [%g6 + TI_UWINMASK], %o0	! get consistent state
	orcc %g0, %o0, %g0	! did an interrupt come in?
	be 4f			! yep, we are done
	rd %wim, %o3		! get current wim
@@ -1878,13 +1892,14 @@ kuw_patch1:
	bne kuw_patch1		! not done yet
	srl %o3, 1, %o4		! begin another save simulation
	wr %o3, 0x0, %wim	! set the new wim
-	st %g0, [%g6 + AOFF_task_thread + AOFF_thread_uwinmask]	! clear uwinmask
+	st %g0, [%g6 + TI_UWINMASK]	! clear uwinmask
 4:
	wr %o5, 0x0, %psr	! re-enable interrupts
	WRITE_PAUSE		! burn baby burn
 3:
+	ld [%g6 + TI_TASK], %o4
	retl			! return
-	st %g0, [%g6 + AOFF_task_thread + AOFF_thread_w_saved]	! no windows saved
+	st %g0, [%o4 + AOFF_task_thread + AOFF_thread_w_saved]	! no windows saved
	.align 4
	.globl C_LABEL(restore_current)
...
@@ -14,6 +14,7 @@
 #include <asm/ptrace.h>
 #include <asm/winmacro.h>
 #include <asm/asmmacro.h>
+#include <asm/thread_info.h>
 /* Registers to not touch at all. */
 #define t_psr l0 /* Set by caller */
@@ -101,7 +102,7 @@ trap_setup:
	mov %t_kstack, %sp	! jump onto new stack
 trap_setup_kernel_spill:
-	ld [%curptr + AOFF_task_thread + AOFF_thread_uwinmask], %g1
+	ld [%curptr + TI_UWINMASK], %g1
	orcc %g0, %g1, %g0
	bne trap_setup_user_spill	! there are some user windows, yuck
 /* Spill from kernel, but only kernel windows, adjust
@@ -127,7 +128,8 @@ tsetup_patch2:
	jmpl %t_retpc + 0x8, %g0	! return to caller
	mov %t_kstack, %sp	! and onto new kernel stack
-#define STACK_OFFSET (TASK_UNION_SIZE - (TRACEREG_SZ + REGWIN_SZ))
+#define STACK_OFFSET (THREAD_SIZE - (TRACEREG_SZ + REGWIN_SZ))
 trap_setup_from_user:
	/* We can't use %curptr yet. */
	LOAD_CURRENT(t_kstack, t_twinmask)
@@ -143,18 +145,19 @@ trap_setup_from_user:
	STORE_PT_ALL(t_kstack, t_psr, t_pc, t_npc, g2)
 #if 0
-	/* If we're sure every task_struct is TASK_UNION_SIZE aligned,
+	/* If we're sure every task_struct is THREAD_SIZE aligned,
	   we can speed this up. */
	sethi %hi(STACK_OFFSET), %curptr
	or %curptr, %lo(STACK_OFFSET), %curptr
	sub %t_kstack, %curptr, %curptr
 #else
-	sethi %hi(~(TASK_UNION_SIZE - 1)), %curptr
+	sethi %hi(~(THREAD_SIZE - 1)), %curptr
	and %t_kstack, %curptr, %curptr
 #endif
	/* Clear current->thread.w_saved */
-	st %g0, [%curptr + AOFF_task_thread + AOFF_thread_w_saved]
+	ld [%curptr + TI_TASK], %g2
+	st %g0, [%g2 + AOFF_task_thread + AOFF_thread_w_saved]
	/* See if we are in the trap window. */
	andcc %t_twinmask, %t_wim, %g0
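The #else branch above is the load-bearing trick of the whole conversion: kernel stacks are THREAD_SIZE-aligned, so the thread_info pointer is recovered by masking the kernel stack pointer. A self-contained C sketch, assuming sparc32's 8 KB stacks:

#define THREAD_SIZE 8192	/* sparc32 value assumed here */

struct thread_info;

/* What "sethi %hi(~(THREAD_SIZE - 1)), %curptr; and %t_kstack, %curptr,
 * %curptr" computes: round the kernel stack pointer down to the base of
 * the stack, where thread_info lives. */
static struct thread_info *ti_from_sp(unsigned long sp)
{
	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}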
@@ -185,7 +188,7 @@ trap_setup_from_user:
	andn %g2, %t_twinmask, %g2
 tsetup_patch3:
	and %g2, 0xff, %g2	! patched on 7win Sparcs
-	st %g2, [%curptr + AOFF_task_thread + AOFF_thread_uwinmask]	! store new umask
+	st %g2, [%curptr + TI_UWINMASK]	! store new umask
	jmpl %t_retpc + 0x8, %g0	! return to caller
	mov %t_kstack, %sp	! and onto kernel stack
@@ -206,7 +209,7 @@ tsetup_patch5:
 tsetup_patch6:
	and %g2, 0xff, %g2	! patched on 7win Sparcs
	andn %g1, %g2, %g1	! clear this bit in %g1
-	st %g1, [%curptr + AOFF_task_thread + AOFF_thread_uwinmask]
+	st %g1, [%curptr + TI_UWINMASK]
	save %g0, %g0, %g0
@@ -288,7 +291,8 @@ trap_setup_user_stack_is_bolixed:
	/* From user/kernel into invalid window w/bad user
	 * stack. Save bad user stack, and return to caller.
	 */
-	SAVE_BOLIXED_USER_STACK(curptr, g3)
+	ld [%curptr + TI_TASK], %glob_tmp
+	SAVE_BOLIXED_USER_STACK(glob_tmp, g3)
	restore %g0, %g0, %g0
	jmpl %t_retpc + 0x8, %g0
...
@@ -23,6 +23,7 @@
 #include <asm/page.h>
 #include <asm/kdebug.h>
 #include <asm/winmacro.h>
+#include <asm/thread_info.h>	/* TI_UWINMASK */
 #include <asm/errno.h>
 .data
@@ -749,7 +750,7 @@ go_to_highmem:
	jmpl %g1, %g0
	nop
-/* This is to align init_task_union properly, be careful. -DaveM */
+/* This is to align init_thread_union properly, be careful. -DaveM */
	.align 8192
 /* The code above should be at beginning and we have to take care about
@@ -1010,8 +1011,8 @@ sun4c_continue_boot:
	WRITE_PAUSE
	/* I want a kernel stack NOW! */
-	set C_LABEL(init_task_union), %g1
-	set (TASK_UNION_SIZE - REGWIN_SZ), %g2
+	set C_LABEL(init_thread_union), %g1
+	set (THREAD_SIZE - REGWIN_SZ), %g2
	add %g1, %g2, %sp
	mov 0, %fp	/* And for good luck */
@@ -1025,10 +1026,10 @@ sun4c_continue_boot:
	bl 1b
	add %o0, 0x1, %o0
-	/* Initialize the umask value for init_task just in case.
+	/* Initialize the uwinmask value for init task just in case.
	 * But first make current_set[boot_cpu_id] point to something useful.
	 */
-	set C_LABEL(init_task_union), %g6
+	set C_LABEL(init_thread_union), %g6
	set C_LABEL(current_set), %g2
 #ifdef CONFIG_SMP
	sethi %hi(C_LABEL(boot_cpu_id4)), %g3
@@ -1038,7 +1039,7 @@ sun4c_continue_boot:
 #endif
	st %g6, [%g2]
-	st %g0, [%g6 + AOFF_task_thread + AOFF_thread_uwinmask]
+	st %g0, [%g6 + TI_UWINMASK]
 /* Compute NWINDOWS and stash it away. Now uses %wim trick explained
 * in the V8 manual. Ok, this method seems to work, Sparc is cool...
...
@@ -9,12 +9,12 @@ static struct fs_struct init_fs = INIT_FS;
 static struct files_struct init_files = INIT_FILES;
 static struct signal_struct init_signals = INIT_SIGNALS;
 struct mm_struct init_mm = INIT_MM(init_mm);
+struct task_struct init_task = INIT_TASK(init_task);
 /* .text section in head.S is aligned at 8k boundary and this gets linked
- * right after that so that the init_task_union is aligned properly as well.
+ * right after that so that the init_thread_union is aligned properly as well.
 * If this is not aligned on an 8k boundary, then you should change code
 * in etrap.S which assumes it.
 */
 __asm__(".section \".text\",#alloc\n");
-union task_union init_task_union =
-	{ INIT_TASK(init_task_union.task) };
+union thread_union init_thread_union = { INIT_THREAD_INFO(init_task) };
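For orientation, the union being initialized here is the generic 2.5 thread_union: thread_info sits at the bottom of the kernel stack while the task_struct becomes a separate, statically defined init_task. The 8 KB alignment the comment insists on is what makes the stack-masking trick in etrap.S valid. A sketch of the shape, with a stand-in thread_info:

struct thread_info { int stub; };	/* stand-in; real one in <asm/thread_info.h> */
#define THREAD_SIZE 8192		/* sparc32 */

union thread_union {			/* shape as in 2.5-era <linux/sched.h> */
	struct thread_info thread_info;	/* at the stack base */
	unsigned long stack[THREAD_SIZE / sizeof(long)];
};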
@@ -530,7 +530,7 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
		}
	}
-	*pba = virt_to_bus(va);
+	*pba = virt_to_phys(va);	/* equals virt_to_bus (R.I.P.) for us. */
	return (void *) res->start;
 }
@@ -565,7 +565,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
		return;
	}
-	pgp = (unsigned long) bus_to_virt(ba);
+	pgp = (unsigned long) phys_to_virt(ba);	/* bus_to_virt actually */
	mmu_inval_dma_area(pgp, n);
	{
		int x;
@@ -592,7 +592,7 @@ dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
	if (direction == PCI_DMA_NONE)
		BUG();
	/* IIep is write-through, not flushing. */
-	return virt_to_bus(ptr);
+	return virt_to_phys(ptr);
 }
 /* Unmap a single streaming mode DMA translation. The dma_addr and size
@@ -608,7 +608,7 @@ void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
	if (direction == PCI_DMA_NONE)
		BUG();
	if (direction != PCI_DMA_TODEVICE) {
-		mmu_inval_dma_area((unsigned long)bus_to_virt(ba),
+		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
 }
@@ -637,7 +637,8 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
		BUG();
	/* IIep is write-through, not flushing. */
	for (n = 0; n < nents; n++) {
-		sg->dvma_address = virt_to_bus(sg->address);
+		if (page_address(sg->page) == NULL) BUG();
+		sg->dvma_address = virt_to_phys(page_address(sg->page));
		sg->dvma_length = sg->length;
		sg++;
	}
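These scatterlist hunks (here and in pci_unmap_sg/pci_dma_sync_sg below) track a generic 2.5 change: struct scatterlist now describes memory by struct page instead of by kernel virtual address, so the driver must go page, then virtual, then physical. A self-contained sketch with stand-in types (the real ones are in <asm/scatterlist.h>); the NULL check exists because page_address() returns NULL for highmem pages, which these machines never have:

struct page;
extern void *page_address(struct page *);	/* NULL for highmem pages */
extern unsigned long virt_to_phys(void *);
extern void bug(void);				/* stand-in for BUG() */

struct sg_stub {				/* hypothetical stand-in */
	struct page *page;			/* was: char *address */
	unsigned int length;
	unsigned int dvma_address, dvma_length;
};

static void map_sg_sketch(struct sg_stub *sg, int nents)
{
	int n;
	for (n = 0; n < nents; n++, sg++) {
		void *va = page_address(sg->page);
		if (va == NULL)
			bug();			/* highmem: impossible here */
		sg->dvma_address = virt_to_phys(va);
		sg->dvma_length = sg->length;
	}
}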
@@ -657,7 +658,9 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
		BUG();
	if (direction != PCI_DMA_TODEVICE) {
		for (n = 0; n < nents; n++) {
-			mmu_inval_dma_area((unsigned long)sg->address,
+			if (page_address(sg->page) == NULL) BUG();
+			mmu_inval_dma_area(
+			    (unsigned long) page_address(sg->page),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
			sg++;
		}
@@ -678,7 +681,7 @@ void pci_dma_sync_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int
	if (direction == PCI_DMA_NONE)
		BUG();
	if (direction != PCI_DMA_TODEVICE) {
-		mmu_inval_dma_area((unsigned long)bus_to_virt(ba),
+		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
 }
@@ -697,7 +700,9 @@ void pci_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, i
		BUG();
	if (direction != PCI_DMA_TODEVICE) {
		for (n = 0; n < nents; n++) {
-			mmu_inval_dma_area((unsigned long)sg->address,
+			if (page_address(sg->page) == NULL) BUG();
+			mmu_inval_dma_area(
+			    (unsigned long) page_address(sg->page),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
			sg++;
		}
...
@@ -6,7 +6,7 @@
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
- * Copyright (C) 1995 Pete A. Zaitcev (zaitcev@yahoo.com)
+ * Copyright (C) 1995,2002 Pete A. Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
 * Copyright (C) 1998-2000 Anton Blanchard (anton@samba.org)
 */
@@ -46,6 +46,7 @@
 #include <asm/hardirq.h>
 #include <asm/softirq.h>
 #include <asm/pcic.h>
+#include <asm/cacheflush.h>
 /*
 * Dave Redman (djhr@tadpole.co.uk)
@@ -119,9 +120,11 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
	seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-	for (j = 0; j < smp_num_cpus; j++)
+	for (j = 0; j < NR_CPUS; j++) {
+		if (cpu_online(j))
		seq_printf(p, "%10u ",
		    kstat.irqs[cpu_logical_map(j)][i]);
+	}
 #endif
	seq_printf(p, " %c %s",
		(action->flags & SA_INTERRUPT) ? '+' : ' ',
@@ -214,12 +217,16 @@ static void show(char * str)
	printk("\n%s, CPU %d:\n", str, cpu);
	printk("irq: %d [ ", irqs_running());
-	for (i = 0; i < smp_num_cpus; i++)
+	for (i = 0; i < NR_CPUS; i++) {
+		if (cpu_online(i))
		printk("%u ", __brlock_array[i][BR_GLOBALIRQ_LOCK]);
+	}
	printk("]\nbh: %d [ ",
		(spin_is_locked(&global_bh_lock) ? 1 : 0));
-	for (i = 0; i < smp_num_cpus; i++)
+	for (i = 0; i < NR_CPUS; i++) {
+		if (cpu_online(i))
		printk("%u ", local_bh_count(i));
+	}
	printk("]\n");
 #ifdef VERBOSE_DEBUG_IRQLOCK
...
@@ -57,7 +57,14 @@ void (*pm_power_off)(void);
 extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *);
 struct task_struct *last_task_used_math = NULL;
-struct task_struct *current_set[NR_CPUS] = {&init_task, };
+struct thread_info *current_set[NR_CPUS];
+/*
+ * default_idle is new in 2.5. XXX Review, currently stolen from sparc64.
+ */
+void default_idle(void)
+{
+}
 #ifndef CONFIG_SMP
@@ -106,8 +113,8 @@ int cpu_idle(void)
		restore_flags(flags);
	}
-	while((!current->need_resched) && pm_idle) {
-		(*pm_idle)();
+	while((!need_resched()) && pm_idle) {
+		(*pm_idle)();	/* XXX Huh? On sparc?! */
	}
	schedule();
@@ -306,7 +313,7 @@ void show_trace_task(struct task_struct *tsk)
	if (!tsk)
		return;
-	fp = tsk->thread.ksp;
+	fp = tsk->thread_info->ksp;
	do {
		/* Bogus frame pointer? */
		if (fp < (task_base + sizeof(struct task_struct)) ||
@@ -320,6 +327,14 @@ void show_trace_task(struct task_struct *tsk)
	printk("\n");
 }
+/*
+ * Note: sparc64 has a pretty intricate thread_saved_pc, check it out.
+ */
+unsigned long thread_saved_pc(struct task_struct *tsk)
+{
+	return tsk->thread_info->kpc;
+}
 /*
 * Free current thread data structures etc..
 */
@@ -372,7 +387,7 @@ void flush_thread(void)
		/* We must fixup kregs as well. */
		current->thread.kregs = (struct pt_regs *)
		    (((unsigned long)current) +
-		    (TASK_UNION_SIZE - TRACEREG_SZ));
+		    (THREAD_SIZE - TRACEREG_SZ));
	}
 }
@@ -445,6 +460,22 @@ clone_stackframe(struct sparc_stackf *dst, struct sparc_stackf *src)
	return sp;
 }
+asmlinkage int sparc_do_fork(unsigned long clone_flags,
+                             unsigned long stack_start,
+                             struct pt_regs *regs,
+                             unsigned long stack_size)
+{
+	struct task_struct *p;
+	/* XXX This was spelled in DaveM's will and testament. Why? */
+	if (clone_flags & CLONE_IDLETASK) {
+		printk(KERN_DEBUG "Userland clone with CLONE_IDLETASK\n");
+		clone_flags &= ~CLONE_IDLETASK;
+	}
+	p = do_fork(clone_flags, stack_start, regs, stack_size);
+	return IS_ERR(p) ? PTR_ERR(p) : p->pid;
+}
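The wrapper exists because 2.5's do_fork() returns a struct task_struct * (or an ERR_PTR-encoded errno) while the sparc syscall path still wants a pid or negative errno in %o0; it also filters CLONE_IDLETASK, which userland must never pass. A sketch of the ERR_PTR encoding assumed here, with stand-ins for the real <linux/err.h> helpers:

/* Assumed 2.5-era convention: errno values are encoded as pointers in
 * the top of the address space, so one range check tells a valid task
 * pointer apart from an error. */
static inline long is_err_sketch(const void *p)
{
	return (unsigned long)p > (unsigned long)-1000L;
}
static inline long ptr_err_sketch(const void *p)
{
	return (long)p;		/* the negative errno itself */
}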
 /* Copy a Sparc thread. The fork() return value conventions
 * under SunOS are nothing short of bletcherous:
@@ -457,6 +488,7 @@ clone_stackframe(struct sparc_stackf *dst, struct sparc_stackf *src)
 * if the parent should sleep while trying to
 * allocate the task_struct and kernel stack in
 * do_fork().
+ * XXX See comment above sys_vfork in sparc64. todo.
 */
 extern void ret_from_fork(void);
@@ -464,6 +496,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
 {
+	struct thread_info *ti = p->thread_info;
	struct pt_regs *childregs;
	struct reg_window *new_stack;
	unsigned long stack_offset;
@@ -482,19 +515,19 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
	}
	/* Calculate offset to stack_frame & pt_regs */
-	stack_offset = TASK_UNION_SIZE - TRACEREG_SZ;
+	stack_offset = THREAD_SIZE - TRACEREG_SZ;
	if(regs->psr & PSR_PS)
		stack_offset -= REGWIN_SZ;
-	childregs = ((struct pt_regs *) (((unsigned long)p) + stack_offset));
+	childregs = ((struct pt_regs *) (((unsigned long)ti) + stack_offset));
	copy_regs(childregs, regs);
	new_stack = (((struct reg_window *) childregs) - 1);
	copy_regwin(new_stack, (((struct reg_window *) regs) - 1));
-	p->thread.ksp = (unsigned long) new_stack;
-	p->thread.kpc = (((unsigned long) ret_from_fork) - 0x8);
-	p->thread.kpsr = current->thread.fork_kpsr;
-	p->thread.kwim = current->thread.fork_kwim;
+	ti->ksp = (unsigned long) new_stack;
+	ti->kpc = (((unsigned long) ret_from_fork) - 0x8);
+	ti->kpsr = current->thread.fork_kpsr;
+	ti->kwim = current->thread.fork_kwim;
	/* This is used for sun4c only */
	atomic_set(&p->thread.refcount, 1);
@@ -504,16 +537,14 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		p->thread.kregs = &fake_swapper_regs;
		new_stack = (struct reg_window *)
-		    ((((unsigned long)p) +
-		    (TASK_UNION_SIZE)) -
-		    (REGWIN_SZ));
+		    ((((unsigned long)ti) + (THREAD_SIZE)) - REGWIN_SZ);
		childregs->u_regs[UREG_FP] = (unsigned long) new_stack;
		p->thread.flags |= SPARC_FLAG_KTHREAD;
		p->thread.current_ds = KERNEL_DS;
		memcpy((void *)new_stack,
		    (void *)regs->u_regs[UREG_FP],
		    sizeof(struct reg_window));
-		childregs->u_regs[UREG_G6] = (unsigned long) p;
+		childregs->u_regs[UREG_G6] = (unsigned long) ti;
	} else {
		p->thread.kregs = childregs;
		childregs->u_regs[UREG_FP] = sp;
...
@@ -92,16 +92,16 @@ static inline void read_sunos_user(struct pt_regs *regs, unsigned long offset,
	}
	switch(offset) {
	case 0:
-		v = t->ksp;
+		v = tsk->thread_info->ksp;
		break;
	case 4:
-		v = t->kpc;
+		v = tsk->thread_info->kpc;
		break;
	case 8:
-		v = t->kpsr;
+		v = tsk->thread_info->kpsr;
		break;
	case 12:
-		v = t->uwinmask;
+		v = tsk->thread_info->uwinmask;
		break;
	case 832:
		v = t->w_saved;
@@ -530,9 +530,9 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
		}
		if (request == PTRACE_SYSCALL)
-			child->ptrace |= PT_TRACESYS;
+			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		else
-			child->ptrace &= ~PT_TRACESYS;
+			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		child->exit_code = data;
 #ifdef DEBUG_PTRACE
@@ -581,7 +581,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
	}
 out_tsk:
	if (child)
-		free_task_struct(child);
+		put_task_struct(child);
 out:
	unlock_kernel();
 }
@@ -591,8 +591,9 @@ asmlinkage void syscall_trace(void)
 #ifdef DEBUG_PTRACE
	printk("%s [%d]: syscall_trace\n", current->comm, current->pid);
 #endif
-	if ((current->ptrace & (PT_PTRACED|PT_TRACESYS))
-	    != (PT_PTRACED|PT_TRACESYS))
+	if (!test_thread_flag(TIF_SYSCALL_TRACE))
+		return;
+	if (!(current->ptrace & PT_PTRACED))
		return;
	current->exit_code = SIGTRAP;
	current->state = TASK_STOPPED;
...
@@ -13,6 +13,7 @@
 #include <asm/contregs.h>
 #include <asm/winmacro.h>
 #include <asm/asmmacro.h>
+#include <asm/thread_info.h>
 #define t_psr l0
 #define t_pc l1
@@ -58,17 +59,17 @@ C_LABEL(ret_trap_lockless_ipi):
	nop
 1:
-#error ld [%curptr + AOFF_task_need_resched], %g2
-	orcc %g2, %g0, %g0
+	ld [%curptr + TI_FLAGS], %g2
+	andcc %g2, (_TIF_NEED_RESCHED), %g0
	be signal_p
-#error ld [%curptr + AOFF_task_sigpending], %g2
+	nop
	call C_LABEL(schedule)
	nop
-#error ld [%curptr + AOFF_task_sigpending], %g2
+	ld [%curptr + TI_FLAGS], %g2
 signal_p:
-	cmp %g2, 0
+	andcc %g2, (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING), %g0
	bz,a ret_trap_continue
	ld [%sp + REGWIN_SZ + PT_PSR], %t_psr
@@ -98,7 +99,7 @@ ret_trap_continue:
	add %sp, REGWIN_SZ, %o0
	b signal_p
-#error ld [%curptr + AOFF_task_sigpending], %g2
+	ld [%curptr + TI_FLAGS], %g2
 ret_trap_nobufwins:
	/* Load up the user's out registers so we can pull
@@ -109,7 +110,7 @@ ret_trap_nobufwins:
	/* If there are already live user windows in the
	 * set we can return from trap safely.
	 */
-	ld [%curptr + AOFF_task_thread + AOFF_thread_uwinmask], %twin_tmp1
+	ld [%curptr + TI_UWINMASK], %twin_tmp1
	orcc %g0, %twin_tmp1, %g0
	bne ret_trap_userwins_ok
	nop
@@ -168,7 +169,7 @@ ret_trap_unaligned_pc:
	nop
	b signal_p
-#error ld [%curptr + AOFF_task_sigpending], %g2
+	ld [%curptr + TI_FLAGS], %g2
 ret_trap_kernel:
	/* Will the rett land us in the invalid window? */
@@ -218,7 +219,8 @@ ret_trap_user_stack_is_bolixed:
	add %sp, REGWIN_SZ, %o0
	b signal_p
-#error ld [%curptr + AOFF_task_sigpending], %g2
+	ld [%curptr + TI_FLAGS], %g2
	.globl C_LABEL(sun4c_rett_stackchk)
 C_LABEL(sun4c_rett_stackchk):
...
@@ -10,6 +10,7 @@
 #include <asm/ptrace.h>
 #include <asm/errno.h>
 #include <asm/winmacro.h>
+#include <asm/thread_info.h>
 #include <asm/psr.h>
 #include <asm/page.h>
@@ -40,6 +41,7 @@ LABEL(sunosnop):
	.globl LABEL(sunosgetpid)
 LABEL(sunosgetpid):
	LOAD_CURRENT(l4, l5)
+	ld [%l4 + TI_TASK], %l4
	ld [%l4 + AOFF_task_pid], %i0
	ld [%l4 + AOFF_task_p_opptr], %l5
	ld [%l5 + AOFF_task_pid], %i1
@@ -50,6 +52,7 @@ LABEL(sunosgetpid):
	.globl LABEL(sunosgetuid)
 LABEL(sunosgetuid):
	LOAD_CURRENT(l4, l5)
+	ld [%l4 + TI_TASK], %l4
	lduh [%l4 + AOFF_task_uid], %i0
	lduh [%l4 + AOFF_task_euid], %i1
	CC_AND_RETT
@@ -59,6 +62,7 @@ LABEL(sunosgetuid):
	.globl LABEL(sunosgetgid)
 LABEL(sunosgetgid):
	LOAD_CURRENT(l4, l5)
+	ld [%l4 + TI_TASK], %l4
	lduh [%l4 + AOFF_task_gid], %i0
	lduh [%l4 + AOFF_task_egid], %i1
	CC_AND_RETT
@@ -77,6 +81,7 @@ LABEL(sunosgdtsize):
	.globl LABEL(sunossblock)
 LABEL(sunossblock):
	LOAD_CURRENT(l4, l5)
+	ld [%l4 + TI_TASK], %l4
	set -65793, %l5
	and %i0, %l5, %l5
	ld [%l4 + AOFF_task_blocked], %i0
@@ -87,6 +92,7 @@ LABEL(sunossblock):
	.globl LABEL(sunossmask)
 LABEL(sunossmask):
	LOAD_CURRENT(l4, l5)
+	ld [%l4 + TI_TASK], %l4
	set -65793, %l5
	and %i0, %l5, %l5
	ld [%l4 + AOFF_task_blocked], %i0
...
@@ -3,6 +3,7 @@
 /* sparc32 semaphore implementation, based on i386 version */
 #include <linux/sched.h>
+#include <linux/errno.h>
 #include <asm/semaphore.h>
...
@@ -481,7 +481,7 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
	    (short) romvec->pv_printrev,
	    &cputypval,
	    linux_num_cpus,
-	    smp_num_cpus
+	    num_online_cpus()
 #ifndef CONFIG_SMP
	    , loops_per_jiffy/(500000/HZ),
	    (loops_per_jiffy/(5000/HZ)) % 100
...
@@ -19,6 +19,7 @@
 #include <linux/tty.h>
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
+#include <linux/binfmts.h>	/* do_coredump */
 #include <asm/uaccess.h>
 #include <asm/bitops.h>
@@ -26,6 +27,7 @@
 #include <asm/svr4.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
+#include <asm/cacheflush.h>	/* flush_sig_insns */
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
@@ -34,7 +36,7 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
 extern void fpload(unsigned long *fpregs, unsigned long *fsr);
 asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
-		unsigned long orig_o0, int ret_from_syscall);
+		unsigned long orig_o0, int restart_syscall);
 /* This turned off for production... */
 /* #define DEBUG_SIGNALS 1 */
@@ -229,7 +231,7 @@ restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t *fpu)
 {
	int err;
 #ifdef CONFIG_SMP
-	if (current->flags & PF_USEDFPU)
+	if (test_tsk_thread_flag(current, TIF_USEDFPU))
		regs->psr &= ~PSR_EF;
 #else
	if (current == last_task_used_math) {
@@ -238,7 +240,7 @@ restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t *fpu)
	}
 #endif
	current->used_math = 1;
-	current->flags &= ~PF_USEDFPU;
+	clear_tsk_thread_flag(current, TIF_USEDFPU);
	if (verify_area (VERIFY_READ, fpu, sizeof(*fpu)))
		return -EFAULT;
@@ -586,12 +588,12 @@ save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t *fpu)
 {
	int err = 0;
 #ifdef CONFIG_SMP
-	if (current->flags & PF_USEDFPU) {
+	if (test_tsk_thread_flag(current, TIF_USEDFPU)) {
		put_psr(get_psr() | PSR_EF);
		fpsave(&current->thread.float_regs[0], &current->thread.fsr,
		    &current->thread.fpqueue[0], &current->thread.fpqdepth);
		regs->psr &= ~(PSR_EF);
-		current->flags &= ~(PF_USEDFPU);
+		clear_tsk_thread_flag(current, TIF_USEDFPU);
	}
 #else
	if (current == last_task_used_math) {
@@ -1295,7 +1297,7 @@ asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
		/* fall through */
		default:
			sigaddset(&current->pending.signal, signr);
-			recalc_sigpending(current);
+			recalc_sigpending();
			current->flags |= PF_SIGNALED;
			do_exit(exit_code);
			/* NOT REACHED */
...
@@ -109,6 +109,8 @@
 #include <asm/kgdb.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
+#include <asm/cacheflush.h>
 /*
 *
 * external low-level support routines
...
@@ -142,7 +142,6 @@ EXPORT_SYMBOL(__global_save_flags);
 EXPORT_SYMBOL(__global_restore_flags);
 /* Misc SMP information */
-EXPORT_SYMBOL(smp_num_cpus);
 EXPORT_SYMBOL(__cpu_number_map);
 EXPORT_SYMBOL(__cpu_logical_map);
 #endif
...
@@ -99,9 +99,11 @@ found_it: seq_printf(p, "%3d: ", i);
 #ifndef CONFIG_SMP
	seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-	for (x = 0; x < smp_num_cpus; x++)
+	for (x = 0; x < NR_CPUS; x++) {
+		if (cpu_online(x))
		seq_printf(p, "%10u ",
		    kstat.irqs[cpu_logical_map(x)][i]);
+	}
 #endif
	seq_printf(p, "%c %s",
		(action->flags & SA_INTERRUPT) ? '+' : ' ',
...
@@ -43,7 +43,6 @@ extern int linux_num_cpus;
 extern void calibrate_delay(void);
-extern struct task_struct *current_set[NR_CPUS];
 extern volatile int smp_processors_ready;
 extern unsigned long cpu_present_map;
 extern int smp_num_cpus;
...
@@ -40,7 +40,6 @@ extern int linux_num_cpus;
 extern void calibrate_delay(void);
-extern struct task_struct *current_set[NR_CPUS];
 extern volatile int smp_processors_ready;
 extern unsigned long cpu_present_map;
 extern int smp_num_cpus;
...
@@ -63,8 +63,8 @@ cpu3_startup:
	and %g4, 0xc, %g4
	ld [%g5 + %g4], %g6
-	sethi %hi(TASK_UNION_SIZE - REGWIN_SZ), %sp
-	or %sp, %lo(TASK_UNION_SIZE - REGWIN_SZ), %sp
+	sethi %hi(THREAD_SIZE - REGWIN_SZ), %sp
+	or %sp, %lo(THREAD_SIZE - REGWIN_SZ), %sp
	add %g6, %sp, %sp
 /* Turn on traps (PSR_ET). */
@@ -142,8 +142,8 @@ C_LABEL(sun4d_cpu_startup):
	srl %g3, 1, %g4
	ld [%g5 + %g4], %g6
-	sethi %hi(TASK_UNION_SIZE - REGWIN_SZ), %sp
-	or %sp, %lo(TASK_UNION_SIZE - REGWIN_SZ), %sp
+	sethi %hi(THREAD_SIZE - REGWIN_SZ), %sp
+	or %sp, %lo(THREAD_SIZE - REGWIN_SZ), %sp
	add %g6, %sp, %sp
 /* Turn on traps (PSR_ET). */
...
@@ -475,6 +475,23 @@ int thiscpus_mid;
 void trap_init(void)
 {
+	extern void thread_info_offsets_are_bolixed_pete(void);
+	/* Force linker to barf if mismatched */
+	if (TI_UWINMASK != offsetof(struct thread_info, uwinmask) ||
+	    TI_TASK != offsetof(struct thread_info, task) ||
+	    TI_EXECDOMAIN != offsetof(struct thread_info, exec_domain) ||
+	    TI_FLAGS != offsetof(struct thread_info, flags) ||
+	    TI_CPU != offsetof(struct thread_info, cpu) ||
+	    TI_PREEMPT != offsetof(struct thread_info, preempt_count) ||
+	    TI_SOFTIRQ != offsetof(struct thread_info, softirq_count) ||
+	    TI_HARDIRQ != offsetof(struct thread_info, hardirq_count) ||
+	    TI_KSP != offsetof(struct thread_info, ksp) ||
+	    TI_KPC != offsetof(struct thread_info, kpc) ||
+	    TI_KPSR != offsetof(struct thread_info, kpsr) ||
+	    TI_KWIM != offsetof(struct thread_info, kwim))
+		thread_info_offsets_are_bolixed_pete();
	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
...
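The check added to trap_init() above is really a link-time assertion: every operand is a compile-time constant, so when the hand-maintained TI_* values in asm/thread_info.h match the C struct, gcc folds the condition to zero and the call to the deliberately never-defined thread_info_offsets_are_bolixed_pete() disappears; on any mismatch the call survives and the final link fails with an undefined symbol. A generic sketch of the idiom (helper name hypothetical):

/* Hypothetical helper; intentionally declared but never defined. */
extern void compile_time_assertion_failed(void);

#define ASSERT_AT_LINK_TIME(cond)				\
	do {							\
		if (!(cond))	/* constant-folded by gcc */	\
			compile_time_assertion_failed();	\
	} while (0)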
@@ -32,7 +32,7 @@ void flush_user_windows(void)
	"	restore %%g0, %%g0, %%g0\n"
	: "=&r" (ctr)
	: "0" (ctr),
-	  "i" ((const unsigned long)(&(((struct task_struct *)0)->thread.uwinmask)))
+	  "i" ((const unsigned long)TI_UWINMASK)
	: "g4", "cc");
 }
...
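The "i" constraint being replaced here is the classic null-pointer offsetof spelled by hand; TI_UWINMASK expresses the same offset, only into thread_info, and is kept honest by the trap_init() check above. Equivalent C, with stand-in struct shapes for illustration:

#include <stddef.h>

/* Stand-ins mirroring the shapes involved: */
struct thread_stub { unsigned long uwinmask; };
struct task_struct_stub { long pad; struct thread_stub thread; };
struct thread_info_stub { unsigned long uwinmask; };

/* Old operand, the hand-rolled null-pointer offsetof:
 *   (unsigned long) &(((struct task_struct *)0)->thread.uwinmask)
 * which is just: */
#define OLD_OFF offsetof(struct task_struct_stub, thread.uwinmask)
/* New operand: TI_UWINMASK, the same idea against thread_info: */
#define NEW_OFF offsetof(struct thread_info_stub, uwinmask)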
@@ -13,6 +13,7 @@
 #include <asm/asi.h>
 #include <asm/winmacro.h>
 #include <asm/asmmacro.h>
+#include <asm/thread_info.h>
 /* WARNING: This routine is hairy and _very_ complicated, but it
 * must be as fast as possible as it handles the allocation
@@ -62,7 +63,7 @@ spnwin_patch3_7win: and %twin_tmp, 0x7f, %twin_tmp
 * andcc %l0, PSR_PS, %g0
 */
-/* Datum current->thread.uwinmask contains at all times a bitmask
+/* Datum current_thread_info->uwinmask contains at all times a bitmask
 * where if any user windows are active, at least one bit will
 * be set in to mask. If no user windows are active, the bitmask
 * will be all zeroes.
@@ -96,10 +97,10 @@ spnwin_patch2: and %glob_tmp, 0xff, %glob_tmp
	save %g0, %g0, %g0	! Go where saving will occur
	/* See if any user windows are active in the set. */
-	ld [%curptr + AOFF_task_thread + AOFF_thread_uwinmask], %twin_tmp	! grab win mask
+	ld [%curptr + TI_UWINMASK], %twin_tmp	! grab win mask
	orcc %g0, %twin_tmp, %g0	! check for set bits
	bne spwin_exist_uwins	! yep, there are some
-	andn %twin_tmp, %glob_tmp, %twin_tmp	! compute new umask
+	andn %twin_tmp, %glob_tmp, %twin_tmp	! compute new uwinmask
	/* Save into the window which must be saved and do it.
	 * Basically if we are here, this means that we trapped
@@ -139,7 +140,7 @@ spwin_exist_uwins:
	 * But first, store the new user window mask calculated
	 * above.
	 */
-	st %twin_tmp, [%curptr + AOFF_task_thread + AOFF_thread_uwinmask]
+	st %twin_tmp, [%curptr + TI_UWINMASK]
	save %g0, %g0, %g0	! Go to where the saving will occur
 spwin_fromuser:
@@ -209,18 +210,20 @@ spwin_user_stack_is_bolixed:
	bne spwin_bad_ustack_from_kernel
	nop
+	ld [%curptr + TI_TASK], %glob_tmp
	/* Oh well, throw this one window into the per-task window
	 * buffer, the first one.
	 */
-	st %sp, [%curptr + AOFF_task_thread + AOFF_thread_rwbuf_stkptrs]
-	STORE_WINDOW(curptr + AOFF_task_thread + AOFF_thread_reg_window)
+	st %sp, [%glob_tmp + AOFF_task_thread + AOFF_thread_rwbuf_stkptrs]
+	STORE_WINDOW(glob_tmp + AOFF_task_thread + AOFF_thread_reg_window)
	restore %g0, %g0, %g0
	/* LOCATION: Trap Window */
	/* Back in the trap window, update winbuffer save count. */
-	mov 1, %glob_tmp
-	st %glob_tmp, [%curptr + AOFF_task_thread + AOFF_thread_w_saved]
+	mov 1, %twin_tmp
+	st %twin_tmp, [%glob_tmp + AOFF_task_thread + AOFF_thread_w_saved]
	/* Compute new user window mask. What we are basically
	 * doing is taking two windows, the invalid one at trap
@@ -232,9 +235,9 @@ spwin_user_stack_is_bolixed:
	or %twin_tmp, %t_wim, %twin_tmp
	not %twin_tmp
 spnwin_patch3: and %twin_tmp, 0xff, %twin_tmp	! patched on 7win Sparcs
-	st %twin_tmp, [%curptr + AOFF_task_thread + AOFF_thread_uwinmask]
+	st %twin_tmp, [%curptr + TI_UWINMASK]
-#define STACK_OFFSET (TASK_UNION_SIZE - TRACEREG_SZ - REGWIN_SZ)
+#define STACK_OFFSET (THREAD_SIZE - TRACEREG_SZ - REGWIN_SZ)
	sethi %hi(STACK_OFFSET), %sp
	or %sp, %lo(STACK_OFFSET), %sp
@@ -247,7 +250,7 @@ spnwin_patch3: and %twin_tmp, 0xff, %twin_tmp ! patched on 7win Sparcs
	sethi %hi(STACK_OFFSET), %g6
	or %g6, %lo(STACK_OFFSET), %g6
-	sub %sp, %g6, %g6
+	sub %sp, %g6, %g6	! curptr
	/* Turn on traps and call c-code to deal with it. */
	wr %t_psr, PSR_ET, %psr
@@ -271,7 +274,8 @@ spwin_bad_ustack_from_kernel:
	 * a per-process window buffer until we can properly handle
	 * this later on.
	 */
-	SAVE_BOLIXED_USER_STACK(curptr, glob_tmp)
+	ld [%curptr + TI_TASK], %glob_tmp	/* Using curptr one last time */
+	SAVE_BOLIXED_USER_STACK(glob_tmp, g6)	/* ...now using g6 as scratch */
	restore %g0, %g0, %g0
	/* LOCATION: Trap window */
...
@@ -13,6 +13,7 @@
 #include <asm/asi.h>
 #include <asm/winmacro.h>
 #include <asm/asmmacro.h>
+#include <asm/thread_info.h>
 /* Just like the overflow handler we define macros for registers
 * with fixed meanings in this routine.
@@ -40,7 +41,7 @@
 * andcc %l0, PSR_PS, %g0
 */
-/* Datum current->thread.uwinmask contains at all times a bitmask
+/* Datum current_thread_info->uwinmask contains at all times a bitmask
 * where if any user windows are active, at least one bit will
 * be set in to mask. If no user windows are active, the bitmask
 * will be all zeroes.
@@ -138,7 +139,7 @@ fwin_from_user:
 C_LABEL(fwin_mmu_patchme): b C_LABEL(sun4c_fwin_stackchk)
	andcc %sp, 0x7, %g0
-#define STACK_OFFSET (TASK_UNION_SIZE - TRACEREG_SZ - REGWIN_SZ)
+#define STACK_OFFSET (THREAD_SIZE - TRACEREG_SZ - REGWIN_SZ)
 fwin_user_stack_is_bolixed:
 /* LOCATION: Window 'W' */
@@ -184,8 +185,9 @@ fwin_user_stack_is_bolixed:
	/* Fix users window mask and buffer save count. */
	mov 0x1, %g5
	sll %g5, %g3, %g5
-	st %g5, [%curptr + AOFF_task_thread + AOFF_thread_uwinmask]	! one live user window still
-	st %g0, [%curptr + AOFF_task_thread + AOFF_thread_w_saved]	! no windows in the buffer
+	st %g5, [%curptr + TI_UWINMASK]	! one live user window still
+	ld [%curptr + TI_TASK], %g5
+	st %g0, [%g5 + AOFF_task_thread + AOFF_thread_w_saved]	! no windows in the buffer
	wr %t_psr, PSR_ET, %psr	! enable traps
	nop
...
@@ -15,6 +15,7 @@
 #include <asm/pgtable.h>
 #include <asm/oplib.h>
 #include <asm/system.h>
+#include <asm/cacheflush.h>
 #define BTFIXUP_OPTIMIZE_NOP
 #define BTFIXUP_OPTIMIZE_OTHER
...
@@ -138,8 +138,8 @@ static void unhandled_fault(unsigned long address, struct task_struct *tsk,
		struct pt_regs *regs)
 {
	if((unsigned long) address < PAGE_SIZE) {
-		printk(KERN_ALERT "Unable to handle kernel NULL "
-		    "pointer dereference");
+		printk(KERN_ALERT
+		    "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request "
		    "at virtual address %08lx\n", address);
@@ -401,7 +401,7 @@ asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
 {
	extern void sun4c_update_mmu_cache(struct vm_area_struct *,
	    unsigned long,pte_t);
-	extern pte_t *sun4c_pte_offset(pmd_t *,unsigned long);
+	extern pte_t *sun4c_pte_offset_kernel(pmd_t *,unsigned long);
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	pgd_t *pgdp;
@@ -421,7 +421,7 @@ asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
	}
	pgdp = pgd_offset(mm, address);
-	ptep = sun4c_pte_offset((pmd_t *) pgdp, address);
+	ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, address);
	if (pgd_val(*pgdp)) {
		if (write) {
...
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
static inline void forget_pte(pte_t page) static inline void forget_pte(pte_t page)
{ {
#if 0 /* old 2.4 code */
if (pte_none(page)) if (pte_none(page))
return; return;
if (pte_present(page)) { if (pte_present(page)) {
...@@ -30,6 +31,12 @@ static inline void forget_pte(pte_t page) ...@@ -30,6 +31,12 @@ static inline void forget_pte(pte_t page)
return; return;
} }
swap_free(pte_to_swp_entry(page)); swap_free(pte_to_swp_entry(page));
#else
if (!pte_none(page)) {
printk("forget_pte: old mapping existed!\n");
BUG();
}
#endif
} }
/* Remap IO memory, the same way as remap_page_range(), but use /* Remap IO memory, the same way as remap_page_range(), but use
...@@ -69,7 +76,7 @@ static inline int io_remap_pmd_range(pmd_t * pmd, unsigned long address, unsigne ...@@ -69,7 +76,7 @@ static inline int io_remap_pmd_range(pmd_t * pmd, unsigned long address, unsigne
end = PGDIR_SIZE; end = PGDIR_SIZE;
offset -= address; offset -= address;
do { do {
pte_t * pte = pte_alloc(current->mm, pmd, address); pte_t * pte = pte_alloc_map(current->mm, pmd, address);
if (!pte) if (!pte)
return -ENOMEM; return -ENOMEM;
io_remap_pte_range(pte, address, end - address, address + offset, prot, space); io_remap_pte_range(pte, address, end - address, address + offset, prot, space);
......
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#include <asm/page.h> #include <asm/page.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/vaddrs.h> #include <asm/vaddrs.h>
#include <asm/pgalloc.h> /* bug in asm-generic/tlb.h: check_pgt_cache */
#include <asm/tlb.h> #include <asm/tlb.h>
mmu_gather_t mmu_gathers[NR_CPUS]; mmu_gather_t mmu_gathers[NR_CPUS];
...@@ -60,7 +61,7 @@ pte_t *kmap_pte; ...@@ -60,7 +61,7 @@ pte_t *kmap_pte;
pgprot_t kmap_prot; pgprot_t kmap_prot;
#define kmap_get_fixed_pte(vaddr) \ #define kmap_get_fixed_pte(vaddr) \
pte_offset(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)) pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))
void __init kmap_init(void) void __init kmap_init(void)
{ {
...@@ -77,11 +78,13 @@ void show_mem(void) ...@@ -77,11 +78,13 @@ void show_mem(void)
nr_swap_pages << (PAGE_SHIFT-10)); nr_swap_pages << (PAGE_SHIFT-10));
printk("%ld pages of RAM\n", totalram_pages); printk("%ld pages of RAM\n", totalram_pages);
printk("%d free pages\n", nr_free_pages()); printk("%d free pages\n", nr_free_pages());
#if 0 /* undefined pgtable_cache_size, pgd_cache_size */
printk("%ld pages in page table cache\n",pgtable_cache_size); printk("%ld pages in page table cache\n",pgtable_cache_size);
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
if (sparc_cpu_model == sun4m || sparc_cpu_model == sun4d) if (sparc_cpu_model == sun4m || sparc_cpu_model == sun4d)
printk("%ld entries in page dir cache\n",pgd_cache_size); printk("%ld entries in page dir cache\n",pgd_cache_size);
#endif #endif
#endif
} }
extern pgprot_t protection_map[16]; extern pgprot_t protection_map[16];
...@@ -309,6 +312,23 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) ...@@ -309,6 +312,23 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
return max_pfn; return max_pfn;
} }
/*
* check_pgt_cache
*
* This is called at the end of unmapping of VMA (zap_page_range),
* to rescan the page table cache for architecture-specific things,
* presumably something like sun4/sun4c PMEGs. Most architectures
* define check_pgt_cache empty.
*
* We simply copy the 2.4 implementation for now.
*/
int pgt_cache_water[2] = { 25, 50 };
void check_pgt_cache(void)
{
do_check_pgt_cache(pgt_cache_water[0], pgt_cache_water[1]);
}
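For context, a hedged sketch of what the (low, high) water marks conventionally meant for 2.4-style quicklists; free_one_cached_table() is a hypothetical stand-in, not a function from this patch:

static void do_check_pgt_cache_sketch(int low, int high)
{
	/* Assumed 2.4 semantics: once the cache grows past `high`
	 * entries, trim it back down to `low`. */
	if (pgtable_cache_size > high) {
		while (pgtable_cache_size > low)
			free_one_cached_table();	/* hypothetical */
	}
}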
/* /*
* paging_init() sets up the page tables: We call the MMU specific * paging_init() sets up the page tables: We call the MMU specific
* init routine based upon the Sun model type on the Sparc. * init routine based upon the Sun model type on the Sparc.
......
...@@ -9,6 +9,9 @@ ...@@ -9,6 +9,9 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
#include <asm/scatterlist.h> #include <asm/scatterlist.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
...@@ -17,6 +20,8 @@ ...@@ -17,6 +20,8 @@
#include <asm/io-unit.h> #include <asm/io-unit.h>
#include <asm/mxcc.h> #include <asm/mxcc.h>
#include <asm/bitops.h> #include <asm/bitops.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
/* #define IOUNIT_DEBUG */ /* #define IOUNIT_DEBUG */
#ifdef IOUNIT_DEBUG #ifdef IOUNIT_DEBUG
...@@ -188,7 +193,7 @@ static void iounit_map_dma_area(unsigned long va, __u32 addr, int len) ...@@ -188,7 +193,7 @@ static void iounit_map_dma_area(unsigned long va, __u32 addr, int len)
pgdp = pgd_offset(init_task.mm, addr); pgdp = pgd_offset(init_task.mm, addr);
pmdp = pmd_offset(pgdp, addr); pmdp = pmd_offset(pgdp, addr);
ptep = pte_offset(pmdp, addr); ptep = pte_offset_map(pmdp, addr);
set_pte(ptep, pte_val(mk_pte(virt_to_page(page), dvma_prot))); set_pte(ptep, pte_val(mk_pte(virt_to_page(page), dvma_prot)));
......
/* $Id: iommu.c,v 1.22 2001/12/17 07:05:09 davem Exp $ /*
* iommu.c: IOMMU specific routines for memory management. * iommu.c: IOMMU specific routines for memory management.
* *
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1995 Pete Zaitcev * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/ */
...@@ -12,6 +12,8 @@ ...@@ -12,6 +12,8 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
#include <asm/scatterlist.h> #include <asm/scatterlist.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
...@@ -19,6 +21,8 @@ ...@@ -19,6 +21,8 @@
#include <asm/io.h> #include <asm/io.h>
#include <asm/mxcc.h> #include <asm/mxcc.h>
#include <asm/mbus.h> #include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
/* srmmu.c */ /* srmmu.c */
extern int viking_mxcc_present; extern int viking_mxcc_present;
...@@ -245,7 +249,8 @@ static void iommu_map_dma_area(unsigned long va, __u32 addr, int len) ...@@ -245,7 +249,8 @@ static void iommu_map_dma_area(unsigned long va, __u32 addr, int len)
pgdp = pgd_offset(&init_mm, addr); pgdp = pgd_offset(&init_mm, addr);
pmdp = pmd_offset(pgdp, addr); pmdp = pmd_offset(pgdp, addr);
ptep = pte_offset(pmdp, addr); ptep = pte_offset_map(pmdp, addr);
/* XXX What if we run out of atomic maps above */
set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot)); set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
if (ipte_cache != 0) { if (ipte_cache != 0) {
......
/* $Id: srmmu.c,v 1.234 2001/12/21 04:56:15 davem Exp $ /*
* srmmu.c: SRMMU specific routines for memory management. * srmmu.c: SRMMU specific routines for memory management.
* *
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1995 Pete Zaitcev * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org) * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
...@@ -132,11 +132,32 @@ spinlock_t srmmu_nocache_spinlock; ...@@ -132,11 +132,32 @@ spinlock_t srmmu_nocache_spinlock;
static inline unsigned long srmmu_pgd_page(pgd_t pgd) static inline unsigned long srmmu_pgd_page(pgd_t pgd)
{ return srmmu_device_memory(pgd_val(pgd))?~0:(unsigned long)__nocache_va((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); } { return srmmu_device_memory(pgd_val(pgd))?~0:(unsigned long)__nocache_va((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
static inline unsigned long srmmu_pmd_page(pmd_t pmd) static inline unsigned long srmmu_pmd_page_kernel(pmd_t pmd)
{ return srmmu_device_memory(pmd_val(pmd))?~0:(unsigned long)__nocache_va((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); } {
return (unsigned long)
__nocache_va((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4);
}
static struct page *srmmu_pmd_page(pmd_t pmd) /* XXX inline later */
{
if (srmmu_device_memory(pmd_val(pmd))) {
/* XXX Anton obviously had something in mind when he did this.
* But what?
*/
/* return (struct page *)~0; */
BUG(); /* NO WAY */
}
return virt_to_page(srmmu_pmd_page_kernel(pmd));
}
static inline struct page *srmmu_pte_page(pte_t pte) static inline unsigned long srmmu_pte_pfn(pte_t pte)
{ return (mem_map + (unsigned long)(srmmu_device_memory(pte_val(pte))?~0:(((pte_val(pte) & SRMMU_PTE_PMASK) << 4) >> PAGE_SHIFT))); } {
if (srmmu_device_memory(pte_val(pte)))
BUG();
return (unsigned long)
(((pte_val(pte) & SRMMU_PTE_PMASK) << 4) >> PAGE_SHIFT);
}
static inline int srmmu_pte_none(pte_t pte) static inline int srmmu_pte_none(pte_t pte)
{ return !(pte_val(pte) & 0xFFFFFFF); } { return !(pte_val(pte) & 0xFFFFFFF); }
...@@ -219,7 +240,16 @@ static inline void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp) ...@@ -219,7 +240,16 @@ static inline void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ srmmu_set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pmdp) >> 4))); } { srmmu_set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pmdp) >> 4))); }
static inline void srmmu_pmd_set(pmd_t * pmdp, pte_t * ptep) static inline void srmmu_pmd_set(pmd_t * pmdp, pte_t * ptep)
{ srmmu_set_pte((pte_t *)pmdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) ptep) >> 4))); } {
srmmu_set_pte((pte_t *)pmdp,
(SRMMU_ET_PTD | (__nocache_pa((unsigned long) ptep) >> 4)));
}
static inline void srmmu_pmd_populate(pmd_t * pmdp, struct page * ptep)
{
srmmu_set_pte((pte_t *)pmdp,
(SRMMU_ET_PTD | (((ptep - mem_map) << PAGE_SHIFT) >> 4)));
}
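The <<4 and >>4 pairs in these helpers encode the SRMMU descriptor format. A worked round trip, assuming SRMMU_PTD_PMASK masks the pointer field (low four bits clear) and the table is aligned well enough that the 2-bit ET field does not collide:

unsigned long pa   = 0x12345600UL;		/* 256-byte aligned table */
unsigned long ptd  = SRMMU_ET_PTD | (pa >> 4);	/* as srmmu_pmd_set() builds it */
unsigned long back = (ptd & SRMMU_PTD_PMASK) << 4;	/* recovers pa exactly */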
static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot) static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
{ return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); } { return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); }
...@@ -234,7 +264,13 @@ static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address) ...@@ -234,7 +264,13 @@ static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
/* Find an entry in the third-level page table.. */ /* Find an entry in the third-level page table.. */
static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address) static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
{ return (pte_t *) srmmu_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1)); } {
unsigned long pte;
pte = srmmu_pmd_page_kernel(*dir);
return (pte_t *) pte +
((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
}
unsigned long __srmmu_get_nocache(int size, int align) unsigned long __srmmu_get_nocache(int size, int align)
{ {
...@@ -387,14 +423,14 @@ static void srmmu_free_pgd_fast(pgd_t *pgd) ...@@ -387,14 +423,14 @@ static void srmmu_free_pgd_fast(pgd_t *pgd)
srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE); srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE);
} }
static pte_t *srmmu_pte_alloc_one_fast(struct mm_struct *mm, unsigned long address) static pte_t *srmmu_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{ {
return (pte_t *)srmmu_get_nocache(SRMMU_PTE_TABLE_SIZE, SRMMU_PTE_TABLE_SIZE); return (pte_t *)srmmu_get_nocache(SRMMU_PTE_TABLE_SIZE, SRMMU_PTE_TABLE_SIZE);
} }
static pte_t *srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address) static struct page *srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address)
{ {
return NULL; return virt_to_page(srmmu_pte_alloc_one_kernel(mm, address));
} }
static void srmmu_free_pte_fast(pte_t *pte) static void srmmu_free_pte_fast(pte_t *pte)
...@@ -402,6 +438,11 @@ static void srmmu_free_pte_fast(pte_t *pte) ...@@ -402,6 +438,11 @@ static void srmmu_free_pte_fast(pte_t *pte)
srmmu_free_nocache((unsigned long)pte, SRMMU_PTE_TABLE_SIZE); srmmu_free_nocache((unsigned long)pte, SRMMU_PTE_TABLE_SIZE);
} }
static void srmmu_pte_free(struct page *pte)
{
srmmu_free_nocache((unsigned long)page_address(pte), SRMMU_PTE_TABLE_SIZE);
}
static pmd_t *srmmu_pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address) static pmd_t *srmmu_pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{ {
return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
...@@ -517,19 +558,15 @@ void srmmu_unmapioaddr(unsigned long virt_addr) ...@@ -517,19 +558,15 @@ void srmmu_unmapioaddr(unsigned long virt_addr)
* mappings on the kernel stack without any special code as we did * mappings on the kernel stack without any special code as we did
* need on the sun4c. * need on the sun4c.
*/ */
struct task_struct *srmmu_alloc_task_struct(void) struct thread_info *srmmu_alloc_thread_info(void)
{
return (struct task_struct *) __get_free_pages(GFP_KERNEL, 1);
}
static void srmmu_free_task_struct(struct task_struct *tsk)
{ {
free_pages((unsigned long)tsk, 1); return (struct thread_info *)
__get_free_pages(GFP_KERNEL, THREAD_INFO_ORDER);
} }
static void srmmu_get_task_struct(struct task_struct *tsk) static void srmmu_free_thread_info(struct thread_info *ti)
{ {
atomic_inc(&virt_to_page(tsk)->count); free_pages((unsigned long)ti, THREAD_INFO_ORDER);
} }
/* tsunami.S */ /* tsunami.S */
...@@ -1170,8 +1207,8 @@ void __init srmmu_paging_init(void) ...@@ -1170,8 +1207,8 @@ void __init srmmu_paging_init(void)
srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_BASE_END); srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_BASE_END);
pgd = pgd_offset_k(PKMAP_BASE); pgd = pgd_offset_k(PKMAP_BASE);
pmd = pmd_offset(pgd, PKMAP_BASE); pmd = srmmu_pmd_offset(pgd, PKMAP_BASE);
pte = pte_offset(pmd, PKMAP_BASE); pte = srmmu_pte_offset(pmd, PKMAP_BASE);
pkmap_page_table = pte; pkmap_page_table = pte;
flush_cache_all(); flush_cache_all();
...@@ -1209,6 +1246,10 @@ void __init srmmu_paging_init(void) ...@@ -1209,6 +1246,10 @@ void __init srmmu_paging_init(void)
free_area_init_node(0, NULL, NULL, zones_size, free_area_init_node(0, NULL, NULL, zones_size,
phys_base, zholes_size); phys_base, zholes_size);
} }
/* P3: easy to fix, todo. Current code is utterly broken, though. */
if (phys_base != 0)
panic("phys_base nonzero");
} }
static void srmmu_mmu_info(struct seq_file *m) static void srmmu_mmu_info(struct seq_file *m)
...@@ -1282,7 +1323,7 @@ static void __init init_vac_layout(void) ...@@ -1282,7 +1323,7 @@ static void __init init_vac_layout(void)
if(vac_line_size < min_line_size) if(vac_line_size < min_line_size)
min_line_size = vac_line_size; min_line_size = vac_line_size;
cpu++; cpu++;
if(cpu == smp_num_cpus) if (cpu >= NR_CPUS || !cpu_online(cpu))
break; break;
#else #else
break; break;
...@@ -1944,9 +1985,8 @@ static void __init get_srmmu_type(void) ...@@ -1944,9 +1985,8 @@ static void __init get_srmmu_type(void)
} }
/* don't laugh, static pagetables */ /* don't laugh, static pagetables */
static int srmmu_check_pgt_cache(int low, int high) static void srmmu_check_pgt_cache(int low, int high)
{ {
return 0;
} }
extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme, extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
...@@ -2017,16 +2057,16 @@ void __init ld_mmu_srmmu(void) ...@@ -2017,16 +2057,16 @@ void __init ld_mmu_srmmu(void)
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2); BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2);
#endif #endif
BTFIXUPSET_CALL(do_check_pgt_cache, srmmu_check_pgt_cache, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(do_check_pgt_cache, srmmu_check_pgt_cache, BTFIXUPCALL_NOP);
BTFIXUPSET_CALL(set_pte, srmmu_set_pte, BTFIXUPCALL_SWAPO0O1); BTFIXUPSET_CALL(set_pte, srmmu_set_pte, BTFIXUPCALL_SWAPO0O1);
BTFIXUPSET_CALL(switch_mm, srmmu_switch_mm, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(switch_mm, srmmu_switch_mm, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pte_page, srmmu_pte_page, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pte_pfn, srmmu_pte_pfn, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pgd_page, srmmu_pgd_page, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pgd_page, srmmu_pgd_page, BTFIXUPCALL_NORM);
BTFIXUPSET_SETHI(none_mask, 0xF0000000); /* XXX P3: is it used? */ BTFIXUPSET_SETHI(none_mask, 0xF0000000);
BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0); BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0);
...@@ -2041,16 +2081,18 @@ void __init ld_mmu_srmmu(void) ...@@ -2041,16 +2081,18 @@ void __init ld_mmu_srmmu(void)
BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_SWAPO0G0); BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_SWAPO0G0);
BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pfn_pte, srmmu_pfn_pte, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mk_pte_io, srmmu_mk_pte_io, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(mk_pte_io, srmmu_mk_pte_io, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pmd_populate, srmmu_pmd_populate, BTFIXUPCALL_NORM);
BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK); BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK);
BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pte_offset, srmmu_pte_offset, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pte_offset_kernel, srmmu_pte_offset, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(free_pte_fast, srmmu_free_pte_fast, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(free_pte_fast, srmmu_free_pte_fast, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pte_alloc_one_fast, srmmu_pte_alloc_one_fast, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pte_free, srmmu_pte_free, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pte_alloc_one_kernel, srmmu_pte_alloc_one_kernel, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pte_alloc_one, srmmu_pte_alloc_one, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pte_alloc_one, srmmu_pte_alloc_one, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(free_pmd_fast, srmmu_free_pmd_fast, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(free_pmd_fast, srmmu_free_pmd_fast, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pmd_alloc_one_fast, srmmu_pmd_alloc_one_fast, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pmd_alloc_one_fast, srmmu_pmd_alloc_one_fast, BTFIXUPCALL_NORM);
...@@ -2071,10 +2113,8 @@ void __init ld_mmu_srmmu(void) ...@@ -2071,10 +2113,8 @@ void __init ld_mmu_srmmu(void)
BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM);
/* Task struct and kernel stack allocating/freeing. */ BTFIXUPSET_CALL(alloc_thread_info, srmmu_alloc_thread_info, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(alloc_task_struct, srmmu_alloc_task_struct, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(free_thread_info, srmmu_free_thread_info, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(free_task_struct, srmmu_free_task_struct, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(get_task_struct, srmmu_get_task_struct, BTFIXUPCALL_NORM);
get_srmmu_type(); get_srmmu_type();
patch_window_trap_handlers(); patch_window_trap_handlers();
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include <asm/oplib.h> #include <asm/oplib.h>
#include <asm/bpp.h> #include <asm/bpp.h>
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/pcic.h> /* pcic_present */
struct sbus_bus *sbus_root = NULL; struct sbus_bus *sbus_root = NULL;
...@@ -334,7 +335,7 @@ static int __init sbus_init(void) ...@@ -334,7 +335,7 @@ static int __init sbus_init(void)
(nd = prom_getchild(iommund)) == 0 || (nd = prom_getchild(iommund)) == 0 ||
(nd = prom_searchsiblings(nd, "sbus")) == 0) { (nd = prom_searchsiblings(nd, "sbus")) == 0) {
#ifdef CONFIG_PCI #ifdef CONFIG_PCI
if (!pcibios_present()) { if (!pcic_present()) {
prom_printf("Neither SBUS nor PCI found.\n"); prom_printf("Neither SBUS nor PCI found.\n");
prom_halt(); prom_halt();
} }
......
...@@ -207,12 +207,74 @@ static __inline__ unsigned long ffz(unsigned long word) ...@@ -207,12 +207,74 @@ static __inline__ unsigned long ffz(unsigned long word)
return result; return result;
} }
/**
* __ffs - find first bit in word.
* @word: The word to search
*
* Undefined if no bit exists, so code should check against 0 first.
*/
static __inline__ int __ffs(unsigned long word)
{
int num = 0;
if ((word & 0xffff) == 0) {
num += 16;
word >>= 16;
}
if ((word & 0xff) == 0) {
num += 8;
word >>= 8;
}
if ((word & 0xf) == 0) {
num += 4;
word >>= 4;
}
if ((word & 0x3) == 0) {
num += 2;
word >>= 2;
}
if ((word & 0x1) == 0)
num += 1;
return num;
}
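A usage sketch of the guard the comment above asks for (hypothetical helper, not part of the patch):

static int first_bit_or_none(unsigned long mask)
{
	/* __ffs() is undefined for 0, so test before calling. */
	return mask ? __ffs(mask) : -1;		/* e.g. 0x00f0 -> 4 */
}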
/*
* Every architecture must define this function. It's the fastest
* way of searching a 140-bit bitmap where the first 100 bits are
* unlikely to be set. It's guaranteed that at least one of the 140
* bits is set.
*/
static __inline__ int sched_find_first_bit(unsigned long *b)
{
if (unlikely(b[0]))
return __ffs(b[0]);
if (unlikely(b[1]))
return __ffs(b[1]) + 32;
if (unlikely(b[2]))
return __ffs(b[2]) + 64;
if (b[3])
return __ffs(b[3]) + 96;
return __ffs(b[4]) + 128;
}
/* /*
* ffs: find first bit set. This is defined the same way as * ffs: find first bit set. This is defined the same way as
* the libc and compiler builtin ffs routines, therefore * the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs). * differs in spirit from the above ffz (man ffs).
*/ */
#define ffs(x) generic_ffs(x) static __inline__ int ffs(int x)
{
if (!x)
return 0;
return __ffs((unsigned long)x) + 1;	/* ffs() is 1-based, __ffs() is 0-based */
}
/*
* fls: find last (most-significant) bit set.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
#define fls(x) generic_fls(x)
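generic_fls() is the portable fallback (in linux/bitops.h at the time); a behavioral sketch matching the note above, not the kernel's exact code:

static inline int fls_sketch(unsigned int x)
{
	int r = 0;			/* fls(0) == 0 by convention */

	while (x) {
		x >>= 1;
		r++;			/* 1-based position of the MSB */
	}
	return r;			/* fls(1) == 1, fls(0x80000000) == 32 */
}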
/* /*
* hweightN: returns the hamming weight (i.e. the number * hweightN: returns the hamming weight (i.e. the number
...@@ -272,6 +334,34 @@ static __inline__ unsigned long find_next_zero_bit(void *addr, unsigned long siz ...@@ -272,6 +334,34 @@ static __inline__ unsigned long find_next_zero_bit(void *addr, unsigned long siz
#define find_first_zero_bit(addr, size) \ #define find_first_zero_bit(addr, size) \
find_next_zero_bit((addr), (size), 0) find_next_zero_bit((addr), (size), 0)
/**
* find_next_bit - find the first set bit in a memory region
* @addr: The address to base the search on
* @offset: The bitnumber to start searching at
* @size: The maximum size to search
*
* Scheduler induced bitop, do not use.
*/
static __inline__ int find_next_bit(unsigned long *addr, int size, int offset)
{
unsigned long *p = addr + (offset >> 5);
int num = offset & ~0x1f;
unsigned long word;
word = *p++;
word &= ~((1 << (offset & 0x1f)) - 1);
while (num < size) {
if (word != 0) {
return __ffs(word) + num;
}
word = *p++;
num += 0x20;
}
return num;
}
/*
*/
static __inline__ int test_le_bit(int nr, __const__ void * addr) static __inline__ int test_le_bit(int nr, __const__ void * addr)
{ {
__const__ unsigned char *ADDR = (__const__ unsigned char *) addr; __const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
......
#ifndef _SPARC_CACHEFLUSH_H
#define _SPARC_CACHEFLUSH_H
#include <linux/config.h>
#include <linux/mm.h> /* Common for other includes */
// #include <linux/kernel.h> from pgalloc.h
// #include <linux/sched.h> from pgalloc.h
// #include <asm/page.h>
#include <asm/btfixup.h>
/*
* Fine grained cache flushing.
*/
#ifdef CONFIG_SMP
BTFIXUPDEF_CALL(void, local_flush_cache_all, void)
BTFIXUPDEF_CALL(void, local_flush_cache_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, local_flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, local_flush_cache_page, struct vm_area_struct *, unsigned long)
#define local_flush_cache_all() BTFIXUP_CALL(local_flush_cache_all)()
#define local_flush_cache_mm(mm) BTFIXUP_CALL(local_flush_cache_mm)(mm)
#define local_flush_cache_range(vma,start,end) BTFIXUP_CALL(local_flush_cache_range)(vma,start,end)
#define local_flush_cache_page(vma,addr) BTFIXUP_CALL(local_flush_cache_page)(vma,addr)
BTFIXUPDEF_CALL(void, local_flush_page_to_ram, unsigned long)
BTFIXUPDEF_CALL(void, local_flush_sig_insns, struct mm_struct *, unsigned long)
#define local_flush_page_to_ram(addr) BTFIXUP_CALL(local_flush_page_to_ram)(addr)
#define local_flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(local_flush_sig_insns)(mm,insn_addr)
extern void smp_flush_cache_all(void);
extern void smp_flush_cache_mm(struct mm_struct *mm);
extern void smp_flush_cache_range(struct vm_area_struct *vma,
unsigned long start,
unsigned long end);
extern void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void smp_flush_page_to_ram(unsigned long page);
extern void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
#endif /* CONFIG_SMP */
BTFIXUPDEF_CALL(void, flush_cache_all, void)
BTFIXUPDEF_CALL(void, flush_cache_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, flush_cache_page, struct vm_area_struct *, unsigned long)
#define flush_cache_all() BTFIXUP_CALL(flush_cache_all)()
#define flush_cache_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
#define flush_cache_range(vma,start,end) BTFIXUP_CALL(flush_cache_range)(vma,start,end)
#define flush_cache_page(vma,addr) BTFIXUP_CALL(flush_cache_page)(vma,addr)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma, pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
BTFIXUPDEF_CALL(void, __flush_page_to_ram, unsigned long)
BTFIXUPDEF_CALL(void, flush_sig_insns, struct mm_struct *, unsigned long)
#define __flush_page_to_ram(addr) BTFIXUP_CALL(__flush_page_to_ram)(addr)
#define flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(flush_sig_insns)(mm,insn_addr)
extern void flush_page_to_ram(struct page *page);
#define flush_dcache_page(page) do { } while (0)
#endif /* _SPARC_CACHEFLUSH_H */
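A gloss on the BTFIXUP pattern that dominates this header: each BTFIXUPDEF_CALL reserves a patchable call site, and the ld_mmu_*() routines point it at the implementation for the MMU detected at boot. Below is a conceptual analogue with a plain function pointer; the real mechanism patches the call instructions themselves, which is why the dispatch stays zero-cost afterwards:

/* Conceptual analogue only; btfixup rewrites code, not data. */
static void (*flush_cache_all_slot)(void);	/* the patchable "slot" */
#define flush_cache_all() flush_cache_all_slot()

extern void srmmu_flush_cache_all(void);	/* one MMU's implementation */

static void ld_mmu_sketch(void)
{
	flush_cache_all_slot = srmmu_flush_cache_all;	/* BTFIXUPSET_CALL analogue */
}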
#ifndef _SPARC_CURRENT_H /*
#define _SPARC_CURRENT_H * include/asm-sparc/current.h
*
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Copyright (C) 2002 Pete Zaitcev (zaitcev@yahoo.com)
*
* Derived from "include/asm-s390/current.h" by
* Martin Schwidefsky (schwidefsky@de.ibm.com)
* Derived from "include/asm-i386/current.h"
*/
#ifndef _ASM_CURRENT_H
#define _ASM_CURRENT_H
/* Sparc rules... */ /*
register struct task_struct *current asm("g6"); * On sparc64, DaveM keeps current_thread_info in %g4.
* We might want to consider doing the same to shave a few cycles.
*/
#endif /* !(_SPARC_CURRENT_H) */ // Applications do not include kernel headers anymore, period.
// #ifdef __KERNEL__
#ifndef _ASM_THREAD_INFO_H
#include <asm/thread_info.h>
#endif
struct task_struct;
/* Two stage process (inline + #define) for type-checking. */
/* We also obfuscate get_current() to check if anyone used that by mistake. */
static inline struct task_struct *__get_current(void)
{
return current_thread_info()->task;
}
#define current __get_current()
// #endif /* __KERNEL__ */
#endif /* !(_ASM_CURRENT_H) */
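One consequence of the two-stage pattern worth spelling out: the old register-with-asm("g6") declaration made current an assignable lvalue, whereas the macro now expands to a function call, so a stray write fails to compile:

void demo(struct task_struct *t)
{
	t = current;		/* reads work exactly as before */
	/* current = t; */	/* no longer compiles: a call is not an lvalue */
}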
...@@ -41,7 +41,7 @@ do { unsigned long *dest = &(__elf_regs[0]); \ ...@@ -41,7 +41,7 @@ do { unsigned long *dest = &(__elf_regs[0]); \
dest[34] = src->npc; \ dest[34] = src->npc; \
dest[35] = src->y; \ dest[35] = src->y; \
dest[36] = dest[37] = 0; /* XXX */ \ dest[36] = dest[37] = 0; /* XXX */ \
} while(0) } while(0); /* Janitors: Don't touch this semicolon. */
typedef struct { typedef struct {
union { union {
......
...@@ -25,6 +25,8 @@ ...@@ -25,6 +25,8 @@
#include <asm/vaddrs.h> #include <asm/vaddrs.h>
#include <asm/kmap_types.h> #include <asm/kmap_types.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
/* undef for production */ /* undef for production */
#define HIGHMEM_DEBUG 1 #define HIGHMEM_DEBUG 1
......
...@@ -9,6 +9,8 @@ enum km_type { ...@@ -9,6 +9,8 @@ enum km_type {
KM_USER1, KM_USER1,
KM_BIO_SRC_IRQ, KM_BIO_SRC_IRQ,
KM_BIO_DST_IRQ, KM_BIO_DST_IRQ,
KM_PTE0,
KM_PTE1,
KM_TYPE_NR KM_TYPE_NR
}; };
......
...@@ -5,29 +5,6 @@ ...@@ -5,29 +5,6 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
/*
* Every architecture must define this function. It's the fastest
* way of searching a 168-bit bitmap where the first 128 bits are
* unlikely to be clear. It's guaranteed that at least one of the 168
* bits is cleared.
*/
#if MAX_RT_PRIO != 128 || MAX_PRIO != 168
# error update this function.
#endif
static inline int sched_find_first_zero_bit(unsigned long *b)
{
unsigned int rt;
rt = b[0] & b[1] & b[2] & b[3];
if (unlikely(rt != 0xffffffff))
return find_first_zero_bit(b, MAX_RT_PRIO);
if (b[4] != ~0)
return ffz(b[4]) + MAX_RT_PRIO;
return ffz(b[5]) + 32 + MAX_RT_PRIO;
}
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu) static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{ {
} }
......
...@@ -27,9 +27,6 @@ ...@@ -27,9 +27,6 @@
#include <asm/head.h> /* for KERNBASE */ #include <asm/head.h> /* for KERNBASE */
#include <asm/btfixup.h> #include <asm/btfixup.h>
/* This is always 2048*sizeof(long), doesn't change with PAGE_SIZE */
#define TASK_UNION_SIZE 8192
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
/* /*
...@@ -176,8 +173,12 @@ extern __inline__ int get_order(unsigned long size) ...@@ -176,8 +173,12 @@ extern __inline__ int get_order(unsigned long size)
#define PAGE_OFFSET 0xf0000000 #define PAGE_OFFSET 0xf0000000
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET) #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) #define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
#define pfn_to_page(pfn) (mem_map + (pfn))
#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
#define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT)) #define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
#define VALID_PAGE(page) ((page - mem_map) < max_mapnr) #define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
......
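A sanity sketch of how the new pfn interfaces compose for a lowmem kernel address (hypothetical helper, not from the patch):

static void pfn_round_trip_sketch(void *kaddr)
{
	unsigned long pfn = __pa(kaddr) >> PAGE_SHIFT;
	struct page *pg = pfn_to_page(pfn);	/* same page as virt_to_page(kaddr) */

	BUG_ON(!pfn_valid(pfn) || page_to_pfn(pg) != pfn);
}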
...@@ -9,85 +9,7 @@ ...@@ -9,85 +9,7 @@
#include <asm/page.h> #include <asm/page.h>
#include <asm/btfixup.h> #include <asm/btfixup.h>
/* Fine grained cache/tlb flushing. */ struct page;
#ifdef CONFIG_SMP
BTFIXUPDEF_CALL(void, local_flush_cache_all, void)
BTFIXUPDEF_CALL(void, local_flush_cache_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, local_flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, local_flush_cache_page, struct vm_area_struct *, unsigned long)
#define local_flush_cache_all() BTFIXUP_CALL(local_flush_cache_all)()
#define local_flush_cache_mm(mm) BTFIXUP_CALL(local_flush_cache_mm)(mm)
#define local_flush_cache_range(vma,start,end) BTFIXUP_CALL(local_flush_cache_range)(vma,start,end)
#define local_flush_cache_page(vma,addr) BTFIXUP_CALL(local_flush_cache_page)(vma,addr)
BTFIXUPDEF_CALL(void, local_flush_tlb_all, void)
BTFIXUPDEF_CALL(void, local_flush_tlb_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, local_flush_tlb_range, struct vm_area_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, local_flush_tlb_page, struct vm_area_struct *, unsigned long)
#define local_flush_tlb_all() BTFIXUP_CALL(local_flush_tlb_all)()
#define local_flush_tlb_mm(mm) BTFIXUP_CALL(local_flush_tlb_mm)(mm)
#define local_flush_tlb_range(vma,start,end) BTFIXUP_CALL(local_flush_tlb_range)(vma,start,end)
#define local_flush_tlb_page(vma,addr) BTFIXUP_CALL(local_flush_tlb_page)(vma,addr)
BTFIXUPDEF_CALL(void, local_flush_page_to_ram, unsigned long)
BTFIXUPDEF_CALL(void, local_flush_sig_insns, struct mm_struct *, unsigned long)
#define local_flush_page_to_ram(addr) BTFIXUP_CALL(local_flush_page_to_ram)(addr)
#define local_flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(local_flush_sig_insns)(mm,insn_addr)
extern void smp_flush_cache_all(void);
extern void smp_flush_cache_mm(struct mm_struct *mm);
extern void smp_flush_cache_range(struct vm_area_struct *vma,
unsigned long start,
unsigned long end);
extern void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void smp_flush_tlb_all(void);
extern void smp_flush_tlb_mm(struct mm_struct *mm);
extern void smp_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start,
unsigned long end);
extern void smp_flush_tlb_page(struct vm_area_struct *mm, unsigned long page);
extern void smp_flush_page_to_ram(unsigned long page);
extern void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
#endif
BTFIXUPDEF_CALL(void, flush_cache_all, void)
BTFIXUPDEF_CALL(void, flush_cache_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, flush_cache_page, struct vm_area_struct *, unsigned long)
#define flush_cache_all() BTFIXUP_CALL(flush_cache_all)()
#define flush_cache_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
#define flush_cache_range(vma,start,end) BTFIXUP_CALL(flush_cache_range)(vma,start,end)
#define flush_cache_page(vma,addr) BTFIXUP_CALL(flush_cache_page)(vma,addr)
#define flush_icache_range(start, end) do { } while (0)
BTFIXUPDEF_CALL(void, flush_tlb_all, void)
BTFIXUPDEF_CALL(void, flush_tlb_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, flush_tlb_range, struct vm_area_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, flush_tlb_page, struct vm_area_struct *, unsigned long)
extern __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
{
}
#define flush_tlb_all() BTFIXUP_CALL(flush_tlb_all)()
#define flush_tlb_mm(mm) BTFIXUP_CALL(flush_tlb_mm)(mm)
#define flush_tlb_range(vma,start,end) BTFIXUP_CALL(flush_tlb_range)(vma,start,end)
#define flush_tlb_page(vma,addr) BTFIXUP_CALL(flush_tlb_page)(vma,addr)
BTFIXUPDEF_CALL(void, __flush_page_to_ram, unsigned long)
BTFIXUPDEF_CALL(void, flush_sig_insns, struct mm_struct *, unsigned long)
#define __flush_page_to_ram(addr) BTFIXUP_CALL(__flush_page_to_ram)(addr)
#define flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(flush_sig_insns)(mm,insn_addr)
extern void flush_page_to_ram(struct page *page);
#define flush_dcache_page(page) do { } while (0)
extern struct pgtable_cache_struct { extern struct pgtable_cache_struct {
unsigned long *pgd_cache; unsigned long *pgd_cache;
...@@ -101,7 +23,8 @@ extern struct pgtable_cache_struct { ...@@ -101,7 +23,8 @@ extern struct pgtable_cache_struct {
#define pgtable_cache_size (pgt_quicklists.pgtable_cache_sz) #define pgtable_cache_size (pgt_quicklists.pgtable_cache_sz)
#define pgd_cache_size (pgt_quicklists.pgd_cache_sz) #define pgd_cache_size (pgt_quicklists.pgd_cache_sz)
BTFIXUPDEF_CALL(int, do_check_pgt_cache, int, int) extern void check_pgt_cache(void);
BTFIXUPDEF_CALL(void, do_check_pgt_cache, int, int)
#define do_check_pgt_cache(low,high) BTFIXUP_CALL(do_check_pgt_cache)(low,high) #define do_check_pgt_cache(low,high) BTFIXUP_CALL(do_check_pgt_cache)(low,high)
BTFIXUPDEF_CALL(pgd_t *, get_pgd_fast, void) BTFIXUPDEF_CALL(pgd_t *, get_pgd_fast, void)
...@@ -113,6 +36,8 @@ BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *) ...@@ -113,6 +36,8 @@ BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
#define pgd_free(pgd) free_pgd_fast(pgd) #define pgd_free(pgd) free_pgd_fast(pgd)
#define pgd_alloc(mm) get_pgd_fast() #define pgd_alloc(mm) get_pgd_fast()
BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
#define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
#define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD) #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
...@@ -127,18 +52,24 @@ BTFIXUPDEF_CALL(void, free_pmd_fast, pmd_t *) ...@@ -127,18 +52,24 @@ BTFIXUPDEF_CALL(void, free_pmd_fast, pmd_t *)
#define free_pmd_fast(pmd) BTFIXUP_CALL(free_pmd_fast)(pmd) #define free_pmd_fast(pmd) BTFIXUP_CALL(free_pmd_fast)(pmd)
#define pmd_free(pmd) free_pmd_fast(pmd) #define pmd_free(pmd) free_pmd_fast(pmd)
#define pmd_free_tlb(tlb, pmd) pmd_free(pmd)
#define pmd_populate(MM, PMD, PTE) pmd_set(PMD, PTE) BTFIXUPDEF_CALL(void, pmd_populate, pmd_t *, struct page *)
#define pmd_populate(MM, PMD, PTE) BTFIXUP_CALL(pmd_populate)(PMD, PTE)
BTFIXUPDEF_CALL(void, pmd_set, pmd_t *, pte_t *)
#define pmd_populate_kernel(MM, PMD, PTE) BTFIXUP_CALL(pmd_set)(PMD, PTE)
BTFIXUPDEF_CALL(pte_t *, pte_alloc_one, struct mm_struct *, unsigned long) BTFIXUPDEF_CALL(struct page *, pte_alloc_one, struct mm_struct *, unsigned long)
#define pte_alloc_one(mm, address) BTFIXUP_CALL(pte_alloc_one)(mm, address) #define pte_alloc_one(mm, address) BTFIXUP_CALL(pte_alloc_one)(mm, address)
BTFIXUPDEF_CALL(pte_t *, pte_alloc_one_kernel, struct mm_struct *, unsigned long)
BTFIXUPDEF_CALL(pte_t *, pte_alloc_one_fast, struct mm_struct *, unsigned long) #define pte_alloc_one_kernel(mm, addr) BTFIXUP_CALL(pte_alloc_one_kernel)(mm, addr)
#define pte_alloc_one_fast(mm, address) BTFIXUP_CALL(pte_alloc_one_fast)(mm, address)
BTFIXUPDEF_CALL(void, free_pte_fast, pte_t *) BTFIXUPDEF_CALL(void, free_pte_fast, pte_t *)
#define free_pte_fast(pte) BTFIXUP_CALL(free_pte_fast)(pte) #define free_pte_fast(pte) BTFIXUP_CALL(free_pte_fast)(pte)
#define pte_free_kernel(pte) free_pte_fast(pte)
#define pte_free(pte) free_pte_fast(pte) BTFIXUPDEF_CALL(void, pte_free, struct page *)
#define pte_free(pte) BTFIXUP_CALL(pte_free)(pte)
#define pte_free_tlb(tlb, pte) pte_free(pte)
#endif /* _SPARC_PGALLOC_H */ #endif /* _SPARC_PGALLOC_H */
...@@ -11,6 +11,8 @@ ...@@ -11,6 +11,8 @@
#include <linux/config.h> #include <linux/config.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
/* XXX This creates many nasty warnings. */
/* #include <linux/highmem.h> */ /* kmap_atomic in pte_offset_map */
#include <asm/asi.h> #include <asm/asi.h>
#ifdef CONFIG_SUN4 #ifdef CONFIG_SUN4
#include <asm/pgtsun4.h> #include <asm/pgtsun4.h>
...@@ -26,6 +28,8 @@ ...@@ -26,6 +28,8 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
struct vm_area_struct;
extern void load_mmu(void); extern void load_mmu(void);
extern unsigned long calc_highpages(void); extern unsigned long calc_highpages(void);
...@@ -183,7 +187,7 @@ extern unsigned long empty_zero_page; ...@@ -183,7 +187,7 @@ extern unsigned long empty_zero_page;
#define BAD_PAGETABLE __bad_pagetable() #define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page() #define BAD_PAGE __bad_page()
#define ZERO_PAGE(vaddr) (mem_map + (((unsigned long)&empty_zero_page - PAGE_OFFSET + phys_base) >> PAGE_SHIFT)) #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
/* number of bits that fit into a memory pointer */ /* number of bits that fit into a memory pointer */
#define BITS_PER_PTR (8*sizeof(unsigned long)) #define BITS_PER_PTR (8*sizeof(unsigned long))
...@@ -193,7 +197,7 @@ extern unsigned long empty_zero_page; ...@@ -193,7 +197,7 @@ extern unsigned long empty_zero_page;
#define SIZEOF_PTR_LOG2 2 #define SIZEOF_PTR_LOG2 2
BTFIXUPDEF_CALL_CONST(unsigned long, pmd_page, pmd_t) BTFIXUPDEF_CALL_CONST(struct page *, pmd_page, pmd_t)
BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page, pgd_t) BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page, pgd_t)
#define pmd_page(pmd) BTFIXUP_CALL(pmd_page)(pmd) #define pmd_page(pmd) BTFIXUP_CALL(pmd_page)(pmd)
...@@ -291,13 +295,12 @@ BTFIXUPDEF_CALL_CONST(pte_t, pte_mkyoung, pte_t) ...@@ -291,13 +295,12 @@ BTFIXUPDEF_CALL_CONST(pte_t, pte_mkyoung, pte_t)
#define pte_mkyoung(pte) BTFIXUP_CALL(pte_mkyoung)(pte) #define pte_mkyoung(pte) BTFIXUP_CALL(pte_mkyoung)(pte)
#define page_pte_prot(page, prot) mk_pte(page, prot) #define page_pte_prot(page, prot) mk_pte(page, prot)
#define page_pte(page) page_pte_prot(page, __pgprot(0)) #define page_pte(page) mk_pte(page, __pgprot(0))
#define pfn_pte(pfn, prot) mk_pte(pfn_to_page(pfn), prot)
/* Permanent address of a page. */
#define page_address(page) ((page)->virtual)
BTFIXUPDEF_CALL(struct page *, pte_page, pte_t) BTFIXUPDEF_CALL(unsigned long, pte_pfn, pte_t)
#define pte_page(pte) BTFIXUP_CALL(pte_page)(pte) #define pte_pfn(pte) BTFIXUP_CALL(pte_pfn)(pte)
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
/* /*
* Conversion functions: convert a page and protection to a page entry, * Conversion functions: convert a page and protection to a page entry,
...@@ -312,12 +315,6 @@ BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_io, unsigned long, pgprot_t, int) ...@@ -312,12 +315,6 @@ BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_io, unsigned long, pgprot_t, int)
#define mk_pte_phys(page,pgprot) BTFIXUP_CALL(mk_pte_phys)(page,pgprot) #define mk_pte_phys(page,pgprot) BTFIXUP_CALL(mk_pte_phys)(page,pgprot)
#define mk_pte_io(page,pgprot,space) BTFIXUP_CALL(mk_pte_io)(page,pgprot,space) #define mk_pte_io(page,pgprot,space) BTFIXUP_CALL(mk_pte_io)(page,pgprot,space)
BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
BTFIXUPDEF_CALL(void, pmd_set, pmd_t *, pte_t *)
#define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
#define pmd_set(pmdp,ptep) BTFIXUP_CALL(pmd_set)(pmdp,ptep)
BTFIXUPDEF_INT(pte_modify_mask) BTFIXUPDEF_INT(pte_modify_mask)
extern pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute__((const)); extern pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute__((const));
...@@ -335,21 +332,32 @@ extern __inline__ pte_t pte_modify(pte_t pte, pgprot_t newprot) ...@@ -335,21 +332,32 @@ extern __inline__ pte_t pte_modify(pte_t pte, pgprot_t newprot)
/* to find an entry in a kernel page-table-directory */ /* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address) #define pgd_offset_k(address) pgd_offset(&init_mm, address)
BTFIXUPDEF_CALL(pmd_t *, pmd_offset, pgd_t *, unsigned long)
BTFIXUPDEF_CALL(pte_t *, pte_offset, pmd_t *, unsigned long)
/* Find an entry in the second-level page table.. */ /* Find an entry in the second-level page table.. */
BTFIXUPDEF_CALL(pmd_t *, pmd_offset, pgd_t *, unsigned long)
#define pmd_offset(dir,addr) BTFIXUP_CALL(pmd_offset)(dir,addr) #define pmd_offset(dir,addr) BTFIXUP_CALL(pmd_offset)(dir,addr)
/* Find an entry in the third-level page table.. */ /* Find an entry in the third-level page table.. */
#define pte_offset(dir,addr) BTFIXUP_CALL(pte_offset)(dir,addr) BTFIXUPDEF_CALL(pte_t *, pte_offset_kernel, pmd_t *, unsigned long)
#define pte_offset_kernel(dir,addr) BTFIXUP_CALL(pte_offset_kernel)(dir,addr)
/* __pte_offset is not BTFIXUP-ed, but PTRS_PER_PTE is, so it's ok. */
#define __pte_offset(address) \
(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#if 0 /* XXX Should we expose pmd_page_kernel? */
#define pte_offset_kernel(dir, addr) \
((pte_t *) pmd_page_kernel(*(dir)) + __pte_offset(addr))
#endif
#define pte_offset_map(dir, addr) \
((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + __pte_offset(addr))
#define pte_offset_map_nested(dir, addr) \
((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + __pte_offset(addr))
#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
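A usage sketch of how these pair up in a copy_page_range()-style caller (hypothetical, assuming kmap_atomic slots are released in LIFO order):

static void copy_one_pte_sketch(pmd_t *dst_pmd, pmd_t *src_pmd,
				unsigned long addr)
{
	pte_t *dst = pte_offset_map(dst_pmd, addr);		/* KM_PTE0 */
	pte_t *src = pte_offset_map_nested(src_pmd, addr);	/* KM_PTE1 */

	set_pte(dst, *src);		/* illustrative only */
	pte_unmap_nested(src);		/* unmap mirrors map order */
	pte_unmap(dst);
}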
/* The permissions for pgprot_val to make a page mapped on the obio space */ /* The permissions for pgprot_val to make a page mapped on the obio space */
extern unsigned int pg_iobits; extern unsigned int pg_iobits;
#define flush_icache_page(vma, pg) do { } while(0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
/* Certain architectures need to do special things when pte's /* Certain architectures need to do special things when pte's
* within a page table are directly modified. Thus, the following * within a page table are directly modified. Thus, the following
* hook is made available. * hook is made available.
......
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#define _SPARC_PGTSRMMU_H #define _SPARC_PGTSRMMU_H
#include <asm/page.h> #include <asm/page.h>
#include <asm/thread_info.h> /* TI_UWINMASK for WINDOW_FLUSH */
/* PMD_SHIFT determines the size of the area a second-level page table can map */ /* PMD_SHIFT determines the size of the area a second-level page table can map */
#define SRMMU_PMD_SHIFT 18 #define SRMMU_PMD_SHIFT 18
...@@ -87,7 +88,7 @@ ...@@ -87,7 +88,7 @@
#define WINDOW_FLUSH(tmp1, tmp2) \ #define WINDOW_FLUSH(tmp1, tmp2) \
mov 0, tmp1; \ mov 0, tmp1; \
98: ld [%g6 + AOFF_task_thread + AOFF_thread_uwinmask], tmp2; \ 98: ld [%g6 + TI_UWINMASK], tmp2; \
orcc %g0, tmp2, %g0; \ orcc %g0, tmp2, %g0; \
add tmp1, 1, tmp1; \ add tmp1, 1, tmp1; \
bne 98b; \ bne 98b; \
......
...@@ -44,6 +44,8 @@ ...@@ -44,6 +44,8 @@
*/ */
#define TASK_SIZE PAGE_OFFSET #define TASK_SIZE PAGE_OFFSET
struct task_struct;
struct fpq { struct fpq {
unsigned long *insn_addr; unsigned long *insn_addr;
unsigned long insn; unsigned long insn;
...@@ -55,15 +57,8 @@ typedef struct { ...@@ -55,15 +57,8 @@ typedef struct {
/* The Sparc processor specific thread struct. */ /* The Sparc processor specific thread struct. */
struct thread_struct { struct thread_struct {
unsigned long uwinmask __attribute__ ((aligned (8)));
struct pt_regs *kregs; struct pt_regs *kregs;
/* Context switch saved kernel state. */
unsigned long ksp __attribute__ ((aligned (8)));
unsigned long kpc;
unsigned long kpsr;
unsigned long kwim;
/* Special child fork kpsr/kwim values. */ /* Special child fork kpsr/kwim values. */
unsigned long fork_kpsr __attribute__ ((aligned (8))); unsigned long fork_kpsr __attribute__ ((aligned (8)));
unsigned long fork_kwim; unsigned long fork_kwim;
...@@ -92,8 +87,8 @@ struct thread_struct { ...@@ -92,8 +87,8 @@ struct thread_struct {
#define SPARC_FLAG_UNALIGNED 0x2 /* is allowed to do unaligned accesses */ #define SPARC_FLAG_UNALIGNED 0x2 /* is allowed to do unaligned accesses */
#define INIT_THREAD { \ #define INIT_THREAD { \
/* uwinmask, kregs, ksp, kpc, kpsr, kwim */ \ /* kregs, */ \
0, 0, 0, 0, 0, 0, \ 0, \
/* fork_kpsr, fork_kwim */ \ /* fork_kpsr, fork_kwim */ \
0, 0, \ 0, 0, \
/* reg_window */ \ /* reg_window */ \
...@@ -115,10 +110,7 @@ struct thread_struct { ...@@ -115,10 +110,7 @@ struct thread_struct {
} }
/* Return saved PC of a blocked thread. */ /* Return saved PC of a blocked thread. */
extern __inline__ unsigned long thread_saved_pc(struct thread_struct *t) extern unsigned long thread_saved_pc(struct task_struct *t);
{
return t->kpc;
}
/* Do necessary setup to start up a newly executed thread. */ /* Do necessary setup to start up a newly executed thread. */
extern __inline__ void start_thread(struct pt_regs * regs, unsigned long pc, extern __inline__ void start_thread(struct pt_regs * regs, unsigned long pc,
...@@ -163,7 +155,7 @@ extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); ...@@ -163,7 +155,7 @@ extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
if (!(__TSK) || (__TSK) == current || \ if (!(__TSK) || (__TSK) == current || \
(__TSK)->state == TASK_RUNNING) \ (__TSK)->state == TASK_RUNNING) \
goto __out; \ goto __out; \
fp = (__TSK)->thread.ksp + bias; \ fp = (__TSK)->thread_info->ksp + bias; \
do { \ do { \
/* Bogus frame pointer? */ \ /* Bogus frame pointer? */ \
if (fp < (task_base + sizeof(struct task_struct)) || \ if (fp < (task_base + sizeof(struct task_struct)) || \
...@@ -185,22 +177,9 @@ __out: __ret; \ ...@@ -185,22 +177,9 @@ __out: __ret; \
#define KSTK_ESP(tsk) ((tsk)->thread.kregs->u_regs[UREG_FP]) #define KSTK_ESP(tsk) ((tsk)->thread.kregs->u_regs[UREG_FP])
#ifdef __KERNEL__ #ifdef __KERNEL__
#define THREAD_SIZE (2*PAGE_SIZE)
extern struct task_struct *last_task_used_math; extern struct task_struct *last_task_used_math;
/* Allocation and freeing of basic task resources. */
BTFIXUPDEF_CALL(struct task_struct *, alloc_task_struct, void)
BTFIXUPDEF_CALL(void, free_task_struct, struct task_struct *)
BTFIXUPDEF_CALL(void, get_task_struct, struct task_struct *)
#define alloc_task_struct() BTFIXUP_CALL(alloc_task_struct)()
#define free_task_struct(tsk) BTFIXUP_CALL(free_task_struct)(tsk)
#define get_task_struct(tsk) BTFIXUP_CALL(get_task_struct)(tsk)
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
#define cpu_relax() do { } while (0) #define cpu_relax() do { } while (0)
#endif #endif
......
...@@ -12,15 +12,7 @@ ...@@ -12,15 +12,7 @@
#include <asm/psr.h> #include <asm/psr.h>
/* #ifdef CONFIG_DEBUG_SPINLOCK
* Define this to use the verbose/debugging versions in
* arch/sparc/lib/debuglocks.c
*
* Be sure to make dep whenever changing this option.
*/
#define SPIN_LOCK_DEBUG
#ifdef SPIN_LOCK_DEBUG
struct _spinlock_debug { struct _spinlock_debug {
unsigned char lock; unsigned char lock;
unsigned long owner_pc; unsigned long owner_pc;
...@@ -36,9 +28,9 @@ extern void _do_spin_lock(spinlock_t *lock, char *str); ...@@ -36,9 +28,9 @@ extern void _do_spin_lock(spinlock_t *lock, char *str);
extern int _spin_trylock(spinlock_t *lock); extern int _spin_trylock(spinlock_t *lock);
extern void _do_spin_unlock(spinlock_t *lock); extern void _do_spin_unlock(spinlock_t *lock);
#define spin_trylock(lp) _spin_trylock(lp) #define _raw_spin_trylock(lp) _spin_trylock(lp)
#define spin_lock(lock) _do_spin_lock(lock, "spin_lock") #define _raw_spin_lock(lock) _do_spin_lock(lock, "spin_lock")
#define spin_unlock(lock) _do_spin_unlock(lock) #define _raw_spin_unlock(lock) _do_spin_unlock(lock)
struct _rwlock_debug { struct _rwlock_debug {
volatile unsigned int lock; volatile unsigned int lock;
...@@ -56,35 +48,35 @@ extern void _do_read_unlock(rwlock_t *rw, char *str); ...@@ -56,35 +48,35 @@ extern void _do_read_unlock(rwlock_t *rw, char *str);
extern void _do_write_lock(rwlock_t *rw, char *str); extern void _do_write_lock(rwlock_t *rw, char *str);
extern void _do_write_unlock(rwlock_t *rw); extern void _do_write_unlock(rwlock_t *rw);
#define read_lock(lock) \ #define _raw_read_lock(lock) \
do { unsigned long flags; \ do { unsigned long flags; \
__save_and_cli(flags); \ __save_and_cli(flags); \
_do_read_lock(lock, "read_lock"); \ _do_read_lock(lock, "read_lock"); \
__restore_flags(flags); \ __restore_flags(flags); \
} while(0) } while(0)
#define read_unlock(lock) \ #define _raw_read_unlock(lock) \
do { unsigned long flags; \ do { unsigned long flags; \
__save_and_cli(flags); \ __save_and_cli(flags); \
_do_read_unlock(lock, "read_unlock"); \ _do_read_unlock(lock, "read_unlock"); \
__restore_flags(flags); \ __restore_flags(flags); \
} while(0) } while(0)
#define write_lock(lock) \ #define _raw_write_lock(lock) \
do { unsigned long flags; \ do { unsigned long flags; \
__save_and_cli(flags); \ __save_and_cli(flags); \
_do_write_lock(lock, "write_lock"); \ _do_write_lock(lock, "write_lock"); \
__restore_flags(flags); \ __restore_flags(flags); \
} while(0) } while(0)
#define write_unlock(lock) \ #define _raw_write_unlock(lock) \
do { unsigned long flags; \ do { unsigned long flags; \
__save_and_cli(flags); \ __save_and_cli(flags); \
_do_write_unlock(lock); \ _do_write_unlock(lock); \
__restore_flags(flags); \ __restore_flags(flags); \
} while(0) } while(0)
#else /* !SPIN_LOCK_DEBUG */ #else /* !CONFIG_DEBUG_SPINLOCK */
typedef unsigned char spinlock_t; typedef unsigned char spinlock_t;
#define SPIN_LOCK_UNLOCKED 0 #define SPIN_LOCK_UNLOCKED 0
...@@ -97,7 +89,7 @@ do { \ ...@@ -97,7 +89,7 @@ do { \
barrier(); \ barrier(); \
} while(*((volatile unsigned char *)lock)) } while(*((volatile unsigned char *)lock))
extern __inline__ void spin_lock(spinlock_t *lock) extern __inline__ void _raw_spin_lock(spinlock_t *lock)
{ {
__asm__ __volatile__( __asm__ __volatile__(
"\n1:\n\t" "\n1:\n\t"
...@@ -117,7 +109,7 @@ extern __inline__ void spin_lock(spinlock_t *lock) ...@@ -117,7 +109,7 @@ extern __inline__ void spin_lock(spinlock_t *lock)
: "g2", "memory", "cc"); : "g2", "memory", "cc");
} }
extern __inline__ int spin_trylock(spinlock_t *lock) extern __inline__ int _raw_spin_trylock(spinlock_t *lock)
{ {
unsigned int result; unsigned int result;
__asm__ __volatile__("ldstub [%1], %0" __asm__ __volatile__("ldstub [%1], %0"
...@@ -127,7 +119,7 @@ extern __inline__ int spin_trylock(spinlock_t *lock) ...@@ -127,7 +119,7 @@ extern __inline__ int spin_trylock(spinlock_t *lock)
return (result == 0); return (result == 0);
} }
extern __inline__ void spin_unlock(spinlock_t *lock) extern __inline__ void _raw_spin_unlock(spinlock_t *lock)
{ {
__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
} }
...@@ -178,7 +170,7 @@ extern __inline__ void _read_lock(rwlock_t *rw) ...@@ -178,7 +170,7 @@ extern __inline__ void _read_lock(rwlock_t *rw)
: "g2", "g4", "memory", "cc"); : "g2", "g4", "memory", "cc");
} }
#define read_lock(lock) \ #define _raw_read_lock(lock) \
do { unsigned long flags; \ do { unsigned long flags; \
__save_and_cli(flags); \ __save_and_cli(flags); \
_read_lock(lock); \ _read_lock(lock); \
...@@ -198,14 +190,14 @@ extern __inline__ void _read_unlock(rwlock_t *rw) ...@@ -198,14 +190,14 @@ extern __inline__ void _read_unlock(rwlock_t *rw)
: "g2", "g4", "memory", "cc"); : "g2", "g4", "memory", "cc");
} }
#define read_unlock(lock) \ #define _raw_read_unlock(lock) \
do { unsigned long flags; \ do { unsigned long flags; \
__save_and_cli(flags); \ __save_and_cli(flags); \
_read_unlock(lock); \ _read_unlock(lock); \
__restore_flags(flags); \ __restore_flags(flags); \
} while(0) } while(0)
extern __inline__ void write_lock(rwlock_t *rw) extern __inline__ void _raw_write_lock(rwlock_t *rw)
{ {
register rwlock_t *lp asm("g1"); register rwlock_t *lp asm("g1");
lp = rw; lp = rw;
...@@ -218,9 +210,9 @@ extern __inline__ void write_lock(rwlock_t *rw) ...@@ -218,9 +210,9 @@ extern __inline__ void write_lock(rwlock_t *rw)
: "g2", "g4", "memory", "cc"); : "g2", "g4", "memory", "cc");
} }
#define write_unlock(rw) do { (rw)->lock = 0; } while(0) #define _raw_write_unlock(rw) do { (rw)->lock = 0; } while(0)
#endif /* SPIN_LOCK_DEBUG */ #endif /* CONFIG_DEBUG_SPINLOCK */
#endif /* !(__ASSEMBLY__) */ #endif /* !(__ASSEMBLY__) */
......
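Why the wholesale _raw_ rename: in 2.5 the arch supplies only the raw primitives and the generic linux/spinlock.h layers preemption bookkeeping on top. A simplified sketch of that layering (assumed, not this patch's code):

#define spin_lock(lp)			\
do {					\
	preempt_disable();		\
	_raw_spin_lock(lp);		\
} while (0)

#define spin_unlock(lp)			\
do {					\
	_raw_spin_unlock(lp);		\
	preempt_enable();		\
} while (0)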
...@@ -4,19 +4,18 @@ ...@@ -4,19 +4,18 @@
#ifndef __SPARC_SYSTEM_H #ifndef __SPARC_SYSTEM_H
#define __SPARC_SYSTEM_H #define __SPARC_SYSTEM_H
#include <linux/config.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/threads.h> /* NR_CPUS */
#include <asm/segment.h> #include <asm/segment.h>
#include <asm/thread_info.h>
#ifdef __KERNEL__
#include <asm/page.h> #include <asm/page.h>
#include <asm/oplib.h> #include <asm/openprom.h> /* romvec. XXX will be dealt with later. Promise. */
#include <asm/psr.h> #include <asm/psr.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/btfixup.h> #include <asm/btfixup.h>
#endif /* __KERNEL__ */
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
/* /*
...@@ -48,6 +47,8 @@ extern enum sparc_cpu sparc_cpu_model; ...@@ -48,6 +47,8 @@ extern enum sparc_cpu sparc_cpu_model;
#define SUN4M_NCPUS 4 /* Architectural limit of sun4m. */ #define SUN4M_NCPUS 4 /* Architectural limit of sun4m. */
extern struct thread_info *current_set[NR_CPUS];
extern unsigned long empty_bad_page; extern unsigned long empty_bad_page;
extern unsigned long empty_bad_page_table; extern unsigned long empty_bad_page_table;
extern unsigned long empty_zero_page; extern unsigned long empty_zero_page;
...@@ -67,28 +68,43 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr, ...@@ -67,28 +68,43 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
void *fpqueue, unsigned long *fpqdepth); void *fpqueue, unsigned long *fpqdepth);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
-#define SWITCH_ENTER \
-	if(prev->flags & PF_USEDFPU) { \
-		put_psr(get_psr() | PSR_EF); \
-		fpsave(&prev->thread.float_regs[0], &prev->thread.fsr, \
-		       &prev->thread.fpqueue[0], &prev->thread.fpqdepth); \
-		prev->flags &= ~PF_USEDFPU; \
-		prev->thread.kregs->psr &= ~PSR_EF; \
-	}
-#define SWITCH_DO_LAZY_FPU
+#define SWITCH_ENTER(prv) \
+	do { \
+	if (test_tsk_thread_flag(prv, TIF_USEDFPU)) { \
+		put_psr(get_psr() | PSR_EF); \
+		fpsave(&(prv)->thread.float_regs[0], &(prv)->thread.fsr, \
+		       &(prv)->thread.fpqueue[0], &(prv)->thread.fpqdepth); \
+		clear_tsk_thread_flag(prv, TIF_USEDFPU); \
+		(prv)->thread.kregs->psr &= ~PSR_EF; \
+	} \
+	} while(0)
+#define SWITCH_DO_LAZY_FPU(next)	/* */
#else
-#define SWITCH_ENTER
-#define SWITCH_DO_LAZY_FPU	if(last_task_used_math != next) next->thread.kregs->psr&=~PSR_EF;
+#define SWITCH_ENTER(prv)	/* */
+#define SWITCH_DO_LAZY_FPU(nxt) \
+	do { \
+	if (last_task_used_math != (nxt)) \
+		(nxt)->thread.kregs->psr&=~PSR_EF; \
+	} while(0)
#endif
+// #define prepare_arch_schedule(prev)	task_lock(prev)
+// #define finish_arch_schedule(prev)	task_unlock(prev)
+#define prepare_arch_schedule(prev)	do{ }while(0)
+#define finish_arch_schedule(prev)	do{ }while(0)
/*
 * Flush windows so that the VM switch which follows
 * would not pull the stack from under us.
 *
 * SWITCH_ENTER and SWITCH_DO_LAZY_FPU do not work yet (e.g. SMP does not work)
+ * XXX WTF is the above comment? Found in late teen 2.4.x.
+ *
+ * XXX prepare_arch_switch() is much smarter than this in sparc64, are we sure?
+ * XXX Consider if doing it the flush_user_windows way is faster (by uwinmask).
 */
-#define prepare_to_switch() do { \
+#define prepare_arch_switch(rq) do { \
	__asm__ __volatile__( \
	".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \
	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
@@ -96,6 +112,7 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
	"save %sp, -0x40, %sp\n\t" \
	"restore; restore; restore; restore; restore; restore; restore"); \
} while(0)
+#define finish_arch_switch(rq)	do{ }while(0)
/* Much care has gone into this code, do not touch it.
 *
@@ -111,9 +128,8 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
#define switch_to(prev, next, last) do { \
	__label__ here; \
	register unsigned long task_pc asm("o7"); \
-	extern struct task_struct *current_set[NR_CPUS]; \
-	SWITCH_ENTER \
-	SWITCH_DO_LAZY_FPU \
+	SWITCH_ENTER(prev); \
+	SWITCH_DO_LAZY_FPU(next); \
	next->active_mm->cpu_vm_mask |= (1 << smp_processor_id()); \
	task_pc = ((unsigned long) &&here) - 0x8; \
	__asm__ __volatile__( \
@@ -140,11 +156,13 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
	"nop\n\t" \
	"nop\n\t" \
	"jmpl	%%o7 + 0x8, %%g0\n\t" \
-	" mov	%%g3, %0\n\t" \
+	" ld	[%%g3 + %5], %0\n\t" \
	: "=&r" (last) \
-	: "r" (&(current_set[hard_smp_processor_id()])), "r" (next), \
-	  "i" ((const unsigned long)(&((struct task_struct *)0)->thread.kpsr)), \
-	  "i" ((const unsigned long)(&((struct task_struct *)0)->thread.ksp)), \
+	: "r" (&(current_set[hard_smp_processor_id()])), \
+	  "r" ((next)->thread_info), \
+	  "i" (TI_KPSR), \
+	  "i" (TI_KSP), \
+	  "i" (TI_TASK), \
	  "r" (task_pc) \
	: "g1", "g2", "g3", "g4", "g5", "g7", "l0", "l1", \
	  "l4", "l5", "l6", "l7", "i0", "i1", "i2", "i3", "i4", "i5", "o0", "o1", "o2", \
...
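With task_struct and thread_info now split, the switch_to() trailer can no longer hand back the previous task directly in %g3; the new " ld [%%g3 + %5], %0" fetches the task_struct pointer out of the incoming thread_info via the TI_TASK offset (operand %5 above). The C equivalent is shown below with trimmed illustrative types; task_of() is a hypothetical helper, and TI_TASK is 0x04 only with 32-bit longs and pointers, so an LP64 host prints 8 here.

/* Sketch only: recovering "last" from thread_info, as the new
 * " ld [%%g3 + TI_TASK], %0" does. */
#include <stdio.h>
#include <stddef.h>

struct task_struct;

struct thread_info {
	unsigned long uwinmask;
	struct task_struct *task;	/* <-- TI_TASK points here */
};

struct task_struct {
	struct thread_info *thread_info;
	const char *comm;
};

static struct task_struct *task_of(struct thread_info *ti)
{
	return ti->task;		/* the load the asm now performs */
}

int main(void)
{
	struct thread_info ti;
	struct task_struct prev = { &ti, "prev" };

	ti.task = &prev;
	printf("offsetof(task) = %#zx\n", offsetof(struct thread_info, task));
	printf("last = %s\n", task_of(prev.thread_info)->comm);
	return 0;
}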
/*
* thread_info.h: sparc low-level thread information
* adapted from the ppc version by Pete Zaitcev, which was
* adapted from the i386 version by Paul Mackerras
*
* Copyright (C) 2002 David Howells (dhowells@redhat.com)
* Copyright (c) 2002 Pete Zaitcev (zaitcev@yahoo.com)
* - Incorporating suggestions made by Linus Torvalds and Dave Miller
*/
#ifndef _ASM_THREAD_INFO_H
#define _ASM_THREAD_INFO_H
// XXX todo: comment thread_info components and see what breaks.
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <asm/btfixup.h>
/*
* Low level task data.
*
* If you change this, change the TI_* offsets below to match. XXX check_asm.
*
* The uwinmask is a first class citizen among w_saved and friends.
* XXX Is this a good idea? wof.S/wuf.S have to use w_saved anyway,
* so they waste a register on current, and an ld on fetching it.
*/
struct thread_info {
unsigned long uwinmask;
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
int cpu; /* cpu we're on */
int preempt_count;
int softirq_count;
int hardirq_count;
/* Context switch saved kernel state. */
unsigned long ksp; /* ... ksp __attribute__ ((aligned (8))); */
unsigned long kpc;
unsigned long kpsr;
unsigned long kwim;
};
/*
* macros/functions for gaining access to the thread information structure
*/
#define INIT_THREAD_INFO(tsk) \
{ \
uwinmask: 0, \
task: &tsk, \
exec_domain: &default_exec_domain, \
flags: 0, \
cpu: 0, \
}
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
/* how to get the thread information struct from C */
register struct thread_info *current_thread_info_reg asm("g6");
#define current_thread_info() (current_thread_info_reg)
/*
* thread information allocation
*/
#ifdef CONFIG_SUN4
#define THREAD_INFO_ORDER 0
#else
#define THREAD_INFO_ORDER 1
#endif
BTFIXUPDEF_CALL(struct thread_info *, alloc_thread_info, void)
#define alloc_thread_info() BTFIXUP_CALL(alloc_thread_info)()
BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
#define free_thread_info(ti) BTFIXUP_CALL(free_thread_info)(ti)
#define get_thread_info(ti) get_task_struct((ti)->task)
#define put_thread_info(ti) put_task_struct((ti)->task)
#endif /* __ASSEMBLY__ */
/*
* Size of kernel stack for each process.
* Observe the order of get_free_pages() in alloc_thread_info().
* The sun4 has 8K stack too, because it's short on memory, and 16K is a waste.
*
* XXX Watch how INIT_THREAD_SIZE evolves in linux/sched.h and elsewhere.
* On 2.5.24 it happens to match 8192 magically.
*/
#define THREAD_SIZE 8192
/*
* Offsets in thread_info structure, used in assembly code
*/
#define TI_UWINMASK 0x00 /* uwinmask */
#define TI_TASK 0x04
#define TI_EXECDOMAIN 0x08 /* exec_domain */
#define TI_FLAGS 0x0c
#define TI_CPU 0x10
#define TI_PREEMPT 0x14 /* preempt_count */
#define TI_SOFTIRQ 0x18 /* softirq_count */
#define TI_HARDIRQ 0x1c /* hardirq_count */
#define TI_KSP 0x20 /* ksp */
#define TI_KPC		0x24	/* kpc (ldd'ed with ksp) */
#define TI_KPSR 0x28 /* kpsr */
#define TI_KWIM 0x2c /* kwim (ldd'ed with kpsr) */
#define PREEMPT_ACTIVE 0x4000000
/*
* thread information flag bit numbers
*/
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_USEDFPU 8 /* FPU was used by this task
* this quantum (SMP) */
#define TIF_POLLING_NRFLAG 9 /* true if poll_idle() is polling
* TIF_NEED_RESCHED */
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_USEDFPU (1<<TIF_USEDFPU)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#endif /* __KERNEL__ */
#endif /* _ASM_THREAD_INFO_H */
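The TI_* constants above are maintained by hand, which is why the header warns "XXX check_asm": make check_asm regenerates the offsets and compares them against the struct. Below is a compile-time sketch of that comparison using offsetof(); BUILD_CHECK is a stand-in macro, not a kernel API, and the values assume ILP32 sizes as on sparc32, so compiling this on an LP64 host fails — which is exactly the kind of drift such a check exists to catch.

/* Sketch only: the offset cross-check that "make check_asm" automates. */
#include <stddef.h>

struct task_struct;
struct exec_domain;

struct thread_info {
	unsigned long		uwinmask;
	struct task_struct	*task;
	struct exec_domain	*exec_domain;
	unsigned long		flags;
	int			cpu;
	int			preempt_count;
	int			softirq_count;
	int			hardirq_count;
	unsigned long		ksp;
	unsigned long		kpc;
	unsigned long		kpsr;
	unsigned long		kwim;
};

/* A negative array size turns any mismatch into a compile error. */
#define BUILD_CHECK(field, off) \
	typedef char check_##field[(offsetof(struct thread_info, field) == (off)) ? 1 : -1]

BUILD_CHECK(uwinmask,	0x00);	/* TI_UWINMASK */
BUILD_CHECK(task,	0x04);	/* TI_TASK     */
BUILD_CHECK(flags,	0x0c);	/* TI_FLAGS    */
BUILD_CHECK(ksp,	0x20);	/* TI_KSP      */
BUILD_CHECK(kwim,	0x2c);	/* TI_KWIM     */

int main(void) { return 0; }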
#ifndef _SPARC_TLB_H
#define _SPARC_TLB_H
#define tlb_start_vma(tlb, vma) \
do { \
flush_cache_range(vma, vma->vm_start, vma->vm_end); \
} while (0)
#define tlb_end_vma(tlb, vma) \
do { \
flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
} while (0)
#define tlb_remove_tlb_entry(tlb, pte, address) \
do { } while (0)
#define tlb_flush(tlb) \
do { \
flush_tlb_mm((tlb)->mm); \
} while (0)
#include <asm-generic/tlb.h>
#endif /* _SPARC_TLB_H */
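The new tlb.h exists to feed asm-generic/tlb.h the two hooks sparc32 cares about: flush the (virtually indexed) caches before the page tables are torn down, and flush the TLB afterwards. The sketch below shows the ordering the generic unmap path produces for one VMA, with printf stubs standing in for the btfixup'ed kernel flush primitives.

/* Sketch only: the hook order asm-generic/tlb.h drives for one VMA. */
#include <stdio.h>

struct vm_area_struct { unsigned long vm_start, vm_end; };
struct mmu_gather { int unused; };

static void flush_cache_range(struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	(void) vma;
	printf("cache flush %#lx-%#lx (before the PTEs are cleared)\n",
	       start, end);
}

static void flush_tlb_range(struct vm_area_struct *vma,
			    unsigned long start, unsigned long end)
{
	(void) vma;
	printf("tlb   flush %#lx-%#lx (after the PTEs are cleared)\n",
	       start, end);
}

#define tlb_start_vma(tlb, vma) \
	flush_cache_range(vma, (vma)->vm_start, (vma)->vm_end)
#define tlb_end_vma(tlb, vma) \
	flush_tlb_range(vma, (vma)->vm_start, (vma)->vm_end)

int main(void)
{
	struct mmu_gather tlb = { 0 };
	struct vm_area_struct vma = { 0x10000, 0x20000 };

	(void) tlb;
	tlb_start_vma(&tlb, &vma);
	/* ... generic code unmaps the pages here ... */
	tlb_end_vma(&tlb, &vma);
	return 0;
}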
#ifndef _SPARC_TLBFLUSH_H
#define _SPARC_TLBFLUSH_H
#include <linux/config.h>
#include <linux/mm.h>
// #include <asm/processor.h>
/*
* TLB flushing:
*
* - flush_tlb() flushes the current mm struct TLBs XXX Exists?
* - flush_tlb_all() flushes all processes TLBs
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(vma, start, end) flushes a range of pages
* - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
* - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
*/
#ifdef CONFIG_SMP
BTFIXUPDEF_CALL(void, local_flush_tlb_all, void)
BTFIXUPDEF_CALL(void, local_flush_tlb_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, local_flush_tlb_range, struct vm_area_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, local_flush_tlb_page, struct vm_area_struct *, unsigned long)
#define local_flush_tlb_all() BTFIXUP_CALL(local_flush_tlb_all)()
#define local_flush_tlb_mm(mm) BTFIXUP_CALL(local_flush_tlb_mm)(mm)
#define local_flush_tlb_range(vma,start,end) BTFIXUP_CALL(local_flush_tlb_range)(vma,start,end)
#define local_flush_tlb_page(vma,addr) BTFIXUP_CALL(local_flush_tlb_page)(vma,addr)
extern void smp_flush_tlb_all(void);
extern void smp_flush_tlb_mm(struct mm_struct *mm);
extern void smp_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start,
unsigned long end);
extern void smp_flush_tlb_page(struct vm_area_struct *mm, unsigned long page);
#endif /* CONFIG_SMP */
BTFIXUPDEF_CALL(void, flush_tlb_all, void)
BTFIXUPDEF_CALL(void, flush_tlb_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, flush_tlb_range, struct vm_area_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, flush_tlb_page, struct vm_area_struct *, unsigned long)
// Thanks to Anton Blanchard, our pagetables became uncached in 2.4. Wee!
// extern void flush_tlb_pgtables(struct mm_struct *mm,
// unsigned long start, unsigned long end);
#define flush_tlb_pgtables(mm, start, end) do{ }while(0)
#define flush_tlb_all() BTFIXUP_CALL(flush_tlb_all)()
#define flush_tlb_mm(mm) BTFIXUP_CALL(flush_tlb_mm)(mm)
#define flush_tlb_range(vma,start,end) BTFIXUP_CALL(flush_tlb_range)(vma,start,end)
#define flush_tlb_page(vma,addr) BTFIXUP_CALL(flush_tlb_page)(vma,addr)
// #define flush_tlb() flush_tlb_mm(current->active_mm) /* XXX Sure? */
/*
* This is a kludge, until I know better. --zaitcev XXX
*/
#define flush_tlb_kernel_range(start, end) flush_tlb_all()
#endif /* _SPARC_TLBFLUSH_H */
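BTFIXUPDEF_CALL/BTFIXUP_CALL let one kernel image serve sun4c and srmmu by patching the call sites at boot (the btfixupprep step in arch/sparc/boot builds the patch table). Functionally it behaves like a boot-time-initialized function pointer, minus the run-time indirection; a sketch with illustrative sun4c_/srmmu_ stubs:

/* Sketch only: what BTFIXUP buys, modelled as a boot-time function
 * pointer.  The real mechanism patches the call instructions, so no
 * pointer dereference is left in the hot path. */
#include <stdio.h>

struct mm_struct { int ctx; };

static void sun4c_flush_tlb_mm(struct mm_struct *mm)
{
	printf("sun4c: flush context %d\n", mm->ctx);
}

static void srmmu_flush_tlb_mm(struct mm_struct *mm)
{
	printf("srmmu: flush context %d\n", mm->ctx);
}

/* cf. BTFIXUPDEF_CALL(void, flush_tlb_mm, struct mm_struct *) */
static void (*flush_tlb_mm_impl)(struct mm_struct *);
#define flush_tlb_mm(mm) flush_tlb_mm_impl(mm)

int main(void)
{
	struct mm_struct mm = { 7 };

	flush_tlb_mm_impl = sun4c_flush_tlb_mm;	/* "fixup" on a sun4c */
	flush_tlb_mm(&mm);
	flush_tlb_mm_impl = srmmu_flush_tlb_mm;	/* "fixup" on a sun4m */
	flush_tlb_mm(&mm);
	return 0;
}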
@@ -10,6 +10,7 @@
#ifdef __KERNEL__
#include <linux/sched.h>
#include <linux/string.h>
+#include <linux/errno.h>
#include <asm/vac-ops.h>
#include <asm/a.out.h>
#endif
...