Commit 43f6fba8 authored by Linus Torvalds

Merge master.kernel.org:/home/davem/BK/davem-2.5

into home.transmeta.com:/home/torvalds/v2.5/linux
parents c1378606 6421563d
/* $Id: signal.c,v 1.109 2001/12/21 01:22:31 davem Exp $
/* $Id: signal.c,v 1.110 2002/02/08 03:57:14 davem Exp $
* linux/arch/sparc/kernel/signal.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
......
/* $Id: sys_sunos.c,v 1.136 2002/01/08 16:00:14 davem Exp $
/* $Id: sys_sunos.c,v 1.137 2002/02/08 03:57:14 davem Exp $
* sys_sunos.c: SunOS specific syscall compatibility support.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
......
/* $Id: systbls.S,v 1.102 2002/01/31 03:30:05 davem Exp $
/* $Id: systbls.S,v 1.103 2002/02/08 03:57:14 davem Exp $
* systbls.S: System call entry point tables for OS compatibility.
* The native Linux system call table lives here also.
*
......@@ -55,7 +55,7 @@ sys_call_table:
/*170*/ .long sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys_getdents
/*175*/ .long sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr
/*180*/ .long sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_sigpending, sys_query_module
/*185*/ .long sys_setpgid, sys_fremovexattr, sys_nis_syscall, sys_nis_syscall, sys_newuname
/*185*/ .long sys_setpgid, sys_fremovexattr, sys_tkill, sys_nis_syscall, sys_newuname
/*190*/ .long sys_init_module, sys_personality, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
/*195*/ .long sys_nis_syscall, sys_nis_syscall, sys_getppid, sparc_sigaction, sys_sgetmask
/*200*/ .long sys_ssetmask, sys_sigsuspend, sys_newlstat, sys_uselib, old_readdir
......@@ -70,7 +70,7 @@ sys_call_table:
/*240*/ .long sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
/*245*/ .long sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep
/*250*/ .long sparc_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
/*255*/ .long sys_nis_syscall, sys_nis_syscall, sys_tkill
/*255*/ .long sys_nis_syscall, sys_nis_syscall
#ifdef CONFIG_SUNOS_EMUL
/* Now the SunOS syscall table. */
......
......@@ -25,6 +25,16 @@ CONFIG_SMP
If you don't know what to do here, say N.
CONFIG_PREEMPT
This option reduces the latency of the kernel when reacting to
real-time or interactive events by allowing a low priority process to
be preempted even if it is in kernel mode executing a system call.
This allows applications to run more reliably even when the system is
under load.
Say Y here if you are building a kernel for a desktop, embedded
or real-time system. Say N if you are unsure.
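For orientation (not part of the patch): the sparc64 changes below track a per-thread preemption count in thread_info (the TI_PRE_COUNT field referenced in etrap.S and rtrap.S), raised on interrupt entry and dropped on exit. A minimal C sketch of that bookkeeping, with every name assumed rather than taken from this tree:
    struct ti_sketch { int pre_count; int need_resched; };

    /* interrupt entry: any nesting disables preemption */
    static void irq_enter_sketch(struct ti_sketch *ti)
    {
            ti->pre_count++;
    }

    /* interrupt exit: rescheduling is only legal once the count is back to zero */
    static void irq_exit_sketch(struct ti_sketch *ti)
    {
            if (--ti->pre_count == 0 && ti->need_resched) {
                    /* schedule() would run here in the real kernel */
            }
    }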
CONFIG_SPARC64
SPARC is a family of RISC microprocessors designed and marketed by
Sun Microsystems, Incorporated. This port covers the newer 64-bit
......
# $Id: Makefile,v 1.51 2001/11/17 00:15:27 davem Exp $
# $Id: Makefile,v 1.52 2002/02/09 19:49:31 davem Exp $
# sparc64/Makefile
#
# Makefile for the architecture dependent flags and dependencies on the
......@@ -79,12 +79,8 @@ archclean:
rm -f $(TOPDIR)/vmlinux.aout
archmrproper:
rm -f $(TOPDIR)/include/asm-sparc64/asm_offsets.h
archdep: check_asm
check_asm: include/linux/version.h
$(MAKE) -C arch/sparc64/kernel check_asm
archdep:
tftpboot.img:
$(MAKE) -C arch/sparc64/boot tftpboot.img
......@@ -15,6 +15,7 @@ define_bool CONFIG_VT y
define_bool CONFIG_VT_CONSOLE y
bool 'Symmetric multi-processing support' CONFIG_SMP
bool 'Preemptible kernel' CONFIG_PREEMPT
# Identify this as a Sparc64 build
define_bool CONFIG_SPARC64 y
......
......@@ -2,6 +2,11 @@
# Automatically generated make config: don't edit
#
#
# Code maturity level options
#
CONFIG_EXPERIMENTAL=y
#
# General setup
#
......@@ -10,11 +15,6 @@ CONFIG_SYSVIPC=y
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
#
# Code maturity level options
#
CONFIG_EXPERIMENTAL=y
#
# Loadable module support
#
......@@ -456,7 +456,6 @@ CONFIG_SUNLANCE=y
CONFIG_HAPPYMEAL=y
CONFIG_SUNBMAC=m
CONFIG_SUNQE=m
CONFIG_SUNLANCE=y
CONFIG_SUNGEM=y
CONFIG_NET_VENDOR_3COM=y
# CONFIG_EL1 is not set
......@@ -487,6 +486,7 @@ CONFIG_EEPRO100=m
# CONFIG_LNE390 is not set
CONFIG_FEALNX=m
CONFIG_NATSEMI=m
# CONFIG_NATSEMI_CABLE_MAGIC is not set
CONFIG_NE2K_PCI=m
# CONFIG_NE3210 is not set
# CONFIG_ES3210 is not set
......@@ -495,6 +495,7 @@ CONFIG_8139TOO=m
# CONFIG_8139TOO_PIO is not set
# CONFIG_8139TOO_TUNE_TWISTER is not set
# CONFIG_8139TOO_8129 is not set
# CONFIG_8139_NEW_RX_RESET is not set
CONFIG_SIS900=m
CONFIG_EPIC100=m
CONFIG_SUNDANCE=m
......@@ -575,7 +576,7 @@ CONFIG_DRM_TDFX=m
# CONFIG_DRM_R128 is not set
#
# Input core support
# Input device support
#
CONFIG_INPUT=y
CONFIG_INPUT_KEYBDEV=y
......@@ -584,6 +585,36 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
# CONFIG_INPUT_JOYDEV is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_GAMEPORT is not set
CONFIG_SOUND_GAMEPORT=y
# CONFIG_GAMEPORT_NS558 is not set
# CONFIG_GAMEPORT_L4 is not set
# CONFIG_INPUT_EMU10K1 is not set
# CONFIG_GAMEPORT_PCIGAME is not set
# CONFIG_GAMEPORT_FM801 is not set
# CONFIG_GAMEPORT_CS461x is not set
# CONFIG_SERIO is not set
# CONFIG_SERIO_SERPORT is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_JOYSTICK_ANALOG is not set
# CONFIG_JOYSTICK_A3D is not set
# CONFIG_JOYSTICK_ADI is not set
# CONFIG_JOYSTICK_COBRA is not set
# CONFIG_JOYSTICK_GF2K is not set
# CONFIG_JOYSTICK_GRIP is not set
# CONFIG_JOYSTICK_INTERACT is not set
# CONFIG_JOYSTICK_SIDEWINDER is not set
# CONFIG_JOYSTICK_TMDC is not set
# CONFIG_JOYSTICK_IFORCE_USB is not set
# CONFIG_JOYSTICK_IFORCE_232 is not set
# CONFIG_JOYSTICK_WARRIOR is not set
# CONFIG_JOYSTICK_MAGELLAN is not set
# CONFIG_JOYSTICK_SPACEORB is not set
# CONFIG_JOYSTICK_SPACEBALL is not set
# CONFIG_JOYSTICK_STINGER is not set
# CONFIG_JOYSTICK_DB9 is not set
# CONFIG_JOYSTICK_GAMECON is not set
# CONFIG_JOYSTICK_TURBOGRAFX is not set
#
# File systems
......@@ -639,7 +670,7 @@ CONFIG_UFS_FS_WRITE=y
# Network File Systems
#
CONFIG_CODA_FS=m
CONFIG_INTERMEZZO_FS=m
# CONFIG_INTERMEZZO_FS is not set
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
# CONFIG_ROOT_NFS is not set
......@@ -659,7 +690,6 @@ CONFIG_LOCKD_V4=y
# CONFIG_NCPFS_NLS is not set
# CONFIG_NCPFS_EXTRAS is not set
# CONFIG_ZISOFS_FS is not set
# CONFIG_ZLIB_FS_INFLATE is not set
#
# Partition Types
......@@ -887,3 +917,5 @@ CONFIG_MAGIC_SYSRQ=y
# Library routines
#
CONFIG_CRC32=y
# CONFIG_ZLIB_INFLATE is not set
# CONFIG_ZLIB_DEFLATE is not set
# $Id: Makefile,v 1.69 2001/11/19 04:09:53 davem Exp $
# $Id: Makefile,v 1.70 2002/02/09 19:49:30 davem Exp $
# Makefile for the linux kernel.
#
# Note! Dependencies are done automagically by 'make dep', which also
......@@ -56,118 +56,4 @@ else
CMODEL_CFLAG := -m64 -mcmodel=medlow
endif
check_asm: dummy
@if [ ! -r $(HPATH)/asm/asm_offsets.h ] ; then \
touch $(HPATH)/asm/asm_offsets.h ; \
fi
@echo "/* Automatically generated. Do not edit. */" > asm_offsets.h
@echo "#ifndef __ASM_OFFSETS_H__" >> asm_offsets.h
@echo -e "#define __ASM_OFFSETS_H__\n" >> asm_offsets.h
@echo -e "#include <linux/config.h>\n" >> asm_offsets.h
@echo -e "#ifndef CONFIG_SMP\n" >> asm_offsets.h
@echo "#include <linux/config.h>" > tmp.c
@echo "#undef CONFIG_SMP" >> tmp.c
@echo "#include <linux/sched.h>" >> tmp.c
$(CPP) $(CPPFLAGS) -P tmp.c -o tmp.i
@echo "/* Automatically generated. Do not edit. */" > check_asm_data.c
@echo "#include <linux/config.h>" >> check_asm_data.c
@echo "#undef CONFIG_SMP" >> check_asm_data.c
@echo "#include <linux/sched.h>" >> check_asm_data.c
@echo 'unsigned int check_asm_data[] = {' >> check_asm_data.c
$(SH) ./check_asm.sh -data task tmp.i check_asm_data.c
$(SH) ./check_asm.sh -data mm tmp.i check_asm_data.c
$(SH) ./check_asm.sh -data thread tmp.i check_asm_data.c
@echo '};' >> check_asm_data.c
$(CC) $(CPPFLAGS) $(CMODEL_CFLAG) -ffixed-g4 -S -o check_asm_data.s check_asm_data.c
@echo "/* Automatically generated. Do not edit. */" > check_asm.c
@echo 'extern int printf(const char *fmt, ...);' >>check_asm.c
@echo 'unsigned int check_asm_data[] = {' >> check_asm.c
$(SH) ./check_asm.sh -ints check_asm_data.s check_asm.c
@echo '};' >> check_asm.c
@echo 'int main(void) {' >> check_asm.c
@echo 'int i = 0;' >> check_asm.c
$(SH) ./check_asm.sh -printf task tmp.i check_asm.c
$(SH) ./check_asm.sh -printf mm tmp.i check_asm.c
$(SH) ./check_asm.sh -printf thread tmp.i check_asm.c
@echo 'return 0; }' >> check_asm.c
@rm -f tmp.[ci] check_asm_data.[cs]
$(HOSTCC) -o check_asm check_asm.c
./check_asm >> asm_offsets.h
@rm -f check_asm check_asm.c
@echo -e "\n#else /* CONFIG_SMP */\n" >> asm_offsets.h
@echo -e "#ifndef CONFIG_DEBUG_SPINLOCK\n" >>asm_offsets.h
@echo "#include <linux/config.h>" > tmp.c
@echo "#undef CONFIG_SMP" >> tmp.c
@echo "#define CONFIG_SMP 1" >> tmp.c
@echo "#include <linux/sched.h>" >> tmp.c
$(CPP) $(CPPFLAGS) -P tmp.c -o tmp.i
@echo "/* Automatically generated. Do not edit. */" > check_asm_data.c
@echo "#include <linux/config.h>" >> check_asm_data.c
@echo "#undef CONFIG_SMP" >> check_asm_data.c
@echo "#define CONFIG_SMP 1" >> check_asm_data.c
@echo "#include <linux/sched.h>" >> check_asm_data.c
@echo 'unsigned int check_asm_data[] = {' >> check_asm_data.c
$(SH) ./check_asm.sh -data task tmp.i check_asm_data.c
$(SH) ./check_asm.sh -data mm tmp.i check_asm_data.c
$(SH) ./check_asm.sh -data thread tmp.i check_asm_data.c
@echo '};' >> check_asm_data.c
$(CC) $(CPPFLAGS) $(CMODEL_CFLAG) -ffixed-g4 -S -o check_asm_data.s check_asm_data.c
@echo "/* Automatically generated. Do not edit. */" > check_asm.c
@echo 'extern int printf(const char *fmt, ...);' >>check_asm.c
@echo 'unsigned int check_asm_data[] = {' >> check_asm.c
$(SH) ./check_asm.sh -ints check_asm_data.s check_asm.c
@echo '};' >> check_asm.c
@echo 'int main(void) {' >> check_asm.c
@echo 'int i = 0;' >> check_asm.c
$(SH) ./check_asm.sh -printf task tmp.i check_asm.c
$(SH) ./check_asm.sh -printf mm tmp.i check_asm.c
$(SH) ./check_asm.sh -printf thread tmp.i check_asm.c
@echo 'return 0; }' >> check_asm.c
@rm -f tmp.[ci] check_asm_data.[cs]
$(HOSTCC) -o check_asm check_asm.c
./check_asm >> asm_offsets.h
@rm -f check_asm check_asm.c
@echo -e "\n#else /* CONFIG_DEBUG_SPINLOCK */\n" >> asm_offsets.h
@echo "#include <linux/sched.h>" > tmp.c
$(CPP) $(CPPFLAGS) -P -DCONFIG_DEBUG_SPINLOCK tmp.c -o tmp.i
@echo "/* Automatically generated. Do not edit. */" > check_asm_data.c
@echo "#include <linux/config.h>" >> check_asm_data.c
@echo "#undef CONFIG_SMP" >> check_asm_data.c
@echo "#define CONFIG_SMP 1" >> check_asm_data.c
@echo "#include <linux/sched.h>" >> check_asm_data.c
@echo 'unsigned int check_asm_data[] = {' >> check_asm_data.c
$(SH) ./check_asm.sh -data task tmp.i check_asm_data.c
$(SH) ./check_asm.sh -data mm tmp.i check_asm_data.c
$(SH) ./check_asm.sh -data thread tmp.i check_asm_data.c
@echo '};' >> check_asm_data.c
$(CC) $(CPPFLAGS) -DCONFIG_DEBUG_SPINLOCK $(CMODEL_CFLAG) -ffixed-g4 -S -o check_asm_data.s check_asm_data.c
@echo "/* Automatically generated. Do not edit. */" > check_asm.c
@echo 'extern int printf(const char *fmt, ...);' >>check_asm.c
@echo 'unsigned int check_asm_data[] = {' >> check_asm.c
$(SH) ./check_asm.sh -ints check_asm_data.s check_asm.c
@echo '};' >> check_asm.c
@echo 'int main(void) {' >> check_asm.c
@echo 'int i = 0;' >> check_asm.c
$(SH) ./check_asm.sh -printf task tmp.i check_asm.c
$(SH) ./check_asm.sh -printf mm tmp.i check_asm.c
$(SH) ./check_asm.sh -printf thread tmp.i check_asm.c
@echo 'return 0; }' >> check_asm.c
@rm -f tmp.[ci] check_asm_data.[cs]
$(HOSTCC) -o check_asm check_asm.c
./check_asm >> asm_offsets.h
@rm -f check_asm check_asm.c
@echo -e "#endif /* CONFIG_DEBUG_SPINLOCK */\n" >> asm_offsets.h
@echo -e "#endif /* CONFIG_SMP */\n" >> asm_offsets.h
@echo "#endif /* __ASM_OFFSETS_H__ */" >> asm_offsets.h
@if test -r $(HPATH)/asm/asm_offsets.h; then \
if cmp -s asm_offsets.h $(HPATH)/asm/asm_offsets.h; then \
echo $(HPATH)/asm/asm_offsets.h is unchanged; \
rm -f asm_offsets.h; \
else \
mv -f asm_offsets.h $(HPATH)/asm/asm_offsets.h; \
fi; \
else \
mv -f asm_offsets.h $(HPATH)/asm/asm_offsets.h; \
fi
include $(TOPDIR)/Rules.make
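The check_asm rules deleted above derived structure offsets by compiling probe code and scraping the resulting assembly into asm_offsets.h. After this merge the sparc64 assembly uses TI_* constants that come from asm/thread_info.h instead (see the head.S and trampoline.S include changes below). A hypothetical sketch of the simpler offsetof()-based way such constants are usually produced; the structure layout and field names here are invented for illustration:
    #include <stdio.h>
    #include <stddef.h>

    struct thread_info_sketch {
            unsigned long   flags;
            int             cpu;
            int             pre_count;
            unsigned long   fpregs[64];
    };

    int main(void)
    {
            printf("#define TI_FLAGS     0x%zx\n", offsetof(struct thread_info_sketch, flags));
            printf("#define TI_CPU       0x%zx\n", offsetof(struct thread_info_sketch, cpu));
            printf("#define TI_PRE_COUNT 0x%zx\n", offsetof(struct thread_info_sketch, pre_count));
            printf("#define TI_FPREGS    0x%zx\n", offsetof(struct thread_info_sketch, fpregs));
            return 0;
    }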
......@@ -314,7 +314,7 @@ static int load_aout32_binary(struct linux_binprm * bprm, struct pt_regs * regs)
current->mm->start_stack =
(unsigned long) create_aout32_tables((char *)bprm->p, bprm);
if (!(current->thread.flags & SPARC_FLAG_32BIT)) {
if (!(test_thread_flag(TIF_32BIT))) {
unsigned long pgd_cache;
pgd_cache = ((unsigned long)current->mm->pgd[0])<<11UL;
......@@ -323,7 +323,7 @@ static int load_aout32_binary(struct linux_binprm * bprm, struct pt_regs * regs)
: /* no outputs */
: "r" (pgd_cache),
"r" (TSB_REG), "i" (ASI_DMMU));
current->thread.flags |= SPARC_FLAG_32BIT;
set_thread_flag(TIF_32BIT);
}
start_thread32(regs, ex.a_entry, current->mm->start_stack);
if (current->ptrace & PT_PTRACED)
......
......@@ -149,7 +149,7 @@ struct elf_prpsinfo32
#ifdef CONFIG_BINFMT_ELF32_MODULE
#define CONFIG_BINFMT_ELF_MODULE CONFIG_BINFMT_ELF32_MODULE
#endif
#define ELF_FLAGS_INIT current->thread.flags |= SPARC_FLAG_32BIT
#define ELF_FLAGS_INIT set_thread_flag(TIF_32BIT)
MODULE_DESCRIPTION("Binary format loader for compatibility with 32bit SparcLinux binaries on the Ultra");
MODULE_AUTHOR("Eric Youngdale, David S. Miller, Jakub Jelinek");
......
#!/bin/sh
# Helper for the check_asm Makefile rules removed above: scrapes structure
# member offsets and sizes out of preprocessed headers and compiler output.
case $1 in
# -printf: emit printf() calls that print AOFF_<struct>_<member> offset and
#          ASIZ_<struct>_<member> size macros for each structure member.
-printf)
sed -n -e '/^#/d;/struct[ ]*'$2'_struct[ ]*{/,/};/p' < $3 | sed '/struct[ ]*'$2'_struct[ ]*{/d;/:[0-9]*[ ]*;/d;/^[ ]*$/d;/};/d;s/^[ ]*//;s/volatile[ ]*//;s/\(unsigned\|signed\|struct\)[ ]*//;s/\(\[\|__attribute__\).*;[ ]*$//;s/(\*//;s/)(.*)//;s/;[ ]*$//;s/^[^ ]*[ ]*//;s/,/\
/g' | sed 's/^[ *]*//;s/[ ]*$//;s/^.*$/printf ("#define AOFF_'$2'_\0 0x%08x\\n", check_asm_data[i++]); printf("#define ASIZ_'$2'_\0 0x%08x\\n", check_asm_data[i++]);/' >> $4
echo "printf (\"#define ASIZ_$2\\t0x%08x\\n\", check_asm_data[i++]);" >> $4
;;
# -data: emit offset/sizeof initializer entries for every member, to be
#        compiled into the check_asm_data[] table.
-data)
sed -n -e '/^#/d;/struct[ ]*'$2'_struct[ ]*{/,/};/p' < $3 | sed '/struct[ ]*'$2'_struct[ ]*{/d;/:[0-9]*[ ]*;/d;/^[ ]*$/d;/};/d;s/^[ ]*//;s/volatile[ ]*//;s/\(unsigned\|signed\|struct\)[ ]*//;s/\(\[\|__attribute__\).*;[ ]*$//;s/(\*//;s/)(.*)//;s/;[ ]*$//;s/^[^ ]*[ ]*//;s/,/\
/g' | sed 's/^[ *]*//;s/[ ]*$//;s/^.*$/ ((char *)\&((struct '$2'_struct *)0)->\0) - ((char *)((struct '$2'_struct *)0)), sizeof(((struct '$2'_struct *)0)->\0),/' >> $4
echo " sizeof(struct $2_struct)," >> $4
;;
# -ints: pull the assembled .long constants back out of the generated .s file.
-ints)
sed -n -e '/check_asm_data:/,/\.size/p' <$2 | sed -e 's/check_asm_data://' -e 's/\.size.*//' -e 's/\.long[ ]\([0-9]*\)/\1,/' | grep -v '\.ident' >>$3
;;
*)
exit 1
;;
esac
exit 0
/* $Id: entry.S,v 1.142 2002/01/31 03:30:06 davem Exp $
/* $Id: entry.S,v 1.144 2002/02/09 19:49:30 davem Exp $
* arch/sparc64/kernel/entry.S: Sparc64 trap low-level entry points.
*
* Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
......@@ -151,12 +151,12 @@ do_fpdis:
add %g0, %g0, %g0
ba,a,pt %xcc, rtrap_clr_l6
1: ldub [%g6 + AOFF_task_thread + AOFF_thread_fpsaved], %g5 ! Load Group
1: ldub [%g6 + TI_FPSAVED], %g5 ! Load Group
wr %g0, FPRS_FEF, %fprs ! LSU Group+4bubbles
andcc %g5, FPRS_FEF, %g0 ! IEU1 Group
be,a,pt %icc, 1f ! CTI
clr %g7 ! IEU0
ldx [%g6 + AOFF_task_thread + AOFF_thread_gsr], %g7 ! Load Group
ldx [%g6 + TI_GSR], %g7 ! Load Group
1: andcc %g5, FPRS_DL, %g0 ! IEU1
bne,pn %icc, 2f ! CTI
fzero %f0 ! FPA
......@@ -194,11 +194,11 @@ do_fpdis:
b,pt %xcc, fpdis_exit2
faddd %f0, %f2, %f60
1: mov SECONDARY_CONTEXT, %g3
add %g6, AOFF_task_fpregs + 0x80, %g1
add %g6, TI_FPREGS + 0x80, %g1
faddd %f0, %f2, %f4
fmuld %f0, %f2, %f6
ldxa [%g3] ASI_DMMU, %g5
add %g6, AOFF_task_fpregs + 0xc0, %g2
add %g6, TI_FPREGS + 0xc0, %g2
stxa %g0, [%g3] ASI_DMMU
membar #Sync
faddd %f0, %f2, %f8
......@@ -223,10 +223,10 @@ do_fpdis:
mov SECONDARY_CONTEXT, %g3
fzero %f34
ldxa [%g3] ASI_DMMU, %g5
add %g6, AOFF_task_fpregs, %g1
add %g6, TI_FPREGS, %g1
stxa %g0, [%g3] ASI_DMMU
membar #Sync
add %g6, AOFF_task_fpregs + 0x40, %g2
add %g6, TI_FPREGS + 0x40, %g2
faddd %f32, %f34, %f36
fmuld %f32, %f34, %f38
ldda [%g1] ASI_BLK_S, %f0 ! grrr, where is ASI_BLK_NUCLEUS 8-(
......@@ -246,7 +246,7 @@ do_fpdis:
ba,pt %xcc, fpdis_exit
membar #Sync
3: mov SECONDARY_CONTEXT, %g3
add %g6, AOFF_task_fpregs, %g1
add %g6, TI_FPREGS, %g1
ldxa [%g3] ASI_DMMU, %g5
mov 0x40, %g2
stxa %g0, [%g3] ASI_DMMU
......@@ -262,7 +262,7 @@ fpdis_exit:
membar #Sync
fpdis_exit2:
wr %g7, 0, %gsr
ldx [%g6 + AOFF_task_thread + AOFF_thread_xfsr], %fsr
ldx [%g6 + TI_XFSR], %fsr
rdpr %tstate, %g3
or %g3, %g4, %g3 ! anal...
wrpr %g3, %tstate
......@@ -285,12 +285,12 @@ do_fpother_check_fitos:
/* NOTE: Need to preserve %g7 until we fully commit
* to the fitos fixup.
*/
stx %fsr, [%g6 + AOFF_task_thread + AOFF_thread_xfsr]
stx %fsr, [%g6 + TI_XFSR]
rdpr %tstate, %g3
andcc %g3, TSTATE_PRIV, %g0
bne,pn %xcc, do_fptrap_after_fsr
nop
ldx [%g6 + AOFF_task_thread + AOFF_thread_xfsr], %g3
ldx [%g6 + TI_XFSR], %g3
srlx %g3, 14, %g1
and %g1, 7, %g1
cmp %g1, 2 ! Unfinished FP-OP
......@@ -310,7 +310,7 @@ do_fpother_check_fitos:
cmp %g1, %g2
bne,pn %xcc, do_fptrap_after_fsr
nop
std %f62, [%g6 + AOFF_task_fpregs + (62 * 4)]
std %f62, [%g6 + TI_FPREGS + (62 * 4)]
sethi %hi(fitos_table_1), %g1
and %g3, 0x1f, %g2
or %g1, %lo(fitos_table_1), %g1
......@@ -396,22 +396,22 @@ fitos_table_2:
fdtos %f62, %f31
fitos_emul_fini:
ldd [%g6 + AOFF_task_fpregs + (62 * 4)], %f62
ldd [%g6 + TI_FPREGS + (62 * 4)], %f62
done
.globl do_fptrap
.align 32
do_fptrap:
stx %fsr, [%g6 + AOFF_task_thread + AOFF_thread_xfsr]
stx %fsr, [%g6 + TI_XFSR]
do_fptrap_after_fsr:
ldub [%g6 + AOFF_task_thread + AOFF_thread_fpsaved], %g3
ldub [%g6 + TI_FPSAVED], %g3
rd %fprs, %g1
or %g3, %g1, %g3
stb %g3, [%g6 + AOFF_task_thread + AOFF_thread_fpsaved]
stb %g3, [%g6 + TI_FPSAVED]
rd %gsr, %g3
stx %g3, [%g6 + AOFF_task_thread + AOFF_thread_gsr]
stx %g3, [%g6 + TI_GSR]
mov SECONDARY_CONTEXT, %g3
add %g6, AOFF_task_fpregs, %g2
add %g6, TI_FPREGS, %g2
ldxa [%g3] ASI_DMMU, %g5
stxa %g0, [%g3] ASI_DMMU
membar #Sync
......@@ -713,8 +713,8 @@ floppy_dosoftint:
call sparc_floppy_irq
add %sp, STACK_BIAS + REGWIN_SZ, %o2
b,pt %xcc, rtrap
clr %l6
b,pt %xcc, rtrap_irq
nop
#endif /* CONFIG_BLK_DEV_FD */
......@@ -883,7 +883,7 @@ cee_trap:
mov %l5, %o1
call cee_log
add %sp, STACK_BIAS + REGWIN_SZ, %o2
ba,a,pt %xcc, rtrap_clr_l6
ba,a,pt %xcc, rtrap_irq
/* Capture I/D/E-cache state into per-cpu error scoreboard.
*
......@@ -1109,7 +1109,7 @@ cheetah_fast_ecc:
mov %l5, %o2
call cheetah_fecc_handler
add %sp, STACK_BIAS + REGWIN_SZ, %o0
ba,a,pt %xcc, rtrap_clr_l6
ba,a,pt %xcc, rtrap_irq
/* Our caller has disabled I-cache and performed membar Sync. */
.globl cheetah_cee
......@@ -1135,7 +1135,7 @@ cheetah_cee:
mov %l5, %o2
call cheetah_cee_handler
add %sp, STACK_BIAS + REGWIN_SZ, %o0
ba,a,pt %xcc, rtrap_clr_l6
ba,a,pt %xcc, rtrap_irq
/* Our caller has disabled I-cache+D-cache and performed membar Sync. */
.globl cheetah_deferred_trap
......@@ -1161,7 +1161,7 @@ cheetah_deferred_trap:
mov %l5, %o2
call cheetah_deferred_handler
add %sp, STACK_BIAS + REGWIN_SZ, %o0
ba,a,pt %xcc, rtrap_clr_l6
ba,a,pt %xcc, rtrap_irq
.globl __do_privact
__do_privact:
......@@ -1384,8 +1384,8 @@ sys_ptrace: add %sp, STACK_BIAS + REGWIN_SZ, %o0
add %o7, 1f-.-4, %o7
nop
.align 32
1: ldub [%curptr + AOFF_task_work + 1], %l5
andcc %l5, 0xff, %g0
1: ldx [%curptr + TI_FLAGS], %l5
andcc %l5, _TIF_SYSCALL_TRACE, %g0
be,pt %icc, rtrap
clr %l6
call syscall_trace
......@@ -1435,14 +1435,14 @@ ret_from_syscall:
/* Clear SPARC_FLAG_NEWCHILD, switch_to leaves thread.flags in
* %o7 for us. Check performance counter stuff too.
*/
andn %o7, SPARC_FLAG_NEWCHILD, %l0
andn %o7, _TIF_NEWCHILD, %l0
mov %g5, %o0 /* 'prev' */
call schedule_tail
stb %l0, [%g6 + AOFF_task_thread + AOFF_thread_flags]
andcc %l0, SPARC_FLAG_PERFCTR, %g0
stx %l0, [%g6 + TI_FLAGS]
andcc %l0, _TIF_PERFCTR, %g0
be,pt %icc, 1f
nop
ldx [%g6 + AOFF_task_thread + AOFF_thread_pcr_reg], %o7
ldx [%g6 + TI_PCR], %o7
wr %g0, %o7, %pcr
/* Blackbird errata workaround. See commentary in
......@@ -1465,7 +1465,7 @@ sparc_exit: wrpr %g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV), %pstate
wrpr %g0, 0x0, %otherwin
wrpr %g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE), %pstate
ba,pt %xcc, sys_exit
stb %g0, [%g6 + AOFF_task_thread + AOFF_thread_w_saved]
stb %g0, [%g6 + TI_WSAVED]
linux_sparc_ni_syscall:
sethi %hi(sys_ni_syscall), %l7
......@@ -1510,11 +1510,11 @@ linux_sparc_syscall32:
mov %i4, %o4 ! IEU1
lduw [%l7 + %l4], %l7 ! Load
srl %i1, 0, %o1 ! IEU0 Group
ldub [%curptr + AOFF_task_work + 1], %l0 ! Load
ldx [%curptr + TI_FLAGS], %l0 ! Load
mov %i5, %o5 ! IEU1
srl %i2, 0, %o2 ! IEU0 Group
andcc %l0, 0xff, %g0 ! IEU0 Group
andcc %l0, _TIF_SYSCALL_TRACE, %g0 ! IEU0 Group
bne,pn %icc, linux_syscall_trace32 ! CTI
mov %i0, %l5 ! IEU1
call %l7 ! CTI Group brk forced
......@@ -1538,11 +1538,11 @@ linux_sparc_syscall:
mov %i1, %o1 ! IEU1
lduw [%l7 + %l4], %l7 ! Load
4: mov %i2, %o2 ! IEU0 Group
ldub [%curptr + AOFF_task_work + 1], %l0 ! Load
ldx [%curptr + TI_FLAGS], %l0 ! Load
mov %i3, %o3 ! IEU1
mov %i4, %o4 ! IEU0 Group
andcc %l0, 0xff, %g0 ! IEU1 Group+1 bubble
andcc %l0, _TIF_SYSCALL_TRACE, %g0 ! IEU1 Group+1 bubble
bne,pn %icc, linux_syscall_trace ! CTI Group
mov %i0, %l5 ! IEU0
2: call %l7 ! CTI Group brk forced
......@@ -1565,7 +1565,7 @@ ret_sys_call:
sllx %g2, 32, %g2
bgeu,pn %xcc, 1f
andcc %l0, 0x02, %l6
andcc %l0, _TIF_SYSCALL_TRACE, %l6
andn %g3, %g2, %g3 /* System call success, clear Carry condition code. */
stx %g3, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TSTATE]
bne,pn %icc, linux_syscall_trace2
......
/* $Id: etrap.S,v 1.45 2001/09/07 21:04:40 kanoj Exp $
/* $Id: etrap.S,v 1.46 2002/02/09 19:49:30 davem Exp $
* etrap.S: Preparing for entry into the kernel on Sparc V9.
*
* Copyright (C) 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
#include <linux/config.h>
#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
......@@ -25,9 +27,17 @@
.text
.align 64
.globl etrap, etrap_irq, etraptl1
#ifdef CONFIG_PREEMPT
etrap_irq: ldsw [%g6 + TI_PRE_COUNT], %g1
add %g1, 1, %g1
ba,pt %xcc, etrap_irq2
stw %g1, [%g6 + TI_PRE_COUNT]
#endif
etrap: rdpr %pil, %g2 ! Single Group
etrap_irq: rdpr %tstate, %g1 ! Single Group
#ifndef CONFIG_PREEMPT
etrap_irq:
#endif
etrap_irq2: rdpr %tstate, %g1 ! Single Group
sllx %g2, 20, %g3 ! IEU0 Group
andcc %g1, TSTATE_PRIV, %g0 ! IEU1
or %g1, %g3, %g1 ! IEU0 Group
......@@ -60,7 +70,7 @@ etrap_irq: rdpr %tstate, %g1 ! Single Group
wrpr %g0, 0, %canrestore ! Single Group+4bubbles
sll %g2, 3, %g2 ! IEU0 Group
mov 1, %l5 ! IEU1
stb %l5, [%l6 + AOFF_task_thread + AOFF_thread_fpdepth] ! Store
stb %l5, [%l6 + TI_FPDEPTH] ! Store
wrpr %g3, 0, %otherwin ! Single Group+4bubbles
wrpr %g2, 0, %wstate ! Single Group+4bubbles
......@@ -98,11 +108,11 @@ etrap_irq: rdpr %tstate, %g1 ! Single Group
nop
nop
3: ldub [%l6 + AOFF_task_thread + AOFF_thread_fpdepth], %l5 ! Load Group
add %l6, AOFF_task_thread + AOFF_thread_fpsaved + 1, %l4 ! IEU0
3: ldub [%l6 + TI_FPDEPTH], %l5 ! Load Group
add %l6, TI_FPSAVED + 1, %l4 ! IEU0
srl %l5, 1, %l3 ! IEU0 Group
add %l5, 2, %l5 ! IEU1
stb %l5, [%l6 + AOFF_task_thread + AOFF_thread_fpdepth] ! Store
stb %l5, [%l6 + TI_FPDEPTH] ! Store
ba,pt %xcc, 2b ! CTI
stb %g0, [%l4 + %l3] ! Store Group
nop
......
/* $Id: head.S,v 1.86 2001/12/05 01:02:16 davem Exp $
/* $Id: head.S,v 1.87 2002/02/09 19:49:31 davem Exp $
* head.S: Initial boot code for the Sparc64 port of Linux.
*
* Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
......@@ -10,7 +10,7 @@
#include <linux/config.h>
#include <linux/version.h>
#include <linux/errno.h>
#include <asm/asm_offsets.h>
#include <asm/thread_info.h>
#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
......@@ -491,8 +491,8 @@ spitfire_tlb_fixup:
stw %g2, [%g5 + %lo(tlb_type)]
tlb_fixup_done:
sethi %hi(init_task_union), %g6
or %g6, %lo(init_task_union), %g6
sethi %hi(init_thread_union), %g6
or %g6, %lo(init_thread_union), %g6
mov %sp, %l6
mov %o4, %l7
......
......@@ -12,17 +12,25 @@ static struct signal_struct init_signals = INIT_SIGNALS;
struct mm_struct init_mm = INIT_MM(init_mm);
/* .text section in head.S is aligned at a 2 page boundary and this gets linked
* right after that so that the init_task_union is aligned properly as well.
* right after that so that the init_thread_union is aligned properly as well.
* We really don't need this special alignment like the Intel does, but
* I do it anyways for completeness.
*/
__asm__ (".text");
union task_union init_task_union = { INIT_TASK(init_task_union.task) };
union thread_union init_thread_union = { INIT_THREAD_INFO(init_task) };
/*
* This is to make the init_task+stack of the right size for >8k pagesize.
* The definition of task_union in sched.h makes it 16k wide.
* This is to make the init_thread+stack be the right size for >8k pagesize.
* The definition of thread_union in sched.h makes it 16k wide.
*/
#if PAGE_SHIFT != 13
char init_task_stack[THREAD_SIZE - INIT_TASK_SIZE] = { 0 };
char init_task_stack[THREAD_SIZE - INIT_THREAD_SIZE] = { 0 };
#endif
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
__asm__(".data");
struct task_struct init_task = INIT_TASK(init_task);
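The alignment comment above matters because thread_info now sits at the base of the kernel stack: init_thread_union overlays the two, so it must be stack-sized and stack-aligned. A schematic of that union, with the 16k figure taken from the comment but the member layout assumed:
    #define THREAD_SIZE_SKETCH (2 * 8192)           /* 16k, per the comment above */

    struct thread_info_sketch {
            unsigned long flags;                    /* placeholder members only */
    };

    union thread_union_sketch {
            struct thread_info_sketch thread_info;  /* lives at the stack base */
            unsigned long stack[THREAD_SIZE_SKETCH / sizeof(unsigned long)];
    };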
/* $Id: itlb_base.S,v 1.11 2001/08/17 04:55:09 kanoj Exp $
/* $Id: itlb_base.S,v 1.12 2002/02/09 19:49:30 davem Exp $
* itlb_base.S: Front end to ITLB miss replacement strategy.
* This is included directly into the trap table.
*
......@@ -51,8 +51,8 @@
rdpr %tpc, %g5 ! And load faulting VA
mov FAULT_CODE_ITLB, %g4 ! It was read from ITLB
sparc64_realfault_common: ! Called by TL0 dtlb_miss too
stb %g4, [%g6 + AOFF_task_thread + AOFF_thread_fault_code]
stx %g5, [%g6 + AOFF_task_thread + AOFF_thread_fault_address]
stb %g4, [%g6 + TI_FAULT_CODE]
stx %g5, [%g6 + TI_FAULT_ADDR]
ba,pt %xcc, etrap ! Save state
1: rd %pc, %g7 ! ...
nop
......
/* $Id: pci_common.c,v 1.28 2002/01/14 05:47:02 davem Exp $
/* $Id: pci_common.c,v 1.29 2002/02/01 00:56:03 davem Exp $
* pci_common.c: PCI controller common support.
*
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
......@@ -670,6 +670,20 @@ static void __init pdev_fixup_irq(struct pci_dev *pdev)
int prom_node = pcp->prom_node;
int err;
/* If this is an empty EBUS device, sometimes OBP fails to
* give it a valid fully specified interrupts property.
* The EBUS hooked up to SunHME on PCI I/O boards of
* Ex000 systems is one such case.
*
* The interrupt is not important so just ignore it.
*/
if (pdev->vendor == PCI_VENDOR_ID_SUN &&
pdev->device == PCI_DEVICE_ID_SUN_EBUS &&
!prom_getchild(prom_node)) {
pdev->irq = 0;
return;
}
err = prom_getproperty(prom_node, "interrupts",
(char *)&prom_irq, sizeof(prom_irq));
if (err == 0 || err == -1) {
......
/* $Id: pci_psycho.c,v 1.32 2002/01/23 11:27:32 davem Exp $
/* $Id: pci_psycho.c,v 1.33 2002/02/01 00:58:33 davem Exp $
* pci_psycho.c: PSYCHO/U2P specific PCI controller support.
*
* Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu)
......@@ -13,7 +13,6 @@
#include <linux/slab.h>
#include <asm/pbm.h>
#include <asm/fhc.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/starfire.h>
......@@ -1495,8 +1494,6 @@ static void psycho_pbm_init(struct pci_controller_info *p,
} else {
pbm = &p->pbm_B;
pbm->pci_first_slot = 2;
if (central_bus != NULL)
pbm->pci_first_slot = 1;
pbm->io_space.start = p->controller_regs + PSYCHO_IOSPACE_B;
pbm->mem_space.start = p->controller_regs + PSYCHO_MEMSPACE_B;
}
......
......@@ -27,8 +27,6 @@
#include <asm/visasm.h>
#include <asm/spitfire.h>
#define MAGIC_CONSTANT 0x80000000
/* Returning from ptrace is a bit tricky because the syscall return
* low level code assumes any value returned which is negative and
* is a valid errno will mean setting the condition codes to indicate
......@@ -53,7 +51,7 @@ static inline void pt_succ_return(struct pt_regs *regs, unsigned long value)
static inline void
pt_succ_return_linux(struct pt_regs *regs, unsigned long value, long *addr)
{
if (current->thread.flags & SPARC_FLAG_32BIT) {
if (test_thread_flag(TIF_32BIT)) {
if (put_user(value, (unsigned int *)addr))
return pt_error_return(regs, EFAULT);
} else {
......@@ -125,7 +123,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
unsigned long addr2 = regs->u_regs[UREG_I4];
struct task_struct *child;
if (current->thread.flags & SPARC_FLAG_32BIT) {
if (test_thread_flag(TIF_32BIT)) {
addr &= 0xffffffffUL;
data &= 0xffffffffUL;
addr2 &= 0xffffffffUL;
......@@ -201,7 +199,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
goto out_tsk;
}
if (!(child->thread.flags & SPARC_FLAG_32BIT) &&
if (!(test_thread_flag(TIF_32BIT)) &&
((request == PTRACE_READDATA64) ||
(request == PTRACE_WRITEDATA64) ||
(request == PTRACE_READTEXT64) ||
......@@ -223,7 +221,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
int res, copied;
res = -EIO;
if (current->thread.flags & SPARC_FLAG_32BIT) {
if (test_thread_flag(TIF_32BIT)) {
copied = access_process_vm(child, addr,
&tmp32, sizeof(tmp32), 0);
tmp64 = (unsigned long) tmp32;
......@@ -248,7 +246,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
unsigned int tmp32;
int copied, res = -EIO;
if (current->thread.flags & SPARC_FLAG_32BIT) {
if (test_thread_flag(TIF_32BIT)) {
tmp32 = data;
copied = access_process_vm(child, addr,
&tmp32, sizeof(tmp32), 1);
......@@ -270,7 +268,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
case PTRACE_GETREGS: {
struct pt_regs32 *pregs = (struct pt_regs32 *) addr;
struct pt_regs *cregs = child->thread.kregs;
struct pt_regs *cregs = child->thread_info->kregs;
int rval;
if (__put_user(tstate_to_psr(cregs->tstate), (&pregs->psr)) ||
......@@ -294,11 +292,11 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
case PTRACE_GETREGS64: {
struct pt_regs *pregs = (struct pt_regs *) addr;
struct pt_regs *cregs = child->thread.kregs;
struct pt_regs *cregs = child->thread_info->kregs;
unsigned long tpc = cregs->tpc;
int rval;
if ((child->thread.flags & SPARC_FLAG_32BIT) != 0)
if ((child->thread_info->flags & _TIF_32BIT) != 0)
tpc &= 0xffffffff;
if (__put_user(cregs->tstate, (&pregs->tstate)) ||
__put_user(tpc, (&pregs->tpc)) ||
......@@ -321,7 +319,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
case PTRACE_SETREGS: {
struct pt_regs32 *pregs = (struct pt_regs32 *) addr;
struct pt_regs *cregs = child->thread.kregs;
struct pt_regs *cregs = child->thread_info->kregs;
unsigned int psr, pc, npc, y;
int i;
......@@ -354,7 +352,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
case PTRACE_SETREGS64: {
struct pt_regs *pregs = (struct pt_regs *) addr;
struct pt_regs *cregs = child->thread.kregs;
struct pt_regs *cregs = child->thread_info->kregs;
unsigned long tstate, tpc, tnpc, y;
int i;
......@@ -368,7 +366,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
pt_error_return(regs, EFAULT);
goto out_tsk;
}
if ((child->thread.flags & SPARC_FLAG_32BIT) != 0) {
if ((child->thread_info->flags & _TIF_32BIT) != 0) {
tpc &= 0xffffffff;
tnpc &= 0xffffffff;
}
......@@ -402,11 +400,11 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
unsigned int insn;
} fpq[16];
} *fps = (struct fps *) addr;
unsigned long *fpregs = (unsigned long *)(((char *)child) + AOFF_task_fpregs);
unsigned long *fpregs = child->thread_info->fpregs;
if (copy_to_user(&fps->regs[0], fpregs,
(32 * sizeof(unsigned int))) ||
__put_user(child->thread.xfsr[0], (&fps->fsr)) ||
__put_user(child->thread_info->xfsr[0], (&fps->fsr)) ||
__put_user(0, (&fps->fpqd)) ||
__put_user(0, (&fps->flags)) ||
__put_user(0, (&fps->extra)) ||
......@@ -423,11 +421,11 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
unsigned int regs[64];
unsigned long fsr;
} *fps = (struct fps *) addr;
unsigned long *fpregs = (unsigned long *)(((char *)child) + AOFF_task_fpregs);
unsigned long *fpregs = child->thread_info->fpregs;
if (copy_to_user(&fps->regs[0], fpregs,
(64 * sizeof(unsigned int))) ||
__put_user(child->thread.xfsr[0], (&fps->fsr))) {
__put_user(child->thread_info->xfsr[0], (&fps->fsr))) {
pt_error_return(regs, EFAULT);
goto out_tsk;
}
......@@ -447,7 +445,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
unsigned int insn;
} fpq[16];
} *fps = (struct fps *) addr;
unsigned long *fpregs = (unsigned long *)(((char *)child) + AOFF_task_fpregs);
unsigned long *fpregs = child->thread_info->fpregs;
unsigned fsr;
if (copy_from_user(fpregs, &fps->regs[0],
......@@ -456,11 +454,11 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
pt_error_return(regs, EFAULT);
goto out_tsk;
}
child->thread.xfsr[0] &= 0xffffffff00000000UL;
child->thread.xfsr[0] |= fsr;
if (!(child->thread.fpsaved[0] & FPRS_FEF))
child->thread.gsr[0] = 0;
child->thread.fpsaved[0] |= (FPRS_FEF | FPRS_DL);
child->thread_info->xfsr[0] &= 0xffffffff00000000UL;
child->thread_info->xfsr[0] |= fsr;
if (!(child->thread_info->fpsaved[0] & FPRS_FEF))
child->thread_info->gsr[0] = 0;
child->thread_info->fpsaved[0] |= (FPRS_FEF | FPRS_DL);
pt_succ_return(regs, 0);
goto out_tsk;
}
......@@ -470,17 +468,17 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
unsigned int regs[64];
unsigned long fsr;
} *fps = (struct fps *) addr;
unsigned long *fpregs = (unsigned long *)(((char *)child) + AOFF_task_fpregs);
unsigned long *fpregs = child->thread_info->fpregs;
if (copy_from_user(fpregs, &fps->regs[0],
(64 * sizeof(unsigned int))) ||
__get_user(child->thread.xfsr[0], (&fps->fsr))) {
__get_user(child->thread_info->xfsr[0], (&fps->fsr))) {
pt_error_return(regs, EFAULT);
goto out_tsk;
}
if (!(child->thread.fpsaved[0] & FPRS_FEF))
child->thread.gsr[0] = 0;
child->thread.fpsaved[0] |= (FPRS_FEF | FPRS_DL | FPRS_DU);
if (!(child->thread_info->fpsaved[0] & FPRS_FEF))
child->thread_info->gsr[0] = 0;
child->thread_info->fpsaved[0] |= (FPRS_FEF | FPRS_DL | FPRS_DU);
pt_succ_return(regs, 0);
goto out_tsk;
}
......@@ -523,7 +521,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
if (addr != 1) {
unsigned long pc_mask = ~0UL;
if ((child->thread.flags & SPARC_FLAG_32BIT) != 0)
if ((child->thread_info->flags & _TIF_32BIT) != 0)
pc_mask = 0xffffffff;
if (addr & 3) {
......@@ -531,27 +529,27 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
goto out_tsk;
}
#ifdef DEBUG_PTRACE
printk ("Original: %016lx %016lx\n", child->thread.kregs->tpc, child->thread.kregs->tnpc);
printk ("Original: %016lx %016lx\n",
child->thread_info->kregs->tpc,
child->thread_info->kregs->tnpc);
printk ("Continuing with %016lx %016lx\n", addr, addr+4);
#endif
child->thread.kregs->tpc = (addr & pc_mask);
child->thread.kregs->tnpc = ((addr + 4) & pc_mask);
child->thread_info->kregs->tpc = (addr & pc_mask);
child->thread_info->kregs->tnpc = ((addr + 4) & pc_mask);
}
if (request == PTRACE_SYSCALL) {
child->ptrace |= PT_SYSCALLTRACE;
child->work.syscall_trace++;
set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
} else {
child->ptrace &= ~PT_SYSCALLTRACE;
child->work.syscall_trace--;
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
}
child->exit_code = data;
#ifdef DEBUG_PTRACE
printk("CONT: %s [%d]: set exit_code = %x %lx %lx\n", child->comm,
child->pid, child->exit_code,
child->thread.kregs->tpc,
child->thread.kregs->tnpc);
child->thread_info->kregs->tpc,
child->thread_info->kregs->tnpc);
#endif
wake_up_process(child);
......@@ -614,7 +612,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
}
out_tsk:
if (child)
free_task_struct(child);
put_task_struct(child);
out:
unlock_kernel();
}
......@@ -624,12 +622,12 @@ asmlinkage void syscall_trace(void)
#ifdef DEBUG_PTRACE
printk("%s [%d]: syscall_trace\n", current->comm, current->pid);
#endif
if ((current->ptrace & (PT_PTRACED|PT_SYSCALLTRACE))
!= (PT_PTRACED|PT_SYSCALLTRACE))
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return;
if (!(current->ptrace & PT_PTRACED))
return;
current->exit_code = SIGTRAP;
current->state = TASK_STOPPED;
current->thread.flags ^= MAGIC_CONSTANT;
notify_parent(current, SIGCHLD);
schedule();
/*
......
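The comment at the top of the ptrace.c excerpt describes the sparc syscall return convention that makes ptrace returns tricky: failures are signalled by placing the errno in the return register and setting the carry condition codes, so ptrace uses explicit pt_error_return/pt_succ_return helpers rather than plain return values. A stand-in sketch of that convention; the register and bit names below are placeholders, not this file's definitions:
    struct regs_sketch { unsigned long i0; unsigned long tstate; };
    #define CARRY_SKETCH 0x1UL                      /* stands in for the carry bits */

    /* failure: errno in the return register, carry set */
    static void pt_error_return_sketch(struct regs_sketch *regs, unsigned long error)
    {
            regs->i0 = error;
            regs->tstate |= CARRY_SKETCH;
    }

    /* success: plain value, carry clear */
    static void pt_succ_return_sketch(struct regs_sketch *regs, unsigned long value)
    {
            regs->i0 = value;
            regs->tstate &= ~CARRY_SKETCH;
    }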
/* $Id: rtrap.S,v 1.60 2002/01/31 03:30:06 davem Exp $
/* $Id: rtrap.S,v 1.61 2002/02/09 19:49:31 davem Exp $
* rtrap.S: Preparing for return from trap on Sparc V9.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
*/
#include <linux/config.h>
#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
......@@ -42,18 +44,18 @@ __handle_user_windows:
wrpr %g0, RTRAP_PSTATE, %pstate
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
/* Redo sched+sig checks */
lduw [%g6 + AOFF_task_work], %l0
srlx %l0, 24, %o0
ldx [%g6 + TI_FLAGS], %l0
andcc %l0, _TIF_NEED_RESCHED, %g0
brz,pt %o0, 1f
be,pt %xcc, 1f
nop
call schedule
wrpr %g0, RTRAP_PSTATE, %pstate
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
lduw [%g6 + AOFF_task_work], %l0
ldx [%g6 + TI_FLAGS], %l0
1: sllx %l0, 48, %o0
brz,pt %o0, __handle_user_windows_continue
1: andcc %l0, (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING), %g0
be,pt %xcc, __handle_user_windows_continue
nop
clr %o0
mov %l5, %o2
......@@ -78,7 +80,7 @@ __handle_perfctrs:
call update_perfctrs
wrpr %g0, RTRAP_PSTATE, %pstate
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
ldub [%g6 + AOFF_task_thread + AOFF_thread_w_saved], %o2
ldub [%g6 + TI_WSAVED], %o2
brz,pt %o2, 1f
nop
/* Redo userwin+sched+sig checks */
......@@ -86,18 +88,18 @@ __handle_perfctrs:
wrpr %g0, RTRAP_PSTATE, %pstate
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
lduw [%g6 + AOFF_task_work], %l0
srlx %l0, 24, %o0
brz,pt %o0, 1f
ldx [%g6 + TI_FLAGS], %l0
andcc %l0, _TIF_NEED_RESCHED, %g0
be,pt %xcc, 1f
nop
call schedule
wrpr %g0, RTRAP_PSTATE, %pstate
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
lduw [%g6 + AOFF_task_work], %l0
1: sllx %l0, 48, %o0
ldx [%g6 + TI_FLAGS], %l0
1: andcc %l0, (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING), %g0
brz,pt %o0, __handle_perfctrs_continue
be,pt %xcc, __handle_perfctrs_continue
sethi %hi(TSTATE_PEF), %o0
clr %o0
mov %l5, %o2
......@@ -148,9 +150,15 @@ __handle_signal:
andn %l1, %l4, %l1
.align 64
.globl rtrap_clr_l6, rtrap, irqsz_patchme
.globl rtrap_irq, rtrap_clr_l6, rtrap, irqsz_patchme
rtrap_irq:
#ifdef CONFIG_PREEMPT
ldsw [%g6 + TI_PRE_COUNT], %l0
sub %l0, 1, %l0
stw %l0, [%g6 + TI_PRE_COUNT]
#endif
rtrap_clr_l6: clr %l6
rtrap: lduw [%g6 + AOFF_task_cpu], %l0
rtrap: ldub [%g6 + TI_CPU], %l0
sethi %hi(irq_stat), %l2 ! &softirq_active
or %l2, %lo(irq_stat), %l2 ! &softirq_active
irqsz_patchme: sllx %l0, 0, %l0
......@@ -182,26 +190,33 @@ __handle_softirq_continue:
*/
to_user: wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
__handle_preemption_continue:
lduw [%g6 + AOFF_task_work], %l0
srlx %l0, 24, %o0
brnz,pn %o0, __handle_preemption
sllx %l0, 48, %o0
brnz,pn %o0, __handle_signal
ldx [%g6 + TI_FLAGS], %l0
sethi %hi(_TIF_USER_WORK_MASK), %o0
or %o0, %lo(_TIF_USER_WORK_MASK), %o0
andcc %l0, %o0, %g0
sethi %hi(TSTATE_PEF), %o0
be,pt %xcc, user_nowork
andcc %l1, %o0, %g0
andcc %l0, _TIF_NEED_RESCHED, %g0
bne,pn %xcc, __handle_preemption
andcc %l0, (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING), %g0
bne,pn %xcc, __handle_signal
__handle_signal_continue:
ldub [%g6 + AOFF_task_thread + AOFF_thread_w_saved], %o2
ldub [%g6 + TI_WSAVED], %o2
brnz,pn %o2, __handle_user_windows
nop
__handle_user_windows_continue:
ldub [%g6 + AOFF_task_thread + AOFF_thread_flags], %l5
andcc %l5, SPARC_FLAG_PERFCTR, %g0
ldx [%g6 + TI_FLAGS], %l5
andcc %l5, _TIF_PERFCTR, %g0
sethi %hi(TSTATE_PEF), %o0
bne,pn %xcc, __handle_perfctrs
__handle_perfctrs_continue:
andcc %l1, %o0, %g0
/* This fpdepth clear is necessary for non-syscall rtraps only */
user_nowork:
bne,pn %xcc, __handle_userfpu
stb %g0, [%g6 + AOFF_task_thread + AOFF_thread_fpdepth]
stb %g0, [%g6 + TI_FPDEPTH]
__handle_userfpu_continue:
rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
......@@ -254,14 +269,25 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
kern_rtt: restore
retry
to_kernel: ldub [%g6 + AOFF_task_thread + AOFF_thread_fpdepth], %l5
to_kernel:
#ifdef CONFIG_PREEMPT
ldsw [%g6 + TI_PRE_COUNT], %l5
brnz %l5, kern_fpucheck
add %l5, 1, %l6
stw %l6, [%g6 + TI_PRE_COUNT]
call kpreempt_maybe
wrpr %g0, RTRAP_PSTATE, %pstate
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
stw %l5, [%g6 + TI_PRE_COUNT]
#endif
kern_fpucheck: ldub [%g6 + TI_FPDEPTH], %l5
brz,pt %l5, rt_continue
srl %l5, 1, %o0
add %g6, AOFF_task_thread + AOFF_thread_fpsaved, %l6
add %g6, TI_FPSAVED, %l6
ldub [%l6 + %o0], %l2
sub %l5, 2, %l5
add %g6, AOFF_task_thread + AOFF_thread_gsr, %o1
add %g6, TI_GSR, %o1
andcc %l2, (FPRS_FEF|FPRS_DU), %g0
be,pt %icc, 2f
and %l2, FPRS_DL, %l6
......@@ -272,12 +298,12 @@ to_kernel: ldub [%g6 + AOFF_task_thread + AOFF_thread_fpdepth], %l5
wr %g5, FPRS_FEF, %fprs
ldx [%o1 + %o5], %g5
add %g6, AOFF_task_thread + AOFF_thread_xfsr, %o1
add %g6, TI_XFSR, %o1
membar #StoreLoad | #LoadLoad
sll %o0, 8, %o2
add %g6, AOFF_task_fpregs, %o3
add %g6, TI_FPREGS, %o3
brz,pn %l6, 1f
add %g6, AOFF_task_fpregs+0x40, %o4
add %g6, TI_FPREGS+0x40, %o4
ldda [%o3 + %o2] ASI_BLK_P, %f0
ldda [%o4 + %o2] ASI_BLK_P, %f16
......@@ -290,20 +316,20 @@ to_kernel: ldub [%g6 + AOFF_task_thread + AOFF_thread_fpdepth], %l5
1: membar #Sync
ldx [%o1 + %o5], %fsr
2: stb %l5, [%g6 + AOFF_task_thread + AOFF_thread_fpdepth]
2: stb %l5, [%g6 + TI_FPDEPTH]
ba,pt %xcc, rt_continue
nop
5: wr %g0, FPRS_FEF, %fprs
membar #StoreLoad | #LoadLoad
sll %o0, 8, %o2
add %g6, AOFF_task_fpregs+0x80, %o3
add %g6, AOFF_task_fpregs+0xc0, %o4
add %g6, TI_FPREGS+0x80, %o3
add %g6, TI_FPREGS+0xc0, %o4
ldda [%o3 + %o2] ASI_BLK_P, %f32
ldda [%o4 + %o2] ASI_BLK_P, %f48
membar #Sync
wr %g0, FPRS_DU, %fprs
ba,pt %xcc, rt_continue
stb %l5, [%g6 + AOFF_task_thread + AOFF_thread_fpdepth]
stb %l5, [%g6 + TI_FPDEPTH]
#undef PTREGS_OFF
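The CONFIG_PREEMPT block added to to_kernel above is the return-to-kernel preemption point: when the saved preempt count is zero it is bumped to block recursion, interrupts are re-enabled, kpreempt_maybe (the helper the assembly calls) decides whether to schedule, and the original count is restored. A rough self-contained C rendering of that control flow, with the helper bodies assumed:
    struct ti_preempt_sketch { int pre_count; };

    static void irqs_on_sketch(void)         { /* the asm writes RTRAP_PSTATE */ }
    static void irqs_off_sketch(void)        { /* the asm writes RTRAP_PSTATE_IRQOFF */ }
    static void kpreempt_maybe_sketch(void)  { /* may call schedule() if a resched is pending */ }

    static void to_kernel_preempt_sketch(struct ti_preempt_sketch *ti)
    {
            int saved = ti->pre_count;

            if (saved != 0)
                    return;                  /* preemption currently disabled, skip */
            ti->pre_count = saved + 1;       /* guard against recursive preemption */
            irqs_on_sketch();
            kpreempt_maybe_sketch();
            irqs_off_sketch();
            ti->pre_count = saved;           /* restore the original (zero) count */
    }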
/* $Id: setup.c,v 1.71 2001/11/13 00:49:28 davem Exp $
/* $Id: setup.c,v 1.72 2002/02/09 19:49:30 davem Exp $
* linux/arch/sparc64/kernel/setup.c
*
* Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
......@@ -536,7 +536,7 @@ void __init setup_arch(char **cmdline_p)
rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
#endif
init_task.thread.kregs = &fake_swapper_regs;
init_task.thread_info->kregs = &fake_swapper_regs;
#ifdef CONFIG_IP_PNP
if (!ic_set_manually) {
......
......@@ -212,7 +212,7 @@ void __init smp_callin(void)
/* Clear this or we will die instantly when we
* schedule back to this idler...
*/
current->thread.flags &= ~(SPARC_FLAG_NEWCHILD);
clear_thread_flag(TIF_NEWCHILD);
/* Attach to the address space of init_task. */
atomic_inc(&init_mm.mm_count);
......@@ -236,7 +236,7 @@ extern unsigned long sparc64_cpu_startup;
* 32-bits (I think) so to be safe we have it read the pointer
* contained here so we work on >4GB machines. -DaveM
*/
static struct task_struct *cpu_new_task = NULL;
static struct thread_info *cpu_new_thread = NULL;
static void smp_tune_scheduling(void);
......@@ -261,7 +261,7 @@ void __init smp_boot_cpus(void)
goto ignorecpu;
if (cpu_present_map & (1UL << i)) {
unsigned long entry = (unsigned long)(&sparc64_cpu_startup);
unsigned long cookie = (unsigned long)(&cpu_new_task);
unsigned long cookie = (unsigned long)(&cpu_new_thread);
struct task_struct *p;
int timeout;
int no;
......@@ -280,7 +280,7 @@ void __init smp_boot_cpus(void)
for (no = 0; no < linux_num_cpus; no++)
if (linux_cpus[no].mid == i)
break;
cpu_new_task = p;
cpu_new_thread = p->thread_info;
prom_startcpu(linux_cpus[no].prom_node,
entry, cookie);
for (timeout = 0; timeout < 5000000; timeout++) {
......@@ -305,7 +305,7 @@ void __init smp_boot_cpus(void)
__cpu_number_map[i] = -1;
}
}
cpu_new_task = NULL;
cpu_new_thread = NULL;
if (cpucount == 0) {
if (max_cpus != 1)
printk("Error: only one processor found.\n");
......@@ -902,7 +902,7 @@ void smp_migrate_task(int cpu, task_t *p)
if (smp_processors_ready && (cpu_present_map & mask) != 0) {
u64 data0 = (((u64)&xcall_migrate_task) & 0xffffffff);
spin_lock(&migration_lock);
_raw_spin_lock(&migration_lock);
new_task = p;
if (tlb_type == spitfire)
......@@ -923,7 +923,7 @@ asmlinkage void smp_task_migration_interrupt(int irq, struct pt_regs *regs)
clear_softint(1 << irq);
p = new_task;
spin_unlock(&migration_lock);
_raw_spin_unlock(&migration_lock);
sched_task_migrated(p);
}
......
/* $Id: sparc64_ksyms.c,v 1.120 2001/12/21 04:56:15 davem Exp $
/* $Id: sparc64_ksyms.c,v 1.121 2002/02/09 19:49:31 davem Exp $
* arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
......@@ -80,6 +80,11 @@ extern u32 sunos_sys_table[], sys_call_table32[];
extern void tl0_solaris(void);
extern void sys_sigsuspend(void);
extern int sys_getppid(void);
extern int sys_getpid(void);
extern int sys_geteuid(void);
extern int sys_getuid(void);
extern int sys_getegid(void);
extern int sys_getgid(void);
extern int svr4_getcontext(svr4_ucontext_t *uc, struct pt_regs *regs);
extern int svr4_setcontext(svr4_ucontext_t *uc, struct pt_regs *regs);
extern int sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
......@@ -307,6 +312,11 @@ EXPORT_SYMBOL(sys_call_table32);
EXPORT_SYMBOL(tl0_solaris);
EXPORT_SYMBOL(sys_sigsuspend);
EXPORT_SYMBOL(sys_getppid);
EXPORT_SYMBOL(sys_getpid);
EXPORT_SYMBOL(sys_geteuid);
EXPORT_SYMBOL(sys_getuid);
EXPORT_SYMBOL(sys_getegid);
EXPORT_SYMBOL(sys_getgid);
EXPORT_SYMBOL(svr4_getcontext);
EXPORT_SYMBOL(svr4_setcontext);
EXPORT_SYMBOL(prom_cpu_nodes);
......
/* $Id: sys_sparc.c,v 1.56 2001/12/21 04:56:15 davem Exp $
/* $Id: sys_sparc.c,v 1.57 2002/02/09 19:49:30 davem Exp $
* linux/arch/sparc64/kernel/sys_sparc.c
*
* This file contains various random system calls that
......@@ -59,7 +59,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
return addr;
}
if (current->thread.flags & SPARC_FLAG_32BIT)
if (test_thread_flag(TIF_32BIT))
task_size = 0xf0000000UL;
if (len > task_size || len > -PAGE_OFFSET)
return -ENOMEM;
......@@ -140,7 +140,7 @@ extern asmlinkage unsigned long sys_brk(unsigned long brk);
asmlinkage unsigned long sparc_brk(unsigned long brk)
{
/* People could try to be nasty and use ta 0x6d in 32bit programs */
if ((current->thread.flags & SPARC_FLAG_32BIT) &&
if (test_thread_flag(TIF_32BIT) &&
brk >= 0xf0000000UL)
return current->mm->brk;
......@@ -289,7 +289,7 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
len = PAGE_ALIGN(len);
retval = -EINVAL;
if (current->thread.flags & SPARC_FLAG_32BIT) {
if (test_thread_flag(TIF_32BIT)) {
if (len > 0xf0000000UL ||
((flags & MAP_FIXED) && addr > 0xf0000000UL - len))
goto out_putf;
......@@ -334,7 +334,7 @@ asmlinkage unsigned long sys64_mremap(unsigned long addr,
{
struct vm_area_struct *vma;
unsigned long ret = -EINVAL;
if (current->thread.flags & SPARC_FLAG_32BIT)
if (test_thread_flag(TIF_32BIT))
goto out;
if (old_len > -PAGE_OFFSET || new_len > -PAGE_OFFSET)
goto out;
......@@ -401,7 +401,7 @@ sparc_breakpoint (struct pt_regs *regs)
{
siginfo_t info;
if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
......@@ -454,7 +454,7 @@ asmlinkage int solaris_syscall(struct pt_regs *regs)
regs->tpc = regs->tnpc;
regs->tnpc += 4;
if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
......@@ -474,7 +474,7 @@ asmlinkage int sunos_syscall(struct pt_regs *regs)
regs->tpc = regs->tnpc;
regs->tnpc += 4;
if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
......@@ -494,11 +494,11 @@ asmlinkage int sys_utrap_install(utrap_entry_t type, utrap_handler_t new_p,
return -EINVAL;
if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
if (old_p) {
if (!current->thread.utraps) {
if (!current_thread_info()->utraps) {
if (put_user(NULL, old_p))
return -EFAULT;
} else {
if (put_user((utrap_handler_t)(current->thread.utraps[type]), old_p))
if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
return -EFAULT;
}
}
......@@ -508,39 +508,39 @@ asmlinkage int sys_utrap_install(utrap_entry_t type, utrap_handler_t new_p,
}
return 0;
}
if (!current->thread.utraps) {
current->thread.utraps =
if (!current_thread_info()->utraps) {
current_thread_info()->utraps =
kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
if (!current->thread.utraps) return -ENOMEM;
current->thread.utraps[0] = 1;
memset(current->thread.utraps+1, 0, UT_TRAP_INSTRUCTION_31*sizeof(long));
if (!current_thread_info()->utraps) return -ENOMEM;
current_thread_info()->utraps[0] = 1;
memset(current_thread_info()->utraps+1, 0, UT_TRAP_INSTRUCTION_31*sizeof(long));
} else {
if ((utrap_handler_t)current->thread.utraps[type] != new_p &&
current->thread.utraps[0] > 1) {
long *p = current->thread.utraps;
if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
current_thread_info()->utraps[0] > 1) {
long *p = current_thread_info()->utraps;
current->thread.utraps =
current_thread_info()->utraps =
kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
GFP_KERNEL);
if (!current->thread.utraps) {
current->thread.utraps = p;
if (!current_thread_info()->utraps) {
current_thread_info()->utraps = p;
return -ENOMEM;
}
p[0]--;
current->thread.utraps[0] = 1;
memcpy(current->thread.utraps+1, p+1,
current_thread_info()->utraps[0] = 1;
memcpy(current_thread_info()->utraps+1, p+1,
UT_TRAP_INSTRUCTION_31*sizeof(long));
}
}
if (old_p) {
if (put_user((utrap_handler_t)(current->thread.utraps[type]), old_p))
if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
return -EFAULT;
}
if (old_d) {
if (put_user(NULL, old_d))
return -EFAULT;
}
current->thread.utraps[type] = (long)new_p;
current_thread_info()->utraps[type] = (long)new_p;
return 0;
}
......@@ -589,10 +589,10 @@ update_perfctrs(void)
unsigned long pic, tmp;
read_pic(pic);
tmp = (current->thread.kernel_cntd0 += (unsigned int)pic);
__put_user(tmp, current->thread.user_cntd0);
tmp = (current->thread.kernel_cntd1 += (pic >> 32));
__put_user(tmp, current->thread.user_cntd1);
tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
__put_user(tmp, current_thread_info()->user_cntd0);
tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
__put_user(tmp, current_thread_info()->user_cntd1);
reset_pic();
}
......@@ -603,24 +603,24 @@ sys_perfctr(int opcode, unsigned long arg0, unsigned long arg1, unsigned long ar
switch(opcode) {
case PERFCTR_ON:
current->thread.pcr_reg = arg2;
current->thread.user_cntd0 = (u64 *) arg0;
current->thread.user_cntd1 = (u64 *) arg1;
current->thread.kernel_cntd0 =
current->thread.kernel_cntd1 = 0;
current_thread_info()->pcr_reg = arg2;
current_thread_info()->user_cntd0 = (u64 *) arg0;
current_thread_info()->user_cntd1 = (u64 *) arg1;
current_thread_info()->kernel_cntd0 =
current_thread_info()->kernel_cntd1 = 0;
write_pcr(arg2);
reset_pic();
current->thread.flags |= SPARC_FLAG_PERFCTR;
set_thread_flag(TIF_PERFCTR);
break;
case PERFCTR_OFF:
err = -EINVAL;
if ((current->thread.flags & SPARC_FLAG_PERFCTR) != 0) {
current->thread.user_cntd0 =
current->thread.user_cntd1 = NULL;
current->thread.pcr_reg = 0;
if (test_thread_flag(TIF_PERFCTR)) {
current_thread_info()->user_cntd0 =
current_thread_info()->user_cntd1 = NULL;
current_thread_info()->pcr_reg = 0;
write_pcr(0);
current->thread.flags &= ~(SPARC_FLAG_PERFCTR);
clear_thread_flag(TIF_PERFCTR);
err = 0;
}
break;
......@@ -628,50 +628,50 @@ sys_perfctr(int opcode, unsigned long arg0, unsigned long arg1, unsigned long ar
case PERFCTR_READ: {
unsigned long pic, tmp;
if (!(current->thread.flags & SPARC_FLAG_PERFCTR)) {
if (!test_thread_flag(TIF_PERFCTR)) {
err = -EINVAL;
break;
}
read_pic(pic);
tmp = (current->thread.kernel_cntd0 += (unsigned int)pic);
err |= __put_user(tmp, current->thread.user_cntd0);
tmp = (current->thread.kernel_cntd1 += (pic >> 32));
err |= __put_user(tmp, current->thread.user_cntd1);
tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
err |= __put_user(tmp, current_thread_info()->user_cntd0);
tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
err |= __put_user(tmp, current_thread_info()->user_cntd1);
reset_pic();
break;
}
case PERFCTR_CLRPIC:
if (!(current->thread.flags & SPARC_FLAG_PERFCTR)) {
if (!test_thread_flag(TIF_PERFCTR)) {
err = -EINVAL;
break;
}
current->thread.kernel_cntd0 =
current->thread.kernel_cntd1 = 0;
current_thread_info()->kernel_cntd0 =
current_thread_info()->kernel_cntd1 = 0;
reset_pic();
break;
case PERFCTR_SETPCR: {
u64 *user_pcr = (u64 *)arg0;
if (!(current->thread.flags & SPARC_FLAG_PERFCTR)) {
if (!test_thread_flag(TIF_PERFCTR)) {
err = -EINVAL;
break;
}
err |= __get_user(current->thread.pcr_reg, user_pcr);
write_pcr(current->thread.pcr_reg);
current->thread.kernel_cntd0 =
current->thread.kernel_cntd1 = 0;
err |= __get_user(current_thread_info()->pcr_reg, user_pcr);
write_pcr(current_thread_info()->pcr_reg);
current_thread_info()->kernel_cntd0 =
current_thread_info()->kernel_cntd1 = 0;
reset_pic();
break;
}
case PERFCTR_GETPCR: {
u64 *user_pcr = (u64 *)arg0;
if (!(current->thread.flags & SPARC_FLAG_PERFCTR)) {
if (!test_thread_flag(TIF_PERFCTR)) {
err = -EINVAL;
break;
}
err |= __put_user(current->thread.pcr_reg, user_pcr);
err |= __put_user(current_thread_info()->pcr_reg, user_pcr);
break;
}
......
/* $Id: sys_sparc32.c,v 1.182 2001/10/18 09:06:36 davem Exp $
/* $Id: sys_sparc32.c,v 1.184 2002/02/09 19:49:31 davem Exp $
* sys_sparc32.c: Conversion between 32bit and 64bit native syscalls.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
......@@ -49,6 +49,7 @@
#include <linux/in.h>
#include <linux/icmpv6.h>
#include <linux/sysctl.h>
#include <linux/binfmts.h>
#include <asm/types.h>
#include <asm/ipc.h>
......@@ -1461,6 +1462,8 @@ asmlinkage int sys32_select(int n, u32 *inp, u32 *outp, u32 *exp, u32 tvp_x)
static int cp_new_stat32(struct kstat *stat, struct stat32 *statbuf)
{
int err;
err = put_user(stat->dev, &statbuf->st_dev);
err |= put_user(stat->ino, &statbuf->st_ino);
err |= put_user(stat->mode, &statbuf->st_mode);
......@@ -1479,6 +1482,7 @@ static int cp_new_stat32(struct kstat *stat, struct stat32 *statbuf)
err |= put_user(stat->blocks, &statbuf->st_blocks);
err |= put_user(0, &statbuf->__unused4[0]);
err |= put_user(0, &statbuf->__unused4[1]);
return err;
}
......@@ -2723,8 +2727,8 @@ asmlinkage int sys32_sigaction (int sig, struct old_sigaction32 *act, struct old
struct k_sigaction new_ka, old_ka;
int ret;
if(sig < 0) {
current->thread.flags |= SPARC_FLAG_NEWSIGNALS;
if (sig < 0) {
set_thread_flag(TIF_NEWSIGNALS);
sig = -sig;
}
......@@ -2768,7 +2772,7 @@ sys32_rt_sigaction(int sig, struct sigaction32 *act, struct sigaction32 *oact,
/* All tasks which use RT signals (effectively) use
* new style signals.
*/
current->thread.flags |= SPARC_FLAG_NEWSIGNALS;
set_thread_flag(TIF_NEWSIGNALS);
if (act) {
new_ka.ka_restorer = restorer;
......@@ -2991,8 +2995,8 @@ asmlinkage int sparc32_execve(struct pt_regs *regs)
if(!error) {
fprs_write(0);
current->thread.xfsr[0] = 0;
current->thread.fpsaved[0] = 0;
current_thread_info()->xfsr[0] = 0;
current_thread_info()->fpsaved[0] = 0;
regs->tstate &= ~TSTATE_PEF;
}
out:
......
/* $Id: sys_sunos32.c,v 1.62 2002/01/08 16:00:14 davem Exp $
/* $Id: sys_sunos32.c,v 1.64 2002/02/09 19:49:31 davem Exp $
* sys_sunos32.c: SunOS binary compatibility layer on sparc64.
*
* Copyright (C) 1995, 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
......@@ -455,8 +455,8 @@ asmlinkage int sunos_nosys(void)
siginfo_t info;
static int cnt;
regs = current->thread.kregs;
if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
regs = current_thread_info()->kregs;
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
......@@ -1046,7 +1046,7 @@ asmlinkage int sunos_msgsys(int op, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
if(!kmbuf)
break;
sp = (struct sparc_stackf32 *)
(current->thread.kregs->u_regs[UREG_FP] & 0xffffffffUL);
(current_thread_info()->kregs->u_regs[UREG_FP] & 0xffffffffUL);
if(get_user(arg5, &sp->xxargs[0])) {
rval = -EFAULT;
kfree(kmbuf);
......
/* $Id: systbls.S,v 1.80 2002/01/31 03:30:06 davem Exp $
/* $Id: systbls.S,v 1.81 2002/02/08 03:57:14 davem Exp $
* systbls.S: System call entry point tables for OS compatibility.
* The native Linux system call table lives here also.
*
......@@ -56,7 +56,7 @@ sys_call_table32:
/*170*/ .word sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys32_getdents
.word sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr
/*180*/ .word sys_flistxattr, sys_removexattr, sys_lremovexattr, sys32_sigpending, sys32_query_module
.word sys_setpgid, sys_fremovexattr, sys_nis_syscall, sys_nis_syscall, sparc64_newuname
.word sys_setpgid, sys_fremovexattr, sys_tkill, sys_nis_syscall, sparc64_newuname
/*190*/ .word sys32_init_module, sparc64_personality, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
.word sys_nis_syscall, sys_nis_syscall, sys_getppid, sys32_sigaction, sys_sgetmask
/*200*/ .word sys_ssetmask, sys_sigsuspend, sys32_newlstat, sys_uselib, old32_readdir
......@@ -70,7 +70,7 @@ sys_call_table32:
/*240*/ .word sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
.word sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys32_sched_rr_get_interval, sys32_nanosleep
/*250*/ .word sys32_mremap, sys32_sysctl, sys_getsid, sys_fdatasync, sys32_nfsservctl
.word sys_aplib, sys_tkill
.word sys_aplib
/* Now the 64-bit native Linux syscall table. */
......@@ -115,7 +115,7 @@ sys_call_table:
/*170*/ .word sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys_getdents
.word sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr
/*180*/ .word sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_nis_syscall, sys_query_module
.word sys_setpgid, sys_fremovexattr, sys_nis_syscall, sys_nis_syscall, sparc64_newuname
.word sys_setpgid, sys_fremovexattr, sys_tkill, sys_nis_syscall, sparc64_newuname
/*190*/ .word sys_init_module, sparc64_personality, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
.word sys_nis_syscall, sys_nis_syscall, sys_getppid, sys_nis_syscall, sys_sgetmask
/*200*/ .word sys_ssetmask, sys_nis_syscall, sys_newlstat, sys_uselib, sys_nis_syscall
......@@ -129,7 +129,7 @@ sys_call_table:
/*240*/ .word sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
.word sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep
/*250*/ .word sys64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
.word sys_aplib, sys_tkill
.word sys_aplib
#if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \
defined(CONFIG_SOLARIS_EMUL_MODULE)
......
/* $Id: trampoline.S,v 1.25 2002/01/11 08:45:38 davem Exp $
/* $Id: trampoline.S,v 1.26 2002/02/09 19:49:30 davem Exp $
* trampoline.S: Jump start slave processors on sparc64.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
......@@ -14,7 +14,7 @@
#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/processor.h>
#include <asm/asm_offsets.h>
#include <asm/thread_info.h>
.data
.align 8
......@@ -262,7 +262,7 @@ startup_continue:
wrpr %o1, PSTATE_IG, %pstate
/* Get our UPA MID. */
lduw [%o2 + AOFF_task_cpu], %g1
ldub [%o2 + TI_CPU], %g1
sethi %hi(cpu_data), %g5
or %g5, %lo(cpu_data), %g5
......
/* $Id: traps.c,v 1.84 2002/01/30 01:39:56 davem Exp $
/* $Id: traps.c,v 1.85 2002/02/09 19:49:31 davem Exp $
* arch/sparc64/kernel/traps.c
*
* Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
......@@ -51,7 +51,7 @@ void bad_trap (struct pt_regs *regs, long lvl)
sprintf(buffer, "Kernel bad sw trap %lx", lvl);
die_if_kernel (buffer, regs);
}
if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
......@@ -89,7 +89,7 @@ void instruction_access_exception (struct pt_regs *regs,
sfsr, sfar);
die_if_kernel("Iax", regs);
}
if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
......@@ -1311,14 +1311,14 @@ void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned
void do_fpe_common(struct pt_regs *regs)
{
if(regs->tstate & TSTATE_PRIV) {
if (regs->tstate & TSTATE_PRIV) {
regs->tpc = regs->tnpc;
regs->tnpc += 4;
} else {
unsigned long fsr = current->thread.xfsr[0];
unsigned long fsr = current_thread_info()->xfsr[0];
siginfo_t info;
if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
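
The recurring change throughout this file replaces tests of current->thread.flags (SPARC_FLAG_*) with test_thread_flag(TIF_*) on the per-thread flags word. A rough user-space analog of that flag-word idiom, with made-up bit numbers and structure:

#include <stdio.h>

#define TIF_32BIT	1	/* bit numbers here are illustrative only */
#define TIF_NEWSIGNALS	2

struct ti_flags_sketch { unsigned long flags; };

static int test_ti_flag(const struct ti_flags_sketch *ti, int bit)
{
	return (ti->flags >> bit) & 1;
}

static void set_ti_flag(struct ti_flags_sketch *ti, int bit)
{
	ti->flags |= 1UL << bit;
}

int main(void)
{
	struct ti_flags_sketch ti = { 0 };

	set_ti_flag(&ti, TIF_32BIT);
	printf("32bit=%d newsignals=%d\n",
	       test_ti_flag(&ti, TIF_32BIT),
	       test_ti_flag(&ti, TIF_NEWSIGNALS));
	return 0;
}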
......@@ -1355,7 +1355,7 @@ void do_fpother(struct pt_regs *regs)
struct fpustate *f = FPUSTATE;
int ret = 0;
switch ((current->thread.xfsr[0] & 0x1c000)) {
switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
case (2 << 14): /* unfinished_FPop */
case (3 << 14): /* unimplemented_FPop */
ret = do_mathemu(regs, f);
......@@ -1370,9 +1370,9 @@ void do_tof(struct pt_regs *regs)
{
siginfo_t info;
if(regs->tstate & TSTATE_PRIV)
if (regs->tstate & TSTATE_PRIV)
die_if_kernel("Penguin overflow trap from kernel mode", regs);
if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
......@@ -1388,7 +1388,7 @@ void do_div0(struct pt_regs *regs)
{
siginfo_t info;
if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
......@@ -1404,11 +1404,11 @@ void instruction_dump (unsigned int *pc)
{
int i;
if((((unsigned long) pc) & 3))
if ((((unsigned long) pc) & 3))
return;
printk("Instruction DUMP:");
for(i = -3; i < 6; i++)
for (i = -3; i < 6; i++)
printk("%c%08x%c",i?' ':'<',pc[i],i?' ':'>');
printk("\n");
}
......@@ -1418,14 +1418,14 @@ void user_instruction_dump (unsigned int *pc)
int i;
unsigned int buf[9];
if((((unsigned long) pc) & 3))
if ((((unsigned long) pc) & 3))
return;
if(copy_from_user(buf, pc - 3, sizeof(buf)))
if (copy_from_user(buf, pc - 3, sizeof(buf)))
return;
printk("Instruction DUMP:");
for(i = 0; i < 9; i++)
for (i = 0; i < 9; i++)
printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>');
printk("\n");
}
......@@ -1433,18 +1433,18 @@ void user_instruction_dump (unsigned int *pc)
void show_trace_task(struct task_struct *tsk)
{
unsigned long pc, fp;
unsigned long task_base = (unsigned long)tsk;
unsigned long thread_base = (unsigned long) tsk->thread_info;
struct reg_window *rw;
int count = 0;
if (!tsk)
return;
fp = tsk->thread.ksp + STACK_BIAS;
fp = tsk->thread_info->ksp + STACK_BIAS;
do {
/* Bogus frame pointer? */
if (fp < (task_base + sizeof(struct task_struct)) ||
fp >= (task_base + THREAD_SIZE))
if (fp < (thread_base + sizeof(struct thread_info)) ||
fp >= (thread_base + THREAD_SIZE))
break;
rw = (struct reg_window *)fp;
pc = rw->ins[7];
......@@ -1471,7 +1471,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
printk("%s(%d): %s\n", current->comm, current->pid, str);
__asm__ __volatile__("flushw");
__show_regs(regs);
if(regs->tstate & TSTATE_PRIV) {
if (regs->tstate & TSTATE_PRIV) {
struct reg_window *rw = (struct reg_window *)
(regs->u_regs[UREG_FP] + STACK_BIAS);
......@@ -1479,12 +1479,12 @@ void die_if_kernel(char *str, struct pt_regs *regs)
* find some badly aligned kernel stack.
*/
lastrw = (struct reg_window *)current;
while(rw &&
count++ < 30 &&
rw >= lastrw &&
(char *) rw < ((char *) current)
+ sizeof (union task_union) &&
!(((unsigned long) rw) & 0x7)) {
while (rw &&
count++ < 30 &&
rw >= lastrw &&
(char *) rw < ((char *) current)
+ sizeof (union thread_union) &&
!(((unsigned long) rw) & 0x7)) {
printk("Caller[%016lx]\n", rw->ins[7]);
lastrw = rw;
rw = (struct reg_window *)
......@@ -1492,7 +1492,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
}
instruction_dump ((unsigned int *) regs->tpc);
} else {
if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
......@@ -1502,7 +1502,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
smp_report_regs();
#endif
if(regs->tstate & TSTATE_PRIV)
if (regs->tstate & TSTATE_PRIV)
do_exit(SIGKILL);
do_exit(SIGSEGV);
}
......@@ -1517,9 +1517,9 @@ void do_illegal_instruction(struct pt_regs *regs)
u32 insn;
siginfo_t info;
if(tstate & TSTATE_PRIV)
if (tstate & TSTATE_PRIV)
die_if_kernel("Kernel illegal instruction", regs);
if(current->thread.flags & SPARC_FLAG_32BIT)
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 *)pc) != -EFAULT) {
if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
......@@ -1542,7 +1542,7 @@ void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned lo
{
siginfo_t info;
if(regs->tstate & TSTATE_PRIV) {
if (regs->tstate & TSTATE_PRIV) {
extern void kernel_unaligned_trap(struct pt_regs *regs,
unsigned int insn,
unsigned long sfar, unsigned long sfsr);
......@@ -1561,7 +1561,7 @@ void do_privop(struct pt_regs *regs)
{
siginfo_t info;
if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
......@@ -1669,15 +1669,42 @@ void do_getpsr(struct pt_regs *regs)
regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
regs->tpc = regs->tnpc;
regs->tnpc += 4;
if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
}
extern void thread_info_offsets_are_bolixed_dave(void);
/* Only invoked on boot processor. */
void trap_init(void)
{
/* Compile time sanity check. */
if (TI_TASK != offsetof(struct thread_info, task) ||
TI_FLAGS != offsetof(struct thread_info, flags) ||
TI_CPU != offsetof(struct thread_info, cpu) ||
TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
TI_KSP != offsetof(struct thread_info, ksp) ||
TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
TI_KREGS != offsetof(struct thread_info, kregs) ||
TI_UTRAPS != offsetof(struct thread_info, utraps) ||
TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
TI_GSR != offsetof(struct thread_info, gsr) ||
TI_XFSR != offsetof(struct thread_info, xfsr) ||
TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
TI_PCR != offsetof(struct thread_info, pcr_reg) ||
TI_CEE_STUFF != offsetof(struct thread_info, cee_stuff) ||
TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
TI_FPREGS != offsetof(struct thread_info, fpregs) ||
(TI_FPREGS & (64 - 1)))
thread_info_offsets_are_bolixed_dave();
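
The check above keeps the hand-maintained TI_* assembler constants in sync with the C struct layout: any mismatch calls a deliberately undefined function and breaks the kernel link. A stand-alone analog of the same idea, expressed with C11 _Static_assert over an invented layout (the field offsets assume an LP64 target):

#include <stddef.h>
#include <stdio.h>

struct ti_sketch {		/* invented layout, not the real thread_info */
	void		*task;	/* offset 0 */
	unsigned long	flags;	/* offset 8, assuming an LP64 target */
	int		cpu;
};

#define TI_TASK_ASM	0	/* what the .S files would hard-code */
#define TI_FLAGS_ASM	8

_Static_assert(TI_TASK_ASM  == offsetof(struct ti_sketch, task),
	       "TI_TASK out of sync with struct ti_sketch");
_Static_assert(TI_FLAGS_ASM == offsetof(struct ti_sketch, flags),
	       "TI_FLAGS out of sync with struct ti_sketch");

int main(void)
{
	printf("task@%zu flags@%zu cpu@%zu\n",
	       offsetof(struct ti_sketch, task),
	       offsetof(struct ti_sketch, flags),
	       offsetof(struct ti_sketch, cpu));
	return 0;
}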
/* Attach to the address space of init_task. On SMP we
* do this in smp.c:smp_callin for other cpus.
*/
......@@ -1685,6 +1712,6 @@ void trap_init(void)
current->active_mm = &init_mm;
#ifdef CONFIG_SMP
current->cpu = hard_smp_processor_id();
current_thread_info()->cpu = hard_smp_processor_id();
#endif
}
/* $Id: ttable.S,v 1.37 2002/01/23 11:27:32 davem Exp $
/* $Id: ttable.S,v 1.38 2002/02/09 19:49:30 davem Exp $
* ttable.S: Sparc V9 Trap Table(s) with SpitFire/Cheetah extensions.
*
* Copyright (C) 1996, 2001 David S. Miller (davem@caip.rutgers.edu)
......@@ -211,20 +211,20 @@ tl1_ivec: TRAP_IVEC
tl1_paw: TRAPTL1(do_paw_tl1)
tl1_vaw: TRAPTL1(do_vaw_tl1)
/* The grotty trick to save %g1 into current->thread.kernel_cntd0
/* The grotty trick to save %g1 into current->thread.cee_stuff
* is because when we take this trap we could be interrupting trap
* code already using the trap alternate global registers. It is
* better to corrupt a performance counter than corrupt trap register
* state. We cross our fingers and pray that this store/load does
* code already using the trap alternate global registers.
*
* We cross our fingers and pray that this store/load does
* not cause yet another CEE trap.
*/
tl1_cee: membar #Sync
stx %g1, [%g6 + AOFF_task_thread + AOFF_thread_kernel_cntd0]
stx %g1, [%g6 + TI_CEE_STUFF]
ldxa [%g0] ASI_AFSR, %g1
membar #Sync
stxa %g1, [%g0] ASI_AFSR
membar #Sync
ldx [%g6 + AOFF_task_thread + AOFF_thread_kernel_cntd0], %g1
ldx [%g6 + TI_CEE_STUFF], %g1
retry
tl1_iamiss: BTRAPTL1(0x64) BTRAPTL1(0x65) BTRAPTL1(0x66) BTRAPTL1(0x67)
......
/* $Id: unaligned.c,v 1.23 2001/04/09 04:29:03 davem Exp $
/* $Id: unaligned.c,v 1.24 2002/02/09 19:49:31 davem Exp $
* unaligned.c: Unaligned load/store trap handling with special
* cases for the kernel to do them more quickly.
*
......@@ -42,7 +42,7 @@ static inline enum direction decode_direction(unsigned int insn)
{
unsigned long tmp = (insn >> 21) & 1;
if(!tmp)
if (!tmp)
return load;
else {
switch ((insn>>19)&0xf) {
......@@ -63,15 +63,15 @@ static inline int decode_access_size(unsigned int insn)
if (tmp == 11 || tmp == 14) /* ldx/stx */
return 8;
tmp &= 3;
if(!tmp)
if (!tmp)
return 4;
else if(tmp == 3)
else if (tmp == 3)
return 16; /* ldd/std - Although it is actually 8 */
else if(tmp == 2)
else if (tmp == 2)
return 2;
else {
printk("Impossible unaligned trap. insn=%08x\n", insn);
die_if_kernel("Byte sized unaligned access?!?!", current->thread.kregs);
die_if_kernel("Byte sized unaligned access?!?!", current_thread_info()->kregs);
}
}
......@@ -95,8 +95,8 @@ static inline int decode_signedness(unsigned int insn)
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
unsigned int rd, int from_kernel)
{
if(rs2 >= 16 || rs1 >= 16 || rd >= 16) {
if(from_kernel != 0)
if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
if (from_kernel != 0)
__asm__ __volatile__("flushw");
else
flushw_user();
......@@ -112,13 +112,13 @@ static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
unsigned long value;
if(reg < 16)
if (reg < 16)
return (!reg ? 0 : regs->u_regs[reg]);
if (regs->tstate & TSTATE_PRIV) {
struct reg_window *win;
win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
value = win->locals[reg - 16];
} else if (current->thread.flags & SPARC_FLAG_32BIT) {
} else if (test_thread_flag(TIF_32BIT)) {
struct reg_window32 *win32;
win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
get_user(value, &win32->locals[reg - 16]);
......@@ -132,13 +132,13 @@ static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
if(reg < 16)
if (reg < 16)
return &regs->u_regs[reg];
if (regs->tstate & TSTATE_PRIV) {
struct reg_window *win;
win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
return &win->locals[reg - 16];
} else if (current->thread.flags & SPARC_FLAG_32BIT) {
} else if (test_thread_flag(TIF_32BIT)) {
struct reg_window32 *win32;
win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
return (unsigned long *)&win32->locals[reg - 16];
......@@ -156,7 +156,7 @@ static inline unsigned long compute_effective_address(struct pt_regs *regs,
unsigned int rs2 = insn & 0x1f;
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
if(insn & 0x2000) {
if (insn & 0x2000) {
maybe_flush_windows(rs1, 0, rd, from_kernel);
return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
} else {
......@@ -335,7 +335,7 @@ static inline void advance(struct pt_regs *regs)
{
regs->tpc = regs->tnpc;
regs->tnpc += 4;
if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
......@@ -360,7 +360,7 @@ void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
if (!fixup) {
unsigned long address = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f));
if(address < PAGE_SIZE) {
if (address < PAGE_SIZE) {
printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
} else
printk(KERN_ALERT "Unable to handle kernel paging request in mna handler");
......@@ -387,7 +387,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u
enum direction dir = decode_direction(insn);
int size = decode_access_size(insn);
if(!ok_for_kernel(insn) || dir == both) {
if (!ok_for_kernel(insn) || dir == both) {
printk("Unsupported unaligned load/store trap for kernel at <%016lx>.\n",
regs->tpc);
unaligned_panic("Kernel does fpu/atomic unaligned load/store.", regs);
......@@ -408,7 +408,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u
printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] retpc[%016lx]\n",
regs->tpc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]);
#endif
switch(dir) {
switch (dir) {
case load:
do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
size, (unsigned long *) addr,
......@@ -459,11 +459,11 @@ int handle_popc(u32 insn, struct pt_regs *regs)
ret += popc_helper[value & 0xf];
value >>= 4;
}
if(rd < 16) {
if (rd < 16) {
if (rd)
regs->u_regs[rd] = ret;
} else {
if (current->thread.flags & SPARC_FLAG_32BIT) {
if (test_thread_flag(TIF_32BIT)) {
struct reg_window32 *win32;
win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
put_user(ret, &win32->locals[rd - 16]);
......@@ -490,9 +490,9 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
int flag = (freg < 32) ? FPRS_DL : FPRS_DU;
save_and_clear_fpu();
current->thread.xfsr[0] &= ~0x1c000;
current_thread_info()->xfsr[0] &= ~0x1c000;
if (freg & 3) {
current->thread.xfsr[0] |= (6 << 14) /* invalid_fp_register */;
current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
do_fpother(regs);
return 0;
}
......@@ -500,7 +500,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
/* STQ */
u64 first = 0, second = 0;
if (current->thread.fpsaved[0] & flag) {
if (current_thread_info()->fpsaved[0] & flag) {
first = *(u64 *)&f->regs[freg];
second = *(u64 *)&f->regs[freg+2];
}
......@@ -575,18 +575,18 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
break;
}
}
if (!(current->thread.fpsaved[0] & FPRS_FEF)) {
current->thread.fpsaved[0] = FPRS_FEF;
current->thread.gsr[0] = 0;
if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
current_thread_info()->fpsaved[0] = FPRS_FEF;
current_thread_info()->gsr[0] = 0;
}
if (!(current->thread.fpsaved[0] & flag)) {
if (!(current_thread_info()->fpsaved[0] & flag)) {
if (freg < 32)
memset(f->regs, 0, 32*sizeof(u32));
else
memset(f->regs+32, 0, 32*sizeof(u32));
}
memcpy(f->regs + freg, data, size * 4);
current->thread.fpsaved[0] |= flag;
current_thread_info()->fpsaved[0] |= flag;
}
advance(regs);
return 1;
......@@ -604,7 +604,7 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs)
reg[0] = 0;
if ((insn & 0x780000) == 0x180000)
reg[1] = 0;
} else if (current->thread.flags & SPARC_FLAG_32BIT) {
} else if (test_thread_flag(TIF_32BIT)) {
put_user(0, (int *)reg);
if ((insn & 0x780000) == 0x180000)
put_user(0, ((int *)reg) + 1);
......@@ -627,9 +627,9 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
int flag;
struct fpustate *f = FPUSTATE;
if(tstate & TSTATE_PRIV)
if (tstate & TSTATE_PRIV)
die_if_kernel("lddfmna from kernel", regs);
if(current->thread.flags & SPARC_FLAG_32BIT)
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 *)pc) != -EFAULT) {
asi = sfsr >> 16;
......@@ -649,18 +649,18 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
if (asi & 0x8) /* Little */
value = __swab64p(&value);
flag = (freg < 32) ? FPRS_DL : FPRS_DU;
if (!(current->thread.fpsaved[0] & FPRS_FEF)) {
current->thread.fpsaved[0] = FPRS_FEF;
current->thread.gsr[0] = 0;
if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
current_thread_info()->fpsaved[0] = FPRS_FEF;
current_thread_info()->gsr[0] = 0;
}
if (!(current->thread.fpsaved[0] & flag)) {
if (!(current_thread_info()->fpsaved[0] & flag)) {
if (freg < 32)
memset(f->regs, 0, 32*sizeof(u32));
else
memset(f->regs+32, 0, 32*sizeof(u32));
}
*(u64 *)(f->regs + freg) = value;
current->thread.fpsaved[0] |= flag;
current_thread_info()->fpsaved[0] |= flag;
} else {
daex: data_access_exception(regs);
return;
......@@ -679,9 +679,9 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
int flag;
struct fpustate *f = FPUSTATE;
if(tstate & TSTATE_PRIV)
if (tstate & TSTATE_PRIV)
die_if_kernel("stdfmna from kernel", regs);
if(current->thread.flags & SPARC_FLAG_32BIT)
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 *)pc) != -EFAULT) {
freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
......@@ -692,7 +692,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
(asi < ASI_P))
goto daex;
save_and_clear_fpu();
if (current->thread.fpsaved[0] & flag)
if (current_thread_info()->fpsaved[0] & flag)
value = *(u64 *)&f->regs[freg];
switch (asi) {
case ASI_P:
......
/* $Id: VIScopy.S,v 1.26 2001/09/27 04:36:24 kanoj Exp $
/* $Id: VIScopy.S,v 1.27 2002/02/09 19:49:30 davem Exp $
* VIScopy.S: High speed copy operations utilizing the UltraSparc
* Visual Instruction Set.
*
......@@ -26,24 +26,24 @@
#ifdef __KERNEL__
#include <asm/visasm.h>
#include <asm/asm_offsets.h>
#include <asm/thread_info.h>
#define FPU_CLEAN_RETL \
ldub [%g6 + AOFF_task_thread + AOFF_thread_current_ds], %o1; \
VISExit \
clr %o0; \
retl; \
#define FPU_CLEAN_RETL \
ldub [%g6 + TI_CURRENT_DS], %o1; \
VISExit \
clr %o0; \
retl; \
wr %o1, %g0, %asi;
#define FPU_RETL \
ldub [%g6 + AOFF_task_thread + AOFF_thread_current_ds], %o1; \
VISExit \
clr %o0; \
retl; \
#define FPU_RETL \
ldub [%g6 + TI_CURRENT_DS], %o1; \
VISExit \
clr %o0; \
retl; \
wr %o1, %g0, %asi;
#define NORMAL_RETL \
ldub [%g6 + AOFF_task_thread + AOFF_thread_current_ds], %o1; \
clr %o0; \
retl; \
#define NORMAL_RETL \
ldub [%g6 + TI_CURRENT_DS], %o1; \
clr %o0; \
retl; \
wr %o1, %g0, %asi;
#define EX(x,y,a,b) \
98: x,y; \
......@@ -1032,7 +1032,7 @@ VIScopyfixup_ret:
/* If this is copy_from_user(), zero out the rest of the
* kernel buffer.
*/
ldub [%g6 + AOFF_task_thread + AOFF_thread_current_ds], %o4
ldub [%g6 + TI_CURRENT_DS], %o4
andcc asi_src, 0x1, %g0
be,pt %icc, 1f
VISExit
......
/* $Id: VIScsum.S,v 1.6 2000/02/20 23:21:39 davem Exp $
/* $Id: VIScsum.S,v 1.7 2002/02/09 19:49:30 davem Exp $
* VIScsum.S: High bandwidth IP checksumming utilizing the UltraSparc
* Visual Instruction Set.
*
......@@ -28,7 +28,7 @@
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/visasm.h>
#include <asm/asm_offsets.h>
#include <asm/thread_info.h>
#else
#define ASI_BLK_P 0xf0
#define FRPS_FEF 0x04
......@@ -342,7 +342,7 @@ csum_partial:
DO_THE_TRICK(f44,f46,f48,f50,f52,f54,f56,f58,f60,f62,f0,f2,f4,f6,f8,f10,f12,f14)
END_THE_TRICK(f60,f62,f0,f2,f4,f6,f8,f10,f12,f14,f16,f18,f20,f22,f24,f26,f28,f30)
#ifdef __KERNEL__
ldub [%g6 + AOFF_task_thread + AOFF_thread_current_ds], %g7
ldub [%g6 + TI_CURRENT_DS], %g7
#endif
and %o1, 0x3f, %o1 /* IEU0 Group */
#ifdef __KERNEL__
......
/* $Id: VISsave.S,v 1.5 2001/03/08 22:08:51 davem Exp $
/* $Id: VISsave.S,v 1.6 2002/02/09 19:49:30 davem Exp $
* VISsave.S: Code for saving FPU register state for
* VIS routines. One should not call this directly,
* but use macros provided in <asm/visasm.h>.
......@@ -10,6 +10,7 @@
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/visasm.h>
#include <asm/thread_info.h>
.text
.globl VISenter, VISenterhalf
......@@ -17,47 +18,51 @@
/* On entry: %o5=current FPRS value, %g7 is callers address */
/* May clobber %o5, %g1, %g2, %g3, %g7, %icc, %xcc */
/* Nothing special need be done here to handle pre-emption, this
* FPU save/restore mechanism is already preemption safe.
*/
.align 32
VISenter:
ldub [%g6 + AOFF_task_thread + AOFF_thread_fpdepth], %g1
ldub [%g6 + TI_FPDEPTH], %g1
brnz,a,pn %g1, 1f
cmp %g1, 1
stb %g0, [%g6 + AOFF_task_thread + AOFF_thread_fpsaved]
stx %fsr, [%g6 + AOFF_task_thread + AOFF_thread_xfsr]
stb %g0, [%g6 + TI_FPSAVED]
stx %fsr, [%g6 + TI_XFSR]
9: jmpl %g7 + %g0, %g0
nop
1: bne,pn %icc, 2f
srl %g1, 1, %g1
vis1: ldub [%g6 + AOFF_task_thread + AOFF_thread_fpsaved], %g3
stx %fsr, [%g6 + AOFF_task_thread + AOFF_thread_xfsr]
vis1: ldub [%g6 + TI_FPSAVED], %g3
stx %fsr, [%g6 + TI_XFSR]
or %g3, %o5, %g3
stb %g3, [%g6 + AOFF_task_thread + AOFF_thread_fpsaved]
stb %g3, [%g6 + TI_FPSAVED]
rd %gsr, %g3
clr %g1
ba,pt %xcc, 3f
stx %g3, [%g6 + AOFF_task_thread + AOFF_thread_gsr]
stx %g3, [%g6 + TI_GSR]
2: add %g6, %g1, %g3
cmp %o5, FPRS_DU
be,pn %icc, 6f
sll %g1, 3, %g1
stb %o5, [%g3 + AOFF_task_thread + AOFF_thread_fpsaved]
stb %o5, [%g3 + TI_FPSAVED]
rd %gsr, %g2
add %g6, %g1, %g3
stx %g2, [%g3 + AOFF_task_thread + AOFF_thread_gsr]
stx %g2, [%g3 + TI_GSR]
add %g6, %g1, %g2
stx %fsr, [%g2 + AOFF_task_thread + AOFF_thread_xfsr]
stx %fsr, [%g2 + TI_XFSR]
sll %g1, 5, %g1
3: andcc %o5, FPRS_DL|FPRS_DU, %g0
be,pn %icc, 9b
add %g6, AOFF_task_fpregs, %g2
add %g6, TI_FPREGS, %g2
andcc %o5, FPRS_DL, %g0
membar #StoreStore | #LoadStore
be,pn %icc, 4f
add %g6, AOFF_task_fpregs+0x40, %g3
add %g6, TI_FPREGS+0x40, %g3
stda %f0, [%g2 + %g1] ASI_BLK_P
stda %f16, [%g3 + %g1] ASI_BLK_P
andcc %o5, FPRS_DU, %g0
......@@ -70,13 +75,13 @@ vis1: ldub [%g6 + AOFF_task_thread + AOFF_thread_fpsaved], %g3
jmpl %g7 + %g0, %g0
nop
6: ldub [%g3 + AOFF_task_thread + AOFF_thread_fpsaved], %o5
6: ldub [%g3 + TI_FPSAVED], %o5
or %o5, FPRS_DU, %o5
add %g6, AOFF_task_fpregs+0x80, %g2
stb %o5, [%g3 + AOFF_task_thread + AOFF_thread_fpsaved]
add %g6, TI_FPREGS+0x80, %g2
stb %o5, [%g3 + TI_FPSAVED]
sll %g1, 5, %g1
add %g6, AOFF_task_fpregs+0xc0, %g3
add %g6, TI_FPREGS+0xc0, %g3
wr %g0, FPRS_FEF, %fprs
membar #StoreStore | #LoadStore
stda %f32, [%g2 + %g1] ASI_BLK_P
......@@ -88,11 +93,11 @@ vis1: ldub [%g6 + AOFF_task_thread + AOFF_thread_fpsaved], %g3
.align 32
VISenterhalf:
ldub [%g6 + AOFF_task_thread + AOFF_thread_fpdepth], %g1
ldub [%g6 + TI_FPDEPTH], %g1
brnz,a,pn %g1, 1f
cmp %g1, 1
stb %g0, [%g6 + AOFF_task_thread + AOFF_thread_fpsaved]
stx %fsr, [%g6 + AOFF_task_thread + AOFF_thread_xfsr]
stb %g0, [%g6 + TI_FPSAVED]
stx %fsr, [%g6 + TI_XFSR]
clr %o5
jmpl %g7 + %g0, %g0
wr %g0, FPRS_FEF, %fprs
......@@ -104,20 +109,20 @@ VISenterhalf:
2: addcc %g6, %g1, %g3
sll %g1, 3, %g1
andn %o5, FPRS_DU, %g2
stb %g2, [%g3 + AOFF_task_thread + AOFF_thread_fpsaved]
stb %g2, [%g3 + TI_FPSAVED]
rd %gsr, %g2
add %g6, %g1, %g3
stx %g2, [%g3 + AOFF_task_thread + AOFF_thread_gsr]
stx %g2, [%g3 + TI_GSR]
add %g6, %g1, %g2
stx %fsr, [%g2 + AOFF_task_thread + AOFF_thread_xfsr]
stx %fsr, [%g2 + TI_XFSR]
sll %g1, 5, %g1
3: andcc %o5, FPRS_DL, %g0
be,pn %icc, 4f
add %g6, AOFF_task_fpregs, %g2
add %g6, TI_FPREGS, %g2
membar #StoreStore | #LoadStore
add %g6, AOFF_task_fpregs+0x40, %g3
add %g6, TI_FPREGS+0x40, %g3
stda %f0, [%g2 + %g1] ASI_BLK_P
stda %f16, [%g3 + %g1] ASI_BLK_P
membar #Sync
......
/* $Id: blockops.S,v 1.41 2001/12/05 06:05:35 davem Exp $
/* $Id: blockops.S,v 1.42 2002/02/09 19:49:30 davem Exp $
* blockops.S: UltraSparc block zero optimized routines.
*
* Copyright (C) 1996, 1998, 1999, 2000 David S. Miller (davem@redhat.com)
......@@ -7,9 +7,9 @@
#include "VIS.h"
#include <asm/visasm.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/asm_offsets.h>
#define TOUCH(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7) \
fmovd %reg0, %f48; fmovd %reg1, %f50; \
......@@ -61,7 +61,7 @@ cheetah_patch_1:
* so we do not risk a multiple TLB match condition later when
* restoring those entries.
*/
ldub [%g6 + AOFF_task_thread + AOFF_thread_use_blkcommit], %g3
ldx [%g6 + TI_FLAGS], %g3
/* Spitfire Errata #32 workaround */
mov 0x8, %o4
......@@ -100,7 +100,7 @@ cheetah_patch_1:
stxa %g2, [%o3] ASI_DTLB_DATA_ACCESS
membar #Sync
cmp %g3, 0
andcc %g3, _TIF_BLKCOMMIT, %g0
bne,pn %xcc, copy_page_using_blkcommit
nop
......
......@@ -18,7 +18,7 @@
#include <asm/ptrace.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/asm_offsets.h>
#include <asm/thread_info.h>
/* The problem with the "add with carry" instructions on Ultra
* are two fold. Firstly, they cannot pair with jack shit,
......@@ -498,7 +498,7 @@ cpc_user_end:
.globl cpc_handler
cpc_handler:
ldx [%sp + 0x7ff + 128], %g1
ldub [%g6 + AOFF_task_thread + AOFF_thread_current_ds], %g3
ldub [%g6 + TI_CURRENT_DS], %g3
sub %g0, EFAULT, %g2
brnz,a,pt %g1, 1f
st %g2, [%g1]
......
......@@ -5,6 +5,7 @@
* Copyright (C) 2000 David S. Miller (davem@redhat.com)
*/
#include <linux/config.h>
#include <asm/thread_info.h>
#ifndef CONFIG_DEBUG_SPINLOCK
.text
......@@ -40,6 +41,11 @@ out:
membar #StoreLoad | #StoreStore
retl
mov %g1, %o0
#ifdef CONFIG_PREEMPT
ldsw [%g6 + TI_PRE_COUNT], %g3
add %g3, 1, %g3
stw %g3, [%g6 + TI_PRE_COUNT]
#endif
to_zero:
ldstub [%o1], %g3
brnz,pn %g3, spin_on_lock
......@@ -55,6 +61,11 @@ loop2: cas [%o0], %g5, %g7 /* ASSERT(g7 == 0) */
nop
membar #StoreStore | #LoadStore
stb %g0, [%o1]
#ifdef CONFIG_PREEMPT
ldsw [%g6 + TI_PRE_COUNT], %g3
sub %g3, 1, %g3
stw %g3, [%g6 + TI_PRE_COUNT]
#endif
b,pt %xcc, nzero
nop
......
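The CONFIG_PREEMPT hunks above raise and lower the per-thread TI_PRE_COUNT around the lock word, so the holder cannot be preempted while spinning on or owning the lock. A rough user-space analog, with a plain counter standing in for the preempt count and a C11 atomic flag for the lock; nothing about the real sparc64 code paths is implied:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag lock_word = ATOMIC_FLAG_INIT;
static _Thread_local int preempt_count;		/* analog of TI_PRE_COUNT */

static void raw_lock(void)
{
	preempt_count++;		/* block "preemption" before spinning */
	while (atomic_flag_test_and_set_explicit(&lock_word,
						 memory_order_acquire))
		;			/* spin */
}

static void raw_unlock(void)
{
	atomic_flag_clear_explicit(&lock_word, memory_order_release);
	preempt_count--;		/* allow it again after release */
}

int main(void)
{
	raw_lock();
	printf("locked, preempt_count=%d\n", preempt_count);
	raw_unlock();
	printf("unlocked, preempt_count=%d\n", preempt_count);
	return 0;
}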
/* $Id: math.c,v 1.11 1999/12/20 05:02:25 davem Exp $
/* $Id: math.c,v 1.12 2002/02/09 19:49:31 davem Exp $
* arch/sparc64/math-emu/math.c
*
* Copyright (C) 1997,1999 Jakub Jelinek (jj@ultra.linux.cz)
......@@ -90,25 +90,25 @@
*/
static inline int record_exception(struct pt_regs *regs, int eflag)
{
u64 fsr = current->thread.xfsr[0];
u64 fsr = current_thread_info()->xfsr[0];
int would_trap;
/* Determine if this exception would have generated a trap. */
would_trap = (fsr & ((long)eflag << FSR_TEM_SHIFT)) != 0UL;
/* If trapping, we only want to signal one bit. */
if(would_trap != 0) {
if (would_trap != 0) {
eflag &= ((fsr & FSR_TEM_MASK) >> FSR_TEM_SHIFT);
if((eflag & (eflag - 1)) != 0) {
if(eflag & FP_EX_INVALID)
if ((eflag & (eflag - 1)) != 0) {
if (eflag & FP_EX_INVALID)
eflag = FP_EX_INVALID;
else if(eflag & FP_EX_OVERFLOW)
else if (eflag & FP_EX_OVERFLOW)
eflag = FP_EX_OVERFLOW;
else if(eflag & FP_EX_UNDERFLOW)
else if (eflag & FP_EX_UNDERFLOW)
eflag = FP_EX_UNDERFLOW;
else if(eflag & FP_EX_DIVZERO)
else if (eflag & FP_EX_DIVZERO)
eflag = FP_EX_DIVZERO;
else if(eflag & FP_EX_INEXACT)
else if (eflag & FP_EX_INEXACT)
eflag = FP_EX_INEXACT;
}
}
......@@ -128,19 +128,19 @@ static inline int record_exception(struct pt_regs *regs, int eflag)
* CEXC just generated is OR'd into the
* existing value of AEXC.
*/
if(would_trap == 0)
if (would_trap == 0)
fsr |= ((long)eflag << FSR_AEXC_SHIFT);
/* If trapping, indicate fault trap type IEEE. */
if(would_trap != 0)
if (would_trap != 0)
fsr |= (1UL << 14);
current->thread.xfsr[0] = fsr;
current_thread_info()->xfsr[0] = fsr;
/* If we will not trap, advance the program counter over
* the instruction being handled.
*/
if(would_trap == 0) {
if (would_trap == 0) {
regs->tpc = regs->tnpc;
regs->tnpc += 4;
}
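When several FP exception bits are raised at once, record_exception() keeps only the highest-priority one; the `eflag & (eflag - 1)` test above is the usual trick for detecting more than one set bit. A stand-alone illustration with made-up flag values:

#include <stdio.h>

#define FP_EX_INVALID	0x10	/* values are illustrative only */
#define FP_EX_OVERFLOW	0x08
#define FP_EX_UNDERFLOW	0x04
#define FP_EX_DIVZERO	0x02
#define FP_EX_INEXACT	0x01

static int pick_one(int eflag)
{
	if ((eflag & (eflag - 1)) == 0)
		return eflag;		/* zero or exactly one bit set */
	if (eflag & FP_EX_INVALID)   return FP_EX_INVALID;
	if (eflag & FP_EX_OVERFLOW)  return FP_EX_OVERFLOW;
	if (eflag & FP_EX_UNDERFLOW) return FP_EX_UNDERFLOW;
	if (eflag & FP_EX_DIVZERO)   return FP_EX_DIVZERO;
	return FP_EX_INEXACT;
}

int main(void)
{
	int raised = FP_EX_OVERFLOW | FP_EX_INEXACT;

	printf("raised=%#x reported=%#x\n", raised, pick_one(raised));
	return 0;
}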
......@@ -174,10 +174,10 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
int IR;
long XR, xfsr;
if(tstate & TSTATE_PRIV)
if (tstate & TSTATE_PRIV)
die_if_kernel("FPQuad from kernel", regs);
if(current->thread.flags & SPARC_FLAG_32BIT)
pc = (u32)pc;
if (test_thread_flag(TIF_32BIT))
pc &= 0xffffffff;
if (get_user(insn, (u32 *)pc) != -EFAULT) {
if ((insn & 0xc1f80000) == 0x81a00000) /* FPOP1 */ {
switch ((insn >> 5) & 0x1ff) {
......@@ -231,9 +231,9 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
case FMOVQ3:
/* fmovq %fccX, %fY, %fZ */
if (!((insn >> 11) & 3))
XR = current->thread.xfsr[0] >> 10;
XR = current_thread_info()->xfsr[0] >> 10;
else
XR = current->thread.xfsr[0] >> (30 + ((insn >> 10) & 0x6));
XR = current_thread_info()->xfsr[0] >> (30 + ((insn >> 10) & 0x6));
XR &= 3;
IR = 0;
switch ((insn >> 14) & 0x7) {
......@@ -282,7 +282,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
XR = 0;
else if (freg < 16)
XR = regs->u_regs[freg];
else if (current->thread.flags & SPARC_FLAG_32BIT) {
else if (test_thread_flag(TIF_32BIT)) {
struct reg_window32 *win32;
flushw_user ();
win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
......@@ -305,7 +305,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
}
if (IR == 0) {
/* The fmov test was false. Do a nop instead */
current->thread.xfsr[0] &= ~(FSR_CEXC_MASK);
current_thread_info()->xfsr[0] &= ~(FSR_CEXC_MASK);
regs->tpc = regs->tnpc;
regs->tnpc += 4;
return 1;
......@@ -319,20 +319,20 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
if (type) {
argp rs1 = NULL, rs2 = NULL, rd = NULL;
freg = (current->thread.xfsr[0] >> 14) & 0xf;
freg = (current_thread_info()->xfsr[0] >> 14) & 0xf;
if (freg != (type >> 9))
goto err;
current->thread.xfsr[0] &= ~0x1c000;
current_thread_info()->xfsr[0] &= ~0x1c000;
freg = ((insn >> 14) & 0x1f);
switch (type & 0x3) {
case 3: if (freg & 2) {
current->thread.xfsr[0] |= (6 << 14) /* invalid_fp_register */;
current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
goto err;
}
case 2: freg = ((freg & 1) << 5) | (freg & 0x1e);
case 1: rs1 = (argp)&f->regs[freg];
flags = (freg < 32) ? FPRS_DL : FPRS_DU;
if (!(current->thread.fpsaved[0] & flags))
if (!(current_thread_info()->fpsaved[0] & flags))
rs1 = (argp)&zero;
break;
}
......@@ -344,13 +344,13 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
freg = (insn & 0x1f);
switch ((type >> 3) & 0x3) {
case 3: if (freg & 2) {
current->thread.xfsr[0] |= (6 << 14) /* invalid_fp_register */;
current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
goto err;
}
case 2: freg = ((freg & 1) << 5) | (freg & 0x1e);
case 1: rs2 = (argp)&f->regs[freg];
flags = (freg < 32) ? FPRS_DL : FPRS_DU;
if (!(current->thread.fpsaved[0] & flags))
if (!(current_thread_info()->fpsaved[0] & flags))
rs2 = (argp)&zero;
break;
}
......@@ -362,23 +362,23 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
freg = ((insn >> 25) & 0x1f);
switch ((type >> 6) & 0x3) {
case 3: if (freg & 2) {
current->thread.xfsr[0] |= (6 << 14) /* invalid_fp_register */;
current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
goto err;
}
case 2: freg = ((freg & 1) << 5) | (freg & 0x1e);
case 1: rd = (argp)&f->regs[freg];
flags = (freg < 32) ? FPRS_DL : FPRS_DU;
if (!(current->thread.fpsaved[0] & FPRS_FEF)) {
current->thread.fpsaved[0] = FPRS_FEF;
current->thread.gsr[0] = 0;
if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
current_thread_info()->fpsaved[0] = FPRS_FEF;
current_thread_info()->gsr[0] = 0;
}
if (!(current->thread.fpsaved[0] & flags)) {
if (!(current_thread_info()->fpsaved[0] & flags)) {
if (freg < 32)
memset(f->regs, 0, 32*sizeof(u32));
else
memset(f->regs+32, 0, 32*sizeof(u32));
}
current->thread.fpsaved[0] |= flags;
current_thread_info()->fpsaved[0] |= flags;
break;
}
switch ((insn >> 5) & 0x1ff) {
......@@ -439,7 +439,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
}
if (!FP_INHIBIT_RESULTS) {
switch ((type >> 6) & 0x7) {
case 0: xfsr = current->thread.xfsr[0];
case 0: xfsr = current_thread_info()->xfsr[0];
if (XR == -1) XR = 2;
switch (freg & 3) {
/* fcc0, 1, 2, 3 */
......@@ -448,7 +448,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
case 2: xfsr &= ~0xc00000000UL; xfsr |= (XR << 34); break;
case 3: xfsr &= ~0x3000000000UL; xfsr |= (XR << 36); break;
}
current->thread.xfsr[0] = xfsr;
current_thread_info()->xfsr[0] = xfsr;
break;
case 1: rd->s = IR; break;
case 2: rd->d = XR; break;
......@@ -458,11 +458,11 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
}
}
if(_fex != 0)
if (_fex != 0)
return record_exception(regs, _fex);
/* Success and no exceptions detected. */
current->thread.xfsr[0] &= ~(FSR_CEXC_MASK);
current_thread_info()->xfsr[0] &= ~(FSR_CEXC_MASK);
regs->tpc = regs->tnpc;
regs->tnpc += 4;
return 1;
......
/* $Id: fault.c,v 1.58 2001/09/01 00:11:16 kanoj Exp $
/* $Id: fault.c,v 1.59 2002/02/09 19:49:31 davem Exp $
* arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
......@@ -287,8 +287,8 @@ asmlinkage void do_sparc64_fault(struct pt_regs *regs)
unsigned long address;
si_code = SEGV_MAPERR;
fault_code = current->thread.fault_code;
address = current->thread.fault_address;
fault_code = get_thread_fault_code();
address = current_thread_info()->fault_address;
if ((fault_code & FAULT_CODE_ITLB) &&
(fault_code & FAULT_CODE_DTLB))
......@@ -301,7 +301,7 @@ asmlinkage void do_sparc64_fault(struct pt_regs *regs)
if (in_interrupt() || !mm)
goto intr_or_no_mm;
if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
address &= 0xffffffff;
}
......@@ -358,7 +358,7 @@ asmlinkage void do_sparc64_fault(struct pt_regs *regs)
if (tlb_type == spitfire &&
(vma->vm_flags & VM_EXEC) != 0 &&
vma->vm_file != NULL)
current->thread.use_blkcommit = 1;
set_thread_flag(TIF_BLKCOMMIT);
} else {
/* Allow reads even for write-only mappings */
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
......@@ -426,7 +426,7 @@ asmlinkage void do_sparc64_fault(struct pt_regs *regs)
fault_done:
/* These values are no longer needed, clear them. */
current->thread.fault_code = 0;
current->thread.use_blkcommit = 0;
current->thread.fault_address = 0;
set_thread_fault_code(0);
clear_thread_flag(TIF_BLKCOMMIT);
current_thread_info()->fault_address = 0;
}
/* $Id: init.c,v 1.208 2001/12/21 04:56:15 davem Exp $
/* $Id: init.c,v 1.209 2002/02/09 19:49:31 davem Exp $
* arch/sparc64/mm/init.c
*
* Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
......@@ -111,8 +111,6 @@ int do_check_pgt_cache(int low, int high)
return freed;
}
extern void __update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
......@@ -181,6 +179,8 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c
: "g5", "g7");
}
extern void __update_mmu_cache(unsigned long mmu_context_hw, unsigned long address, pte_t pte, int code);
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
struct page *page = pte_page(pte);
......@@ -201,7 +201,9 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
clear_dcache_dirty_cpu(page, cpu);
}
__update_mmu_cache(vma, address, pte);
if (get_thread_fault_code())
__update_mmu_cache(vma->vm_mm->context & TAG_CONTEXT_BITS,
address, pte, get_thread_fault_code());
}
void flush_dcache_page(struct page *page)
......@@ -706,7 +708,7 @@ void prom_world(int enter)
int i;
if (!enter)
set_fs(current->thread.current_ds);
set_fs((mm_segment_t) { get_thread_current_ds() });
if (!prom_ditlb_set)
return;
......
/* $Id: ultra.S,v 1.71 2002/01/23 11:27:36 davem Exp $
/* $Id: ultra.S,v 1.72 2002/02/09 19:49:31 davem Exp $
* ultra.S: Don't expand these all over the place...
*
* Copyright (C) 1997, 2000 David S. Miller (davem@redhat.com)
......@@ -391,8 +391,8 @@ __prefill_dtlb:
rdpr %pstate, %g7
wrpr %g7, PSTATE_IE, %pstate
mov TLB_TAG_ACCESS, %g1
stxa %o0, [%g1] ASI_DMMU
stxa %o1, [%g0] ASI_DTLB_DATA_IN
stxa %o5, [%g1] ASI_DMMU
stxa %o2, [%g0] ASI_DTLB_DATA_IN
flush %g6
retl
wrpr %g7, %pstate
......@@ -400,28 +400,20 @@ __prefill_itlb:
rdpr %pstate, %g7
wrpr %g7, PSTATE_IE, %pstate
mov TLB_TAG_ACCESS, %g1
stxa %o0, [%g1] ASI_IMMU
stxa %o1, [%g0] ASI_ITLB_DATA_IN
stxa %o5, [%g1] ASI_IMMU
stxa %o2, [%g0] ASI_ITLB_DATA_IN
flush %g6
retl
wrpr %g7, %pstate
.globl __update_mmu_cache
__update_mmu_cache: /* %o0=vma, %o1=address, %o2=pte */
ldub [%g6 + AOFF_task_thread + AOFF_thread_fault_code], %o3
__update_mmu_cache: /* %o0=hw_context, %o1=address, %o2=pte, %o3=fault_code */
srlx %o1, PAGE_SHIFT, %o1
ldx [%o0 + 0x0], %o4 /* XXX vma->vm_mm */
brz,pn %o3, 1f
sllx %o1, PAGE_SHIFT, %o0
ldx [%o4 + AOFF_mm_context], %o5
andcc %o3, FAULT_CODE_DTLB, %g0
mov %o2, %o1
and %o5, TAG_CONTEXT_BITS, %o5
sllx %o1, PAGE_SHIFT, %o5
bne,pt %xcc, __prefill_dtlb
or %o0, %o5, %o0
or %o5, %o0, %o5
ba,a,pt %xcc, __prefill_itlb
1: retl
nop
#ifdef CONFIG_SMP
/* These are all called by the slaves of a cross call, at
......@@ -501,8 +493,8 @@ xcall_report_regs:
109: or %g7, %lo(109b), %g7
call __show_regs
add %sp, STACK_BIAS + REGWIN_SZ, %o0
b,pt %xcc, rtrap
clr %l6
b,pt %xcc, rtrap_irq
nop
.align 32
.globl xcall_flush_dcache_page_cheetah
......@@ -562,8 +554,8 @@ xcall_capture:
109: or %g7, %lo(109b), %g7
call smp_penguin_jailcell
nop
b,pt %xcc, rtrap
clr %l6
b,pt %xcc, rtrap_irq
nop
.globl xcall_promstop
xcall_promstop:
......@@ -689,8 +681,8 @@ xcall_call_function:
109: or %g7, %lo(109b), %g7
call smp_call_function_client
nop
b,pt %xcc, rtrap
clr %l6
b,pt %xcc, rtrap_irq
nop
.globl xcall_migrate_task
xcall_migrate_task:
......
/* $Id: entry64.S,v 1.6 2000/01/12 02:59:26 davem Exp $
/* $Id: entry64.S,v 1.7 2002/02/09 19:49:31 davem Exp $
* entry64.S: Solaris syscall emulation entry point.
*
* Copyright (C) 1996,1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
......@@ -68,9 +68,11 @@ linux_syscall_for_solaris:
/* Solaris system calls enter here... */
.align 32
.globl solaris_sparc_syscall
.globl solaris_sparc_syscall, entry64_personality_patch
solaris_sparc_syscall:
ldub [%g6 + AOFF_task_personality + ASIZ_task_personality - 1], %l0
ldx [%g6 + TI_TASK], %l0
entry64_personality_patch:
ldub [%l0 + 0x0], %l0
cmp %g1, 255
bg,pn %icc, solaris_unimplemented
srl %g1, 0, %g1
......@@ -83,7 +85,7 @@ solaris_sparc_syscall:
1: srl %i0, 0, %o0
lduw [%l7 + %l4], %l3
srl %i1, 0, %o1
ldx [%g6 + AOFF_task_flags], %l5
ldx [%g6 + TI_FLAGS], %l5
cmp %l3, NR_SYSCALLS
bleu,a,pn %xcc, linux_syscall_for_solaris
sethi %hi(sys_call_table32), %l6
......@@ -93,21 +95,21 @@ solaris_sparc_syscall:
10: srl %i2, 0, %o2
mov %i5, %o5
andn %l3, 3, %l7
andcc %l5, 0x20, %g0
andcc %l5, _TIF_SYSCALL_TRACE, %g0
bne,pn %icc, solaris_syscall_trace
mov %i0, %l5
2: call %l7
srl %i3, 0, %o3
ret_from_solaris:
stx %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I0]
ldx [%g6 + AOFF_task_flags], %l6
ldx [%g6 + TI_FLAGS], %l6
sra %o0, 0, %o0
mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TSTATE], %g3
cmp %o0, -ENOIOCTLCMD
sllx %g2, 32, %g2
bgeu,pn %xcc, 1f
andcc %l6, 0x20, %l6
andcc %l6, _TIF_SYSCALL_TRACE, %l6
/* System call success, clear Carry condition code. */
andn %g3, %g2, %g3
......@@ -175,25 +177,30 @@ solaris_sigsuspend:
.globl solaris_getpid
solaris_getpid:
call sys_getppid /* This is tricky, so don't do it in assembly */
call sys_getppid
nop
stx %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I1]
call sys_getpid
stx %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I1]
b,pt %xcc, ret_from_solaris
lduw [%g6 + AOFF_task_pid], %o0
nop
.globl solaris_getuid
solaris_getuid:
lduw [%g6 + AOFF_task_euid], %o1
lduw [%g6 + AOFF_task_uid], %o0
b,pt %xcc, ret_from_solaris
call sys_geteuid
nop
call sys_getuid
stx %o1, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I1]
b,pt %xcc, ret_from_solaris
nop
.globl solaris_getgid
solaris_getgid:
lduw [%g6 + AOFF_task_egid], %o1
lduw [%g6 + AOFF_task_gid], %o0
b,pt %xcc, ret_from_solaris
call sys_getegid
nop
call sys_getgid
stx %o1, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I1]
b,pt %xcc, ret_from_solaris
nop
.globl solaris_unimplemented
solaris_unimplemented:
......
/* $Id: fs.c,v 1.26 2002/01/08 16:00:21 davem Exp $
/* $Id: fs.c,v 1.27 2002/02/08 03:57:14 davem Exp $
* fs.c: fs related syscall emulation for Solaris
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
......
/* $Id: ioctl.c,v 1.16 2000/11/18 02:10:59 davem Exp $
/* $Id: ioctl.c,v 1.17 2002/02/08 03:57:14 davem Exp $
* ioctl.c: Solaris ioctl emulation.
*
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
......
/* $Id: misc.c,v 1.35 2002/01/08 16:00:21 davem Exp $
/* $Id: misc.c,v 1.36 2002/02/09 19:49:31 davem Exp $
* misc.c: Miscellaneous syscall emulation for Solaris
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
......@@ -739,6 +739,8 @@ extern u32 solaris_sparc_syscall[];
extern u32 solaris_syscall[];
extern void cleanup_socksys(void);
extern u32 entry64_personality_patch;
int init_module(void)
{
int ret;
......@@ -750,6 +752,11 @@ int init_module(void)
return ret;
}
update_ttable(solaris_sparc_syscall);
entry64_personality_patch |=
(offsetof(struct task_struct, personality) +
(sizeof(unsigned long) - 1));
__asm__ __volatile__("membar #StoreStore; flush %0"
: : "r" (&entry64_personality_patch));
return 0;
}
......
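init_module() above patches the byte offset of task_struct.personality into the immediate field of the load at entry64_personality_patch and then flushes the modified instruction. A stand-alone sketch of OR-ing an offset into the low immediate bits of a 32-bit instruction word (the encoding below is a placeholder, not a real SPARC ldub):

#include <stdint.h>
#include <stdio.h>

/* The SPARC simm13 field occupies the low 13 bits of the instruction. */
static uint32_t patch_simm13(uint32_t insn, uint32_t offset)
{
	return insn | (offset & 0x1fff);
}

int main(void)
{
	uint32_t insn = 0x12340000;	/* placeholder encoding, immediate 0 */
	uint32_t off  = 0x3a7;		/* pretend offsetof() result */

	printf("before=%08x after=%08x\n",
	       (unsigned)insn, (unsigned)patch_simm13(insn, off));
	return 0;
}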
/* $Id: socket.c,v 1.5 2001/02/13 01:16:44 davem Exp $
/* $Id: socket.c,v 1.6 2002/02/08 03:57:14 davem Exp $
* socket.c: Socket syscall emulation for Solaris 2.6+
*
* Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
......
/* $Id: socksys.c,v 1.20 2002/01/08 16:00:21 davem Exp $
/* $Id: socksys.c,v 1.21 2002/02/08 03:57:14 davem Exp $
* socksys.c: /dev/inet/ stuff for Solaris emulation.
*
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
......
/* $Id: timod.c,v 1.18 2002/01/08 16:00:21 davem Exp $
/* $Id: timod.c,v 1.19 2002/02/08 03:57:14 davem Exp $
* timod.c: timod emulation.
*
* Copyright (C) 1998 Patrik Rak (prak3264@ss1000.ms.mff.cuni.cz)
......
......@@ -1338,7 +1338,7 @@ kbd_read (struct file *f, char *buffer, size_t count, loff_t *ppos)
spin_unlock_irqrestore(&kbd_queue_lock, flags);
#ifdef CONFIG_SPARC32_COMPAT
if (current->thread.flags & SPARC_FLAG_32BIT) {
if (test_thread_flag(TIF_32BIT)) {
if (copy_to_user((Firm_event *)p, &this_event,
sizeof(Firm_event)-sizeof(struct timeval)))
return -EFAULT;
......
......@@ -462,7 +462,7 @@ sun_mouse_read(struct file *file, char *buffer,
spin_unlock_irqrestore(&sunmouse.lock, flags);
#ifdef CONFIG_SPARC32_COMPAT
if (current->thread.flags & SPARC_FLAG_32BIT) {
if (test_thread_flag(TIF_32BIT)) {
if ((end - p) <
((sizeof(Firm_event) - sizeof(struct timeval) +
(sizeof(u32) * 2))))
......
......@@ -3,6 +3,7 @@
* ATI Mach64 Hardware Acceleration
*/
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/fb.h>
......
......@@ -16,6 +16,7 @@
#define __NO_VERSION__
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <asm/system.h>
#include <asm/uaccess.h>
......
......@@ -439,9 +439,9 @@ extern void release_segments(struct mm_struct * mm);
/*
* Return saved PC of a blocked thread.
*/
static inline unsigned long thread_saved_pc(struct thread_struct *t)
static inline unsigned long thread_saved_pc(struct task_struct *tsk)
{
return ((unsigned long *)t->esp)[3];
return ((unsigned long *)tsk->thread->esp)[3];
}
unsigned long get_wchan(struct task_struct *p);
......
/* $Id: checksum.h,v 1.32 2001/10/30 04:32:24 davem Exp $ */
/* $Id: checksum.h,v 1.33 2002/02/01 22:01:05 davem Exp $ */
#ifndef __SPARC_CHECKSUM_H
#define __SPARC_CHECKSUM_H
......@@ -16,6 +16,7 @@
* RFC1071 Computing the Internet Checksum
*/
#include <linux/in6.h>
#include <asm/uaccess.h>
#include <asm/cprefix.h>
......
/* $Id: siginfo.h,v 1.8 2000/05/27 00:49:37 davem Exp $
/* $Id: siginfo.h,v 1.9 2002/02/08 03:57:18 davem Exp $
* siginfo.c:
*/
......
/* $Id: unistd.h,v 1.73 2002/01/31 03:30:13 davem Exp $ */
/* $Id: unistd.h,v 1.74 2002/02/08 03:57:18 davem Exp $ */
#ifndef _SPARC_UNISTD_H
#define _SPARC_UNISTD_H
......@@ -202,7 +202,7 @@
#define __NR_query_module 184 /* Linux Specific */
#define __NR_setpgid 185 /* Common */
#define __NR_fremovexattr 186 /* SunOS: pathconf */
/* #define __NR_fpathconf 187 SunOS Specific */
#define __NR_tkill 187 /* SunOS: fpathconf */
/* #define __NR_sysconf 188 SunOS Specific */
#define __NR_uname 189 /* Linux Specific */
#define __NR_init_module 190 /* Linux Specific */
......@@ -271,7 +271,6 @@
#define __NR_fdatasync 253
#define __NR_nfsservctl 254
#define __NR_aplib 255
#define __NR_tkill 257
#define _syscall0(type,name) \
type name(void) \
......
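With __NR_tkill now assigned slot 187, user space on 32-bit sparc can reach the call through the raw syscall() wrapper; signal 0 merely probes that the target thread exists. The number below comes from the hunk above and is wrong on any other architecture:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define NR_tkill_sparc32 187	/* from the unistd.h hunk above */

int main(void)
{
	/* tid == pid for the initial thread; signal 0 is only a probe. */
	long ret = syscall(NR_tkill_sparc32, (long)getpid(), 0L);

	printf("tkill(self, 0) -> %ld\n", ret);
	return 0;
}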
/* $Id: a.out.h,v 1.7 2001/04/24 01:09:12 davem Exp $ */
/* $Id: a.out.h,v 1.8 2002/02/09 19:49:31 davem Exp $ */
#ifndef __SPARC64_A_OUT_H__
#define __SPARC64_A_OUT_H__
......@@ -95,7 +95,7 @@ struct relocation_info /* used when header.a_machtype == M_SPARC */
#ifdef __KERNEL__
#define STACK_TOP (current->thread.flags & SPARC_FLAG_32BIT ? 0xf0000000 : 0x80000000000L)
#define STACK_TOP (test_thread_flag(TIF_32BIT) ? 0xf0000000 : 0x80000000000L)
#endif
......
/* $Id: checksum.h,v 1.17 2001/04/24 01:09:12 davem Exp $ */
/* $Id: checksum.h,v 1.19 2002/02/09 19:49:31 davem Exp $ */
#ifndef __SPARC64_CHECKSUM_H
#define __SPARC64_CHECKSUM_H
......@@ -16,7 +16,8 @@
* RFC1071 Computing the Internet Checksum
*/
#include <asm/uaccess.h>
#include <linux/in6.h>
#include <asm/uaccess.h>
/* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
......@@ -44,7 +45,7 @@ csum_partial_copy_nocheck (const char *src, char *dst, int len,
unsigned int sum)
{
int ret;
unsigned char cur_ds = current->thread.current_ds.seg;
unsigned char cur_ds = get_thread_current_ds();
__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "i" (ASI_P));
ret = csum_partial_copy_sparc64(src, dst, len, sum);
__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" (cur_ds));
......
#ifndef _SPARC64_CURRENT_H
#define _SPARC64_CURRENT_H
/* Sparc rules... */
register struct task_struct *current asm("g6");
#include <asm/thread_info.h>
#define current (current_thread_info()->task)
#endif /* !(_SPARC64_CURRENT_H) */
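After this change %g6 carries a struct thread_info pointer rather than the task pointer, and `current` is reached through thread_info->task. A user-space analog of that indirection, with a thread-local pointer standing in for the reserved global register:

#include <stdio.h>

struct task_sketch;
struct ti_ptr_sketch { struct task_sketch *task; };
struct task_sketch { const char *comm; struct ti_ptr_sketch ti; };

/* Plays the role of the reserved %g6 register. */
static _Thread_local struct ti_ptr_sketch *cur_ti;

#define current_ti()	(cur_ti)
#define current_task()	(current_ti()->task)

int main(void)
{
	static struct task_sketch init_task = { "init-sketch", { &init_task } };

	cur_ti = &init_task.ti;
	printf("current comm: %s\n", current_task()->comm);
	return 0;
}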
/* $Id: delay.h,v 1.12 2001/04/24 01:09:12 davem Exp $
/* $Id: delay.h,v 1.13 2002/02/02 03:33:48 kanoj Exp $
* delay.h: Linux delay routines on the V9.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu).
......@@ -9,9 +9,13 @@
#include <linux/config.h>
#include <linux/param.h>
#ifndef __ASSEMBLY__
#ifdef CONFIG_SMP
#include <linux/sched.h>
#include <asm/smp.h>
#else
extern unsigned long loops_per_jiffy;
#endif
extern __inline__ void __delay(unsigned long loops)
......@@ -49,4 +53,6 @@ extern __inline__ void __udelay(unsigned long usecs, unsigned long lps)
#define udelay(usecs) __udelay((usecs),__udelay_val)
#endif /* !__ASSEMBLY__ */
#endif /* defined(__SPARC64_DELAY_H) */
/* $Id: elf.h,v 1.31 2002/01/08 16:00:20 davem Exp $ */
/* $Id: elf.h,v 1.32 2002/02/09 19:49:31 davem Exp $ */
#ifndef __ASM_SPARC64_ELF_H
#define __ASM_SPARC64_ELF_H
......@@ -69,16 +69,11 @@ typedef struct {
#ifdef __KERNEL__
#define SET_PERSONALITY(ex, ibcs2) \
do { unsigned char flags = current->thread.flags; \
if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
flags |= SPARC_FLAG_32BIT; \
do { if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
set_thread_flag(TIF_32BIT); \
else \
flags &= ~SPARC_FLAG_32BIT; \
if (flags != current->thread.flags) { \
/* flush_thread will update pgd cache */\
current->thread.flags = flags; \
} \
\
clear_thread_flag(TIF_32BIT); \
/* flush_thread will update pgd cache */ \
if (ibcs2) \
set_personality(PER_SVR4); \
else if (current->personality != PER_LINUX32) \
......
......@@ -14,7 +14,7 @@ struct fpustate {
u32 regs[64];
};
#define FPUSTATE (struct fpustate *)(((unsigned long)current) + AOFF_task_fpregs)
#define FPUSTATE (struct fpustate *)(current_thread_info()->fpregs)
extern __inline__ unsigned long fprs_read(void)
{
......
......@@ -56,6 +56,8 @@ typedef struct {
#define synchronize_irq() barrier()
#define release_irqlock(cpu) do { } while (0)
#else /* (CONFIG_SMP) */
static __inline__ int irqs_running(void)
......
/* $Id: mmu_context.h,v 1.53 2002/01/30 01:40:00 davem Exp $ */
/* $Id: mmu_context.h,v 1.54 2002/02/09 19:49:31 davem Exp $ */
#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H
......@@ -101,7 +101,7 @@ do { \
register unsigned long pgd_cache asm("o4"); \
paddr = __pa((__mm)->pgd); \
pgd_cache = 0UL; \
if ((__tsk)->thread.flags & SPARC_FLAG_32BIT) \
if ((__tsk)->thread_info->flags & _TIF_32BIT) \
pgd_cache = pgd_val((__mm)->pgd[0]) << 11UL; \
__asm__ __volatile__("wrpr %%g0, 0x494, %%pstate\n\t" \
"mov %3, %%g4\n\t" \
......
/* $Id: page.h,v 1.38 2001/11/30 01:04:10 davem Exp $ */
/* $Id: page.h,v 1.39 2002/02/09 19:49:31 davem Exp $ */
#ifndef _SPARC64_PAGE_H
#define _SPARC64_PAGE_H
......@@ -95,7 +95,7 @@ typedef unsigned long iopgprot_t;
#endif /* (STRICT_MM_TYPECHECKS) */
#define TASK_UNMAPPED_BASE ((current->thread.flags & SPARC_FLAG_32BIT) ? \
#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \
(0x0000000070000000UL) : (PAGE_OFFSET))
#endif /* !(__ASSEMBLY__) */
......
......@@ -158,6 +158,7 @@ extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
struct page *page = virt_to_page(pgd);
preempt_disable();
if (!page->pprev_hash) {
(unsigned long *)page->next_hash = pgd_quicklist;
pgd_quicklist = (unsigned long *)page;
......@@ -165,12 +166,14 @@ extern __inline__ void free_pgd_fast(pgd_t *pgd)
(unsigned long)page->pprev_hash |=
(((unsigned long)pgd & (PAGE_SIZE / 2)) ? 2 : 1);
pgd_cache_size++;
preempt_enable();
}
extern __inline__ pgd_t *get_pgd_fast(void)
{
struct page *ret;
preempt_disable();
if ((ret = (struct page *)pgd_quicklist) != NULL) {
unsigned long mask = (unsigned long)ret->pprev_hash;
unsigned long off = 0;
......@@ -186,16 +189,22 @@ extern __inline__ pgd_t *get_pgd_fast(void)
pgd_quicklist = (unsigned long *)ret->next_hash;
ret = (struct page *)(__page_address(ret) + off);
pgd_cache_size--;
preempt_enable();
} else {
struct page *page = alloc_page(GFP_KERNEL);
struct page *page;
preempt_enable();
page = alloc_page(GFP_KERNEL);
if (page) {
ret = (struct page *)page_address(page);
clear_page(ret);
(unsigned long)page->pprev_hash = 2;
preempt_disable();
(unsigned long *)page->next_hash = pgd_quicklist;
pgd_quicklist = (unsigned long *)page;
pgd_cache_size++;
preempt_enable();
}
}
return (pgd_t *)ret;
......@@ -205,20 +214,25 @@ extern __inline__ pgd_t *get_pgd_fast(void)
extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
preempt_disable();
*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
pgd_quicklist = (unsigned long *) pgd;
pgtable_cache_size++;
preempt_enable();
}
extern __inline__ pgd_t *get_pgd_fast(void)
{
unsigned long *ret;
preempt_disable();
if((ret = pgd_quicklist) != NULL) {
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
pgtable_cache_size--;
preempt_enable();
} else {
preempt_enable();
ret = (unsigned long *) __get_free_page(GFP_KERNEL);
if(ret)
memset(ret, 0, PAGE_SIZE);
......@@ -258,20 +272,27 @@ extern __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long
if (pte_quicklist[color] == NULL)
color = 1;
preempt_disable();
if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
pte_quicklist[color] = (unsigned long *)(*ret);
ret[0] = 0;
pgtable_cache_size--;
}
preempt_enable();
return (pmd_t *)ret;
}
extern __inline__ void free_pmd_fast(pmd_t *pmd)
{
unsigned long color = DCACHE_COLOR((unsigned long)pmd);
preempt_disable();
*(unsigned long *)pmd = (unsigned long) pte_quicklist[color];
pte_quicklist[color] = (unsigned long *) pmd;
pgtable_cache_size++;
preempt_enable();
}
extern __inline__ void free_pmd_slow(pmd_t *pmd)
......@@ -288,20 +309,25 @@ extern __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long
unsigned long color = VPTE_COLOR(address);
unsigned long *ret;
preempt_disable();
if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
pte_quicklist[color] = (unsigned long *)(*ret);
ret[0] = 0;
pgtable_cache_size--;
}
preempt_enable();
return (pte_t *)ret;
}
extern __inline__ void free_pte_fast(pte_t *pte)
{
unsigned long color = DCACHE_COLOR((unsigned long)pte);
preempt_disable();
*(unsigned long *)pte = (unsigned long) pte_quicklist[color];
pte_quicklist[color] = (unsigned long *) pte;
pgtable_cache_size++;
preempt_enable();
}
extern __inline__ void free_pte_slow(pte_t *pte)
......
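The pgalloc hunks above bracket every quicklist push and pop with preempt_disable()/preempt_enable(), so the task cannot migrate CPUs between reading the per-CPU list head and writing it back; the freed page itself stores the next pointer in its first word. A minimal user-space sketch of that embedded-next-pointer list (the preempt calls are placeholders only):

#include <stdio.h>
#include <stdlib.h>

static unsigned long *quicklist;	/* per-"CPU" free-list head */
static unsigned int cache_size;

#define preempt_disable()	do { } while (0)	/* placeholder */
#define preempt_enable()	do { } while (0)	/* placeholder */

static void free_fast(unsigned long *page)
{
	preempt_disable();
	*page = (unsigned long)quicklist;	/* next pointer lives in the page */
	quicklist = page;
	cache_size++;
	preempt_enable();
}

static unsigned long *get_fast(void)
{
	unsigned long *ret;

	preempt_disable();
	ret = quicklist;
	if (ret) {
		quicklist = (unsigned long *)*ret;
		ret[0] = 0;
		cache_size--;
	}
	preempt_enable();
	return ret;
}

int main(void)
{
	unsigned long *page = calloc(512, sizeof(*page));

	if (!page)
		return 1;
	free_fast(page);
	printf("cached=%u\n", cache_size);
	printf("got back %p\n", (void *)get_fast());
	free(page);
	return 0;
}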
/* $Id: pgtable.h,v 1.155 2001/12/21 04:56:17 davem Exp $
/* $Id: pgtable.h,v 1.156 2002/02/09 19:49:31 davem Exp $
* pgtable.h: SpitFire page table operations.
*
* Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
......@@ -76,7 +76,7 @@
* is different so we can optimize correctly for 32-bit tasks.
*/
#define REAL_PTRS_PER_PMD (1UL << PMD_BITS)
#define PTRS_PER_PMD ((const int)((current->thread.flags & SPARC_FLAG_32BIT) ? \
#define PTRS_PER_PMD ((const int)(test_thread_flag(TIF_32BIT) ? \
(1UL << (32 - (PAGE_SHIFT-3) - PAGE_SHIFT)) : (REAL_PTRS_PER_PMD)))
/*
......@@ -90,8 +90,8 @@
(PAGE_SHIFT-3) + PMD_BITS)))
/* Kernel has a separate 44bit address space. */
#define USER_PTRS_PER_PGD ((const int)((current->thread.flags & SPARC_FLAG_32BIT) ? \
(1) : (PTRS_PER_PGD)))
#define USER_PTRS_PER_PGD ((const int)(test_thread_flag(TIF_32BIT)) ? \
(1) : (PTRS_PER_PGD))
#define FIRST_USER_PGD_NR 0
#define pte_ERROR(e) __builtin_trap()
......
/* $Id: ptrace.h,v 1.13 1997/09/17 17:27:51 davem Exp $ */
/* $Id: ptrace.h,v 1.14 2002/02/09 19:49:32 davem Exp $ */
#ifndef _SPARC64_PTRACE_H
#define _SPARC64_PTRACE_H
......@@ -110,8 +110,6 @@ extern void show_regs(struct pt_regs *);
#define TRACEREG32_SZ 0x50
#define STACKFRAME32_SZ 0x60
#define REGWIN32_SZ 0x40
#include <asm/asm_offsets.h>
#endif
#ifdef __KERNEL__
......
......@@ -74,7 +74,7 @@
/* Obtain the current rounding mode. */
#ifndef FP_ROUNDMODE
#define FP_ROUNDMODE ((current->thread.xfsr[0] >> 30) & 0x3)
#define FP_ROUNDMODE ((current_thread_info()->xfsr[0] >> 30) & 0x3)
#endif
/* Exception flags. */
......@@ -86,6 +86,6 @@
#define FP_HANDLE_EXCEPTIONS return _fex
#define FP_INHIBIT_RESULTS ((current->thread.xfsr[0] >> 23) & _fex)
#define FP_INHIBIT_RESULTS ((current_thread_info()->xfsr[0] >> 23) & _fex)
#endif
......@@ -103,7 +103,7 @@ extern __inline__ int hard_smp_processor_id(void)
}
}
#define smp_processor_id() (current->cpu)
#define smp_processor_id() (current_thread_info()->cpu)
/* This needn't do anything as we do not sleep the cpu
* inside of the idler task, so an interrupt is not needed
......
......@@ -9,9 +9,17 @@
extern spinlock_t kernel_flag;
#ifdef CONFIG_SMP
#define kernel_locked() \
(spin_is_locked(&kernel_flag) &&\
(current->lock_depth >= 0))
#else
#ifdef CONFIG_PREEMPT
#define kernel_locked() preempt_get_count()
#else
#define kernel_locked() 1
#endif
#endif
/*
* Release global kernel lock and global interrupt lock
......
......@@ -10,14 +10,15 @@
#include <asm/hardirq.h>
#include <asm/system.h> /* for membar() */
#define local_bh_disable() (local_bh_count(smp_processor_id())++)
#define __local_bh_enable() (local_bh_count(smp_processor_id())--)
#define local_bh_disable() do { barrier(); preempt_disable(); local_bh_count(smp_processor_id())++; } while (0)
#define __local_bh_enable() do { local_bh_count(smp_processor_id())--; preempt_enable(); barrier(); } while (0)
#define local_bh_enable() \
do { if (!--local_bh_count(smp_processor_id()) && \
softirq_pending(smp_processor_id())) { \
do_softirq(); \
__sti(); \
} \
preempt_enable(); \
} while (0)
#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
......
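Editor's note: the hardirq/softirq hunk nests the bottom-half count inside a preemption bracket and flushes pending softirqs before preemption is re-enabled; the added barrier() calls keep the compiler from hoisting protected accesses out of the bracket. A toy model of the pairing (the counters and do_softirq_model() are stand-ins):

#include <stdio.h>

static int preempt_count_model;
static int bh_count;
static int softirq_pending_model = 1;

static void do_softirq_model(void)
{
	printf("running softirqs\n");
	softirq_pending_model = 0;
}

static void local_bh_disable_model(void)
{
	preempt_count_model++;		/* preempt_disable() */
	bh_count++;
}

static void local_bh_enable_model(void)
{
	if (!--bh_count && softirq_pending_model)
		do_softirq_model();	/* flush work deferred while bhs were off */
	preempt_count_model--;		/* preempt_enable() */
}

int main(void)
{
	local_bh_disable_model();
	printf("bh off, preempt_count=%d\n", preempt_count_model);
	local_bh_enable_model();
	printf("bh on,  preempt_count=%d\n", preempt_count_model);
	return 0;
}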
......@@ -40,7 +40,7 @@ typedef unsigned char spinlock_t;
do { membar("#LoadLoad"); \
} while(*((volatile unsigned char *)lock))
extern __inline__ void spin_lock(spinlock_t *lock)
extern __inline__ void _raw_spin_lock(spinlock_t *lock)
{
__asm__ __volatile__(
"1: ldstub [%0], %%g7\n"
......@@ -57,7 +57,7 @@ extern __inline__ void spin_lock(spinlock_t *lock)
: "g7", "memory");
}
extern __inline__ int spin_trylock(spinlock_t *lock)
extern __inline__ int _raw_spin_trylock(spinlock_t *lock)
{
unsigned int result;
__asm__ __volatile__("ldstub [%1], %0\n\t"
......@@ -68,7 +68,7 @@ extern __inline__ int spin_trylock(spinlock_t *lock)
return (result == 0);
}
extern __inline__ void spin_unlock(spinlock_t *lock)
extern __inline__ void _raw_spin_unlock(spinlock_t *lock)
{
__asm__ __volatile__("membar #StoreStore | #LoadStore\n\t"
"stb %%g0, [%0]"
......@@ -99,9 +99,9 @@ extern void _do_spin_lock (spinlock_t *lock, char *str);
extern void _do_spin_unlock (spinlock_t *lock);
extern int _spin_trylock (spinlock_t *lock);
#define spin_trylock(lp) _spin_trylock(lp)
#define spin_lock(lock) _do_spin_lock(lock, "spin_lock")
#define spin_unlock(lock) _do_spin_unlock(lock)
#define _raw_spin_trylock(lp) _spin_trylock(lp)
#define _raw_spin_lock(lock) _do_spin_lock(lock, "spin_lock")
#define _raw_spin_unlock(lock) _do_spin_unlock(lock)
#endif /* CONFIG_DEBUG_SPINLOCK */
......@@ -118,10 +118,10 @@ extern void __read_unlock(rwlock_t *);
extern void __write_lock(rwlock_t *);
extern void __write_unlock(rwlock_t *);
#define read_lock(p) __read_lock(p)
#define read_unlock(p) __read_unlock(p)
#define write_lock(p) __write_lock(p)
#define write_unlock(p) __write_unlock(p)
#define _raw_read_lock(p) __read_lock(p)
#define _raw_read_unlock(p) __read_unlock(p)
#define _raw_write_lock(p) __write_lock(p)
#define _raw_write_unlock(p) __write_unlock(p)
#else /* !(CONFIG_DEBUG_SPINLOCK) */
......@@ -138,28 +138,28 @@ extern void _do_read_unlock(rwlock_t *rw, char *str);
extern void _do_write_lock(rwlock_t *rw, char *str);
extern void _do_write_unlock(rwlock_t *rw);
#define read_lock(lock) \
#define _raw_read_lock(lock) \
do { unsigned long flags; \
__save_and_cli(flags); \
_do_read_lock(lock, "read_lock"); \
__restore_flags(flags); \
} while(0)
#define read_unlock(lock) \
#define _raw_read_unlock(lock) \
do { unsigned long flags; \
__save_and_cli(flags); \
_do_read_unlock(lock, "read_unlock"); \
__restore_flags(flags); \
} while(0)
#define write_lock(lock) \
#define _raw_write_lock(lock) \
do { unsigned long flags; \
__save_and_cli(flags); \
_do_write_lock(lock, "write_lock"); \
__restore_flags(flags); \
} while(0)
#define write_unlock(lock) \
#define _raw_write_unlock(lock) \
do { unsigned long flags; \
__save_and_cli(flags); \
_do_write_unlock(lock); \
......
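Editor's note: renaming the arch spinlock and rwlock primitives to _raw_* is what lets the generic preempt-aware wrappers layer a preemption bracket on top of the raw hardware operation. A self-contained sketch of that layering under the preempt patch's convention; the _model types and functions are invented for illustration, not the actual <linux/spinlock.h> text.

#include <stdio.h>

typedef unsigned char raw_spinlock_model_t;
static int preempt_count_model;

/* The architecture supplies only the raw acquire/release... */
static void _raw_spin_lock_model(raw_spinlock_model_t *lock)   { *lock = 1; }
static void _raw_spin_unlock_model(raw_spinlock_model_t *lock) { *lock = 0; }

/* ...and the generic wrappers add the preemption bracket, so every held
 * spinlock also holds off preemption. */
static void spin_lock_model(raw_spinlock_model_t *lock)
{
	preempt_count_model++;		/* preempt_disable() */
	_raw_spin_lock_model(lock);
}

static void spin_unlock_model(raw_spinlock_model_t *lock)
{
	_raw_spin_unlock_model(lock);
	preempt_count_model--;		/* preempt_enable(): may reschedule here */
}

int main(void)
{
	raw_spinlock_model_t lock = 0;

	spin_lock_model(&lock);
	printf("held:     lock=%u preempt_count=%d\n", lock, preempt_count_model);
	spin_unlock_model(&lock);
	printf("released: lock=%u preempt_count=%d\n", lock, preempt_count_model);
	return 0;
}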
/* $Id: ttable.h,v 1.17 2001/11/28 23:32:16 davem Exp $ */
/* $Id: ttable.h,v 1.18 2002/02/09 19:49:32 davem Exp $ */
#ifndef _SPARC64_TTABLE_H
#define _SPARC64_TTABLE_H
#include <linux/config.h>
#include <asm/asm_offsets.h>
#include <asm/thread_info.h>
#include <asm/utrap.h>
#define BOOT_KERNEL b sparc64_boot; nop; nop; nop; nop; nop; nop; nop;
......@@ -104,14 +104,14 @@
mov num, %g1; \
nop;nop;nop;
#define TRAP_UTRAP(handler,lvl) \
ldx [%g6 + AOFF_task_thread + AOFF_thread_utraps], %g1; \
sethi %hi(109f), %g7; \
brz,pn %g1, utrap; \
or %g7, %lo(109f), %g7; \
ba,pt %xcc, utrap; \
109: ldx [%g1 + handler*8], %g1; \
ba,pt %xcc, utrap_ill; \
#define TRAP_UTRAP(handler,lvl) \
ldx [%g6 + TI_UTRAPS], %g1; \
sethi %hi(109f), %g7; \
brz,pn %g1, utrap; \
or %g7, %lo(109f), %g7; \
ba,pt %xcc, utrap; \
109: ldx [%g1 + handler*8], %g1; \
ba,pt %xcc, utrap_ill; \
mov lvl, %o1;
#ifdef CONFIG_SUNOS_EMUL
......@@ -140,7 +140,7 @@
mov level, %o0; \
call routine; \
add %sp, STACK_BIAS + REGWIN_SZ, %o1; \
ba,a,pt %xcc, rtrap_clr_l6;
ba,a,pt %xcc, rtrap_irq;
#define TICK_SMP_IRQ \
rdpr %pil, %g2; \
......@@ -150,7 +150,7 @@
109: or %g7, %lo(109b), %g7; \
call smp_percpu_timer_interrupt; \
add %sp, STACK_BIAS + REGWIN_SZ, %o0; \
ba,a,pt %xcc, rtrap_clr_l6;
ba,a,pt %xcc, rtrap_irq;
#define TRAP_IVEC TRAP_NOSAVE(do_ivec)
......
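Editor's note: the reworked TRAP_UTRAP macro indexes off the thread_info pointer in %g6 with a generated TI_UTRAPS constant instead of the old asm_offsets.h AOFF_* values. Such constants are simply structure offsets; a sketch using an invented, much-simplified layout:

#include <stdio.h>
#include <stddef.h>

/* Toy model of TI_UTRAPS: assembler offsets of this kind are offsetof()
 * values into thread_info, so "ldx [%g6 + TI_UTRAPS], %g1" loads ti->utraps.
 * The layout below is illustrative only. */
struct thread_info_model {
	unsigned long flags;
	int cpu;
	int preempt_count;
	unsigned long *utraps;		/* user trap handler table */
};

#define TI_UTRAPS_MODEL offsetof(struct thread_info_model, utraps)

int main(void)
{
	printf("TI_UTRAPS would be %zu in this layout\n", (size_t)TI_UTRAPS_MODEL);
	return 0;
}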
......@@ -250,6 +250,9 @@ EXPORT_SYMBOL(vfs_rmdir);
EXPORT_SYMBOL(vfs_unlink);
EXPORT_SYMBOL(vfs_rename);
EXPORT_SYMBOL(vfs_statfs);
EXPORT_SYMBOL(vfs_fstat);
EXPORT_SYMBOL(vfs_stat);
EXPORT_SYMBOL(vfs_lstat);
EXPORT_SYMBOL(generic_read_dir);
EXPORT_SYMBOL(generic_file_llseek);
EXPORT_SYMBOL(remote_llseek);
......