Commit 04324f44 authored by Thomas Bogendoerfer

MIPS: Remove get_fs/set_fs

All get_fs/set_fs calls in MIPS code are gone, so remove implementation
of it.  With the clear separation of user/kernel space access we no
longer need the EVA special handling, so get rid of that, too.
Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
parent 45deb5fa
...@@ -94,7 +94,6 @@ config MIPS ...@@ -94,7 +94,6 @@ config MIPS
select PERF_USE_VMALLOC select PERF_USE_VMALLOC
select PCI_MSI_ARCH_FALLBACKS if PCI_MSI select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
select RTC_LIB select RTC_LIB
select SET_FS
select SYSCTL_EXCEPTION_TRACE select SYSCTL_EXCEPTION_TRACE
select VIRT_TO_BUS select VIRT_TO_BUS
select ARCH_HAS_ELFCORE_COMPAT select ARCH_HAS_ELFCORE_COMPAT
......
...@@ -221,10 +221,6 @@ struct nlm_cop2_state { ...@@ -221,10 +221,6 @@ struct nlm_cop2_state {
#define COP2_INIT #define COP2_INIT
#endif #endif
typedef struct {
unsigned long seg;
} mm_segment_t;
#ifdef CONFIG_CPU_HAS_MSA #ifdef CONFIG_CPU_HAS_MSA
# define ARCH_MIN_TASKALIGN 16 # define ARCH_MIN_TASKALIGN 16
# define FPU_ALIGN __aligned(16) # define FPU_ALIGN __aligned(16)
......
...@@ -28,11 +28,6 @@ struct thread_info { ...@@ -28,11 +28,6 @@ struct thread_info {
unsigned long tp_value; /* thread pointer */ unsigned long tp_value; /* thread pointer */
__u32 cpu; /* current CPU */ __u32 cpu; /* current CPU */
int preempt_count; /* 0 => preemptable, <0 => BUG */ int preempt_count; /* 0 => preemptable, <0 => BUG */
mm_segment_t addr_limit; /*
* thread address space limit:
* 0x7fffffff for user-thead
* 0xffffffff for kernel-thread
*/
struct pt_regs *regs; struct pt_regs *regs;
long syscall; /* syscall number */ long syscall; /* syscall number */
}; };
...@@ -46,7 +41,6 @@ struct thread_info { ...@@ -46,7 +41,6 @@ struct thread_info {
.flags = _TIF_FIXADE, \ .flags = _TIF_FIXADE, \
.cpu = 0, \ .cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \ .preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
} }
/* /*
......
This diff is collapsed.
...@@ -98,7 +98,6 @@ void output_thread_info_defines(void) ...@@ -98,7 +98,6 @@ void output_thread_info_defines(void)
OFFSET(TI_TP_VALUE, thread_info, tp_value); OFFSET(TI_TP_VALUE, thread_info, tp_value);
OFFSET(TI_CPU, thread_info, cpu); OFFSET(TI_CPU, thread_info, cpu);
OFFSET(TI_PRE_COUNT, thread_info, preempt_count); OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
OFFSET(TI_REGS, thread_info, regs); OFFSET(TI_REGS, thread_info, regs);
DEFINE(_THREAD_SIZE, THREAD_SIZE); DEFINE(_THREAD_SIZE, THREAD_SIZE);
DEFINE(_THREAD_MASK, THREAD_MASK); DEFINE(_THREAD_MASK, THREAD_MASK);
......
...@@ -124,7 +124,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, ...@@ -124,7 +124,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
/* kernel thread */ /* kernel thread */
unsigned long status = p->thread.cp0_status; unsigned long status = p->thread.cp0_status;
memset(childregs, 0, sizeof(struct pt_regs)); memset(childregs, 0, sizeof(struct pt_regs));
ti->addr_limit = KERNEL_DS;
p->thread.reg16 = usp; /* fn */ p->thread.reg16 = usp; /* fn */
p->thread.reg17 = kthread_arg; p->thread.reg17 = kthread_arg;
p->thread.reg29 = childksp; p->thread.reg29 = childksp;
...@@ -145,7 +144,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, ...@@ -145,7 +144,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
childregs->regs[2] = 0; /* Child gets zero as return value */ childregs->regs[2] = 0; /* Child gets zero as return value */
if (usp) if (usp)
childregs->regs[29] = usp; childregs->regs[29] = usp;
ti->addr_limit = USER_DS;
p->thread.reg29 = (unsigned long) childregs; p->thread.reg29 = (unsigned long) childregs;
p->thread.reg31 = (unsigned long) ret_from_fork; p->thread.reg31 = (unsigned long) ret_from_fork;
......
...@@ -48,10 +48,8 @@ NESTED(handle_sys, PT_SIZE, sp) ...@@ -48,10 +48,8 @@ NESTED(handle_sys, PT_SIZE, sp)
* We intentionally keep the kernel stack a little below the top of * We intentionally keep the kernel stack a little below the top of
* userspace so we don't have to do a slower byte accurate check here. * userspace so we don't have to do a slower byte accurate check here.
*/ */
lw t5, TI_ADDR_LIMIT($28)
addu t4, t0, 32 addu t4, t0, 32
and t5, t4 bltz t4, bad_stack # -> sp is bad
bltz t5, bad_stack # -> sp is bad
/* /*
* Ok, copy the args from the luser stack to the kernel stack. * Ok, copy the args from the luser stack to the kernel stack.
......
...@@ -661,8 +661,14 @@ LEAF(memcpy) /* a0=dst a1=src a2=len */ ...@@ -661,8 +661,14 @@ LEAF(memcpy) /* a0=dst a1=src a2=len */
EXPORT_SYMBOL(memcpy) EXPORT_SYMBOL(memcpy)
move v0, dst /* return value */ move v0, dst /* return value */
.L__memcpy: .L__memcpy:
FEXPORT(__copy_user) #ifndef CONFIG_EVA
EXPORT_SYMBOL(__copy_user) FEXPORT(__raw_copy_from_user)
EXPORT_SYMBOL(__raw_copy_from_user)
FEXPORT(__raw_copy_to_user)
EXPORT_SYMBOL(__raw_copy_to_user)
FEXPORT(__raw_copy_in_user)
EXPORT_SYMBOL(__raw_copy_in_user)
#endif
/* Legacy Mode, user <-> user */ /* Legacy Mode, user <-> user */
__BUILD_COPY_USER LEGACY_MODE USEROP USEROP __BUILD_COPY_USER LEGACY_MODE USEROP USEROP
...@@ -681,10 +687,10 @@ EXPORT_SYMBOL(__copy_user) ...@@ -681,10 +687,10 @@ EXPORT_SYMBOL(__copy_user)
* __copy_from_user (EVA) * __copy_from_user (EVA)
*/ */
LEAF(__copy_from_user_eva) LEAF(__raw_copy_from_user)
EXPORT_SYMBOL(__copy_from_user_eva) EXPORT_SYMBOL(__raw_copy_from_user)
__BUILD_COPY_USER EVA_MODE USEROP KERNELOP __BUILD_COPY_USER EVA_MODE USEROP KERNELOP
END(__copy_from_user_eva) END(__raw_copy_from_user)
...@@ -692,18 +698,18 @@ END(__copy_from_user_eva) ...@@ -692,18 +698,18 @@ END(__copy_from_user_eva)
* __copy_to_user (EVA) * __copy_to_user (EVA)
*/ */
LEAF(__copy_to_user_eva) LEAF(__raw_copy_to_user)
EXPORT_SYMBOL(__copy_to_user_eva) EXPORT_SYMBOL(__raw_copy_to_user)
__BUILD_COPY_USER EVA_MODE KERNELOP USEROP __BUILD_COPY_USER EVA_MODE KERNELOP USEROP
END(__copy_to_user_eva) END(__raw_copy_to_user)
/* /*
* __copy_in_user (EVA) * __copy_in_user (EVA)
*/ */
LEAF(__copy_in_user_eva) LEAF(__raw_copy_in_user)
EXPORT_SYMBOL(__copy_in_user_eva) EXPORT_SYMBOL(__raw_copy_in_user)
__BUILD_COPY_USER EVA_MODE USEROP USEROP __BUILD_COPY_USER EVA_MODE USEROP USEROP
END(__copy_in_user_eva) END(__raw_copy_in_user)
#endif #endif
...@@ -314,9 +314,6 @@ EXPORT_SYMBOL(memset) ...@@ -314,9 +314,6 @@ EXPORT_SYMBOL(memset)
#ifndef CONFIG_EVA #ifndef CONFIG_EVA
FEXPORT(__bzero) FEXPORT(__bzero)
EXPORT_SYMBOL(__bzero) EXPORT_SYMBOL(__bzero)
#else
FEXPORT(__bzero_kernel)
EXPORT_SYMBOL(__bzero_kernel)
#endif #endif
__BUILD_BZERO LEGACY_MODE __BUILD_BZERO LEGACY_MODE
......
...@@ -29,19 +29,17 @@ ...@@ -29,19 +29,17 @@
* it happens at most some bytes of the exceptions handlers will be copied. * it happens at most some bytes of the exceptions handlers will be copied.
*/ */
.macro __BUILD_STRNCPY_ASM func LEAF(__strncpy_from_user_asm)
LEAF(__strncpy_from_\func\()_asm)
LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
and v0, a1
bnez v0, .Lfault\@
move t0, zero move t0, zero
move v1, a1 move v1, a1
.ifeqs "\func","kernel" #ifdef CONFIG_EVA
1: EX(lbu, v0, (v1), .Lfault\@) .set push
.else .set eva
1: EX(lbue, v0, (v1), .Lfault\@) 1: EX(lbue, v0, (v1), .Lfault)
.endif .set pop
#else
1: EX(lbu, v0, (v1), .Lfault)
#endif
PTR_ADDIU v1, 1 PTR_ADDIU v1, 1
R10KCBARRIER(0(ra)) R10KCBARRIER(0(ra))
sb v0, (a0) sb v0, (a0)
...@@ -51,35 +49,17 @@ LEAF(__strncpy_from_\func\()_asm) ...@@ -51,35 +49,17 @@ LEAF(__strncpy_from_\func\()_asm)
bne t0, a2, 1b bne t0, a2, 1b
2: PTR_ADDU v0, a1, t0 2: PTR_ADDU v0, a1, t0
xor v0, a1 xor v0, a1
bltz v0, .Lfault\@ bltz v0, .Lfault
move v0, t0 move v0, t0
jr ra # return n jr ra # return n
END(__strncpy_from_\func\()_asm) END(__strncpy_from_user_asm)
.Lfault\@: .Lfault:
li v0, -EFAULT li v0, -EFAULT
jr ra jr ra
.section __ex_table,"a" .section __ex_table,"a"
PTR 1b, .Lfault\@ PTR 1b, .Lfault
.previous .previous
.endm EXPORT_SYMBOL(__strncpy_from_user_asm)
#ifndef CONFIG_EVA
/* Set aliases */
.global __strncpy_from_user_asm
.set __strncpy_from_user_asm, __strncpy_from_kernel_asm
EXPORT_SYMBOL(__strncpy_from_user_asm)
#endif
__BUILD_STRNCPY_ASM kernel
EXPORT_SYMBOL(__strncpy_from_kernel_asm)
#ifdef CONFIG_EVA
.set push
.set eva
__BUILD_STRNCPY_ASM user
.set pop
EXPORT_SYMBOL(__strncpy_from_user_asm)
#endif
...@@ -26,12 +26,7 @@ ...@@ -26,12 +26,7 @@
* bytes. There's nothing secret there. On 64-bit accessing beyond * bytes. There's nothing secret there. On 64-bit accessing beyond
* the maximum is a tad hairier ... * the maximum is a tad hairier ...
*/ */
.macro __BUILD_STRNLEN_ASM func LEAF(__strnlen_user_asm)
LEAF(__strnlen_\func\()_asm)
LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
and v0, a0
bnez v0, .Lfault\@
move v0, a0 move v0, a0
PTR_ADDU a1, a0 # stop pointer PTR_ADDU a1, a0 # stop pointer
1: 1:
...@@ -40,11 +35,14 @@ LEAF(__strnlen_\func\()_asm) ...@@ -40,11 +35,14 @@ LEAF(__strnlen_\func\()_asm)
li AT, 1 li AT, 1
#endif #endif
beq v0, a1, 1f # limit reached? beq v0, a1, 1f # limit reached?
.ifeqs "\func", "kernel" #ifdef CONFIG_EVA
EX(lb, t0, (v0), .Lfault\@) .set push
.else .set eva
EX(lbe, t0, (v0), .Lfault\@) EX(lbe, t0, (v0), .Lfault)
.endif .set pop
#else
EX(lb, t0, (v0), .Lfault)
#endif
.set noreorder .set noreorder
bnez t0, 1b bnez t0, 1b
1: 1:
...@@ -57,28 +55,10 @@ LEAF(__strnlen_\func\()_asm) ...@@ -57,28 +55,10 @@ LEAF(__strnlen_\func\()_asm)
.set reorder .set reorder
PTR_SUBU v0, a0 PTR_SUBU v0, a0
jr ra jr ra
END(__strnlen_\func\()_asm) END(__strnlen_user_asm)
.Lfault\@: .Lfault:
move v0, zero move v0, zero
jr ra jr ra
.endm
#ifndef CONFIG_EVA
/* Set aliases */
.global __strnlen_user_asm
.set __strnlen_user_asm, __strnlen_kernel_asm
EXPORT_SYMBOL(__strnlen_user_asm)
#endif
__BUILD_STRNLEN_ASM kernel
EXPORT_SYMBOL(__strnlen_kernel_asm)
#ifdef CONFIG_EVA
.set push EXPORT_SYMBOL(__strnlen_user_asm)
.set eva
__BUILD_STRNLEN_ASM user
.set pop
EXPORT_SYMBOL(__strnlen_user_asm)
#endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment