Commit 987a0874 authored by Linus Torvalds

Merge git://git.kernel.org:/pub/scm/linux/kernel/git/davem/sparc

Pull sparc updates from David Miller:
 "Just some more random bits from Al, including a conversion over to
  generic extables"

* git://git.kernel.org:/pub/scm/linux/kernel/git/davem/sparc:
  sparc32: take ->thread.flags out
  sparc32: get rid of fake_swapper_regs
  sparc64: get rid of fake_swapper_regs
  sparc32: switch to generic extables
  sparc32: switch copy_user.S away from range exception table entries
  sparc32: get rid of range exception table entries in checksum_32.S
  sparc32: switch __bzero() away from range exception table entries
  sparc32: kill lookup_fault()
  sparc32: don't bother with lookup_fault() in __bzero()
parents 144c79ef cf64c2a9
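
The gist of the conversion: instead of sparc32's private range entries (first insn, 0, last insn + 4, fixup) plus a per-range index handed to the fixup code in %g2, every potentially faulting instruction now gets its own generic (insn, fixup) pair, and the fault paths simply look the faulting PC up with search_exception_tables(). A minimal C sketch of that lookup pattern, matching the shape of the fault_32.c and unaligned_32.c hunks below (the helper name fixup_kernel_fault is invented for illustration, not part of the patch):

```c
/* Sketch only: how a trap handler recovers once extables are generic. */
#include <linux/extable.h>
#include <asm/ptrace.h>

static int fixup_kernel_fault(struct pt_regs *regs)
{
	const struct exception_table_entry *entry;

	entry = search_exception_tables(regs->pc);	/* sorted (insn, fixup) pairs */
	if (!entry)
		return 0;		/* no fixup registered: caller oopses */

	regs->pc = entry->fixup;	/* resume at the out-of-line fixup code */
	regs->npc = regs->pc + 4;	/* sparc keeps a delay-slot npc as well */
	return 1;
}
```

kernel_mna_trap_fault() and the no_context path in do_sparc_fault() below are exactly this pattern, minus the wrapper.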
@@ -8,7 +8,6 @@
 #include <asm/ptrace.h>
 #include <asm/processor.h>
-#include <asm/extable_64.h>
 #include <asm/spitfire.h>
 #include <asm/adi.h>
...
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_EXTABLE64_H
-#define __ASM_EXTABLE64_H
+#ifndef __ASM_EXTABLE_H
+#define __ASM_EXTABLE_H
 /*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
...
@@ -50,16 +50,12 @@ struct thread_struct {
 unsigned long fsr;
 unsigned long fpqdepth;
 struct fpq fpqueue[16];
-unsigned long flags;
 mm_segment_t current_ds;
 };
-#define SPARC_FLAG_KTHREAD 0x1 /* task is a kernel thread */
-#define SPARC_FLAG_UNALIGNED 0x2 /* is allowed to do unaligned accesses */
 #define INIT_THREAD { \
-.flags = SPARC_FLAG_KTHREAD, \
 .current_ds = KERNEL_DS, \
+.kregs = (struct pt_regs *)(init_stack+THREAD_SIZE)-1 \
 }
 /* Do necessary setup to start up a newly executed thread. */
...
@@ -118,6 +118,7 @@ struct thread_info {
 .task = &tsk, \
 .current_ds = ASI_P, \
 .preempt_count = INIT_PREEMPT_COUNT, \
+.kregs = (struct pt_regs *)(init_stack+THREAD_SIZE)-1 \
 }
 /* how to get the thread information struct from C */
...
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef ___ASM_SPARC_UACCESS_H
 #define ___ASM_SPARC_UACCESS_H
+#include <asm/extable.h>
 #if defined(__sparc__) && defined(__arch64__)
 #include <asm/uaccess_64.h>
 #else
...
@@ -13,9 +13,6 @@
 #include <asm/processor.h>
-#define ARCH_HAS_SORT_EXTABLE
-#define ARCH_HAS_SEARCH_EXTABLE
 /* Sparc is not segmented, however we need to be able to fool access_ok()
 * when doing system calls from kernel mode legitimately.
 *
@@ -40,36 +37,6 @@
 #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
 #define access_ok(addr, size) __access_ok((unsigned long)(addr), size)
-/*
-* The exception table consists of pairs of addresses: the first is the
-* address of an instruction that is allowed to fault, and the second is
-* the address at which the program should continue. No registers are
-* modified, so it is entirely up to the continuation code to figure out
-* what to do.
-*
-* All the routines below use bits of fixup code that are out of line
-* with the main instruction path. This means when everything is well,
-* we don't even have to jump over them. Further, they do not intrude
-* on our cache or tlb entries.
-*
-* There is a special way how to put a range of potentially faulting
-* insns (like twenty ldd/std's with now intervening other instructions)
-* You specify address of first in insn and 0 in fixup and in the next
-* exception_table_entry you specify last potentially faulting insn + 1
-* and in fixup the routine which should handle the fault.
-* That fixup code will get
-* (faulting_insn_address - first_insn_in_the_range_address)/4
-* in %g2 (ie. index of the faulting instruction in the range).
-*/
-struct exception_table_entry
-{
-unsigned long insn, fixup;
-};
-/* Returns 0 if exception not found and fixup otherwise. */
-unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
 /* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
@@ -252,12 +219,7 @@ static inline unsigned long __clear_user(void __user *addr, unsigned long size)
 unsigned long ret;
 __asm__ __volatile__ (
-".section __ex_table,#alloc\n\t"
-".align 4\n\t"
-".word 1f,3\n\t"
-".previous\n\t"
 "mov %2, %%o1\n"
-"1:\n\t"
 "call __bzero\n\t"
 " mov %1, %%o0\n\t"
 "mov %%o0, %0\n"
...
@@ -10,7 +10,6 @@
 #include <linux/string.h>
 #include <asm/asi.h>
 #include <asm/spitfire.h>
-#include <asm/extable_64.h>
 #include <asm/processor.h>
...
@@ -515,7 +515,7 @@ continue_boot:
 /* I want a kernel stack NOW! */
 set init_thread_union, %g1
-set (THREAD_SIZE - STACKFRAME_SZ), %g2
+set (THREAD_SIZE - STACKFRAME_SZ - TRACEREG_SZ), %g2
 add %g1, %g2, %sp
 mov 0, %fp /* And for good luck */
...
@@ -706,7 +706,7 @@ tlb_fixup_done:
 wr %g0, ASI_P, %asi
 mov 1, %g1
 sllx %g1, THREAD_SHIFT, %g1
-sub %g1, (STACKFRAME_SZ + STACK_BIAS), %g1
+sub %g1, (STACKFRAME_SZ + STACK_BIAS + TRACEREG_SZ), %g1
 add %g6, %g1, %sp
 /* Set per-cpu pointer initially to zero, this makes
...
@@ -216,16 +216,6 @@ void flush_thread(void)
 clear_thread_flag(TIF_USEDFPU);
 #endif
 }
-/* This task is no longer a kernel thread. */
-if (current->thread.flags & SPARC_FLAG_KTHREAD) {
-current->thread.flags &= ~SPARC_FLAG_KTHREAD;
-/* We must fixup kregs as well. */
-/* XXX This was not fixed for ti for a while, worked. Unused? */
-current->thread.kregs = (struct pt_regs *)
-(task_stack_page(current) + (THREAD_SIZE - TRACEREG_SZ));
-}
 }
 static inline struct sparc_stackf __user *
@@ -313,7 +303,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
 extern int nwindows;
 unsigned long psr;
 memset(new_stack, 0, STACKFRAME_SZ + TRACEREG_SZ);
-p->thread.flags |= SPARC_FLAG_KTHREAD;
 p->thread.current_ds = KERNEL_DS;
 ti->kpc = (((unsigned long) ret_from_kernel_thread) - 0x8);
 childregs->u_regs[UREG_G1] = sp; /* function */
@@ -325,7 +314,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
 }
 memcpy(new_stack, (char *)regs - STACKFRAME_SZ, STACKFRAME_SZ + TRACEREG_SZ);
 childregs->u_regs[UREG_FP] = sp;
-p->thread.flags &= ~SPARC_FLAG_KTHREAD;
 p->thread.current_ds = USER_DS;
 ti->kpc = (((unsigned long) ret_from_fork) - 0x8);
 ti->kpsr = current->thread.fork_kpsr | PSR_PIL;
...
@@ -266,7 +266,6 @@ static __init void leon_patch(void)
 }
 struct tt_entry *sparc_ttable;
-static struct pt_regs fake_swapper_regs;
 /* Called from head_32.S - before we have setup anything
 * in the kernel. Be very careful with what you do here.
@@ -363,8 +362,6 @@ void __init setup_arch(char **cmdline_p)
 (*(linux_dbvec->teach_debugger))();
 }
-init_task.thread.kregs = &fake_swapper_regs;
 /* Run-time patch instructions to match the cpu model */
 per_cpu_patch();
...
@@ -165,8 +165,6 @@ extern int root_mountflags;
 char reboot_command[COMMAND_LINE_SIZE];
-static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
 static void __init per_cpu_patch(void)
 {
 struct cpuid_patch_entry *p;
@@ -661,8 +659,6 @@ void __init setup_arch(char **cmdline_p)
 rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
 #endif
-task_thread_info(&init_task)->kregs = &fake_swapper_regs;
 #ifdef CONFIG_IP_PNP
 if (!ic_set_manually) {
 phandle chosen = prom_finddevice("/chosen");
...
@@ -16,6 +16,7 @@
 #include <linux/uaccess.h>
 #include <linux/smp.h>
 #include <linux/perf_event.h>
+#include <linux/extable.h>
 #include <asm/setup.h>
@@ -213,10 +214,10 @@ static inline int ok_for_kernel(unsigned int insn)
 static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
 {
-unsigned long g2 = regs->u_regs [UREG_G2];
-unsigned long fixup = search_extables_range(regs->pc, &g2);
+const struct exception_table_entry *entry;
-if (!fixup) {
+entry = search_exception_tables(regs->pc);
+if (!entry) {
 unsigned long address = compute_effective_address(regs, insn);
 if(address < PAGE_SIZE) {
 printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
@@ -232,9 +233,8 @@ static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
 die_if_kernel("Oops", regs);
 /* Not reached */
 }
-regs->pc = fixup;
+regs->pc = entry->fixup;
 regs->npc = regs->pc + 4;
-regs->u_regs [UREG_G2] = g2;
 }
 asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
@@ -274,103 +274,9 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 }
 }
-static inline int ok_for_user(struct pt_regs *regs, unsigned int insn,
-enum direction dir)
-{
-unsigned int reg;
-int size = ((insn >> 19) & 3) == 3 ? 8 : 4;
-if ((regs->pc | regs->npc) & 3)
-return 0;
-/* Must access_ok() in all the necessary places. */
-#define WINREG_ADDR(regnum) \
-((void __user *)(((unsigned long *)regs->u_regs[UREG_FP])+(regnum)))
-reg = (insn >> 25) & 0x1f;
-if (reg >= 16) {
-if (!access_ok(WINREG_ADDR(reg - 16), size))
-return -EFAULT;
-}
-reg = (insn >> 14) & 0x1f;
-if (reg >= 16) {
-if (!access_ok(WINREG_ADDR(reg - 16), size))
-return -EFAULT;
-}
-if (!(insn & 0x2000)) {
-reg = (insn & 0x1f);
-if (reg >= 16) {
-if (!access_ok(WINREG_ADDR(reg - 16), size))
-return -EFAULT;
-}
-}
-#undef WINREG_ADDR
-return 0;
-}
-static void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
+asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 {
 send_sig_fault(SIGBUS, BUS_ADRALN,
 (void __user *)safe_compute_effective_address(regs, insn),
 0, current);
 }
-asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
-{
-enum direction dir;
-if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) ||
-(((insn >> 30) & 3) != 3))
-goto kill_user;
-dir = decode_direction(insn);
-if(!ok_for_user(regs, insn, dir)) {
-goto kill_user;
-} else {
-int err, size = decode_access_size(insn);
-unsigned long addr;
-if(floating_point_load_or_store_p(insn)) {
-printk("User FPU load/store unaligned unsupported.\n");
-goto kill_user;
-}
-addr = compute_effective_address(regs, insn);
-perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
-switch(dir) {
-case load:
-err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
-regs),
-size, (unsigned long *) addr,
-decode_signedness(insn));
-break;
-case store:
-err = do_int_store(((insn>>25)&0x1f), size,
-(unsigned long *) addr, regs);
-break;
-case both:
-/*
-* This was supported in 2.4. However, we question
-* the value of SWAP instruction across word boundaries.
-*/
-printk("Unaligned SWAP unsupported.\n");
-err = -EFAULT;
-break;
-default:
-unaligned_panic("Impossible user unaligned trap.");
-goto out;
-}
-if (err)
-goto kill_user;
-else
-advance(regs);
-goto out;
-}
-kill_user:
-user_mna_trap_fault(regs, insn);
-out:
-;
-}
@@ -155,13 +155,6 @@ cpout: retl ! get outta here
 .text; \
 .align 4
-#define EXT(start,end) \
-.section __ex_table,ALLOC; \
-.align 4; \
-.word start, 0, end, cc_fault; \
-.text; \
-.align 4
 /* This aligned version executes typically in 8.5 superscalar cycles, this
 * is the best I can do. I say 8.5 because the final add will pair with
 * the next ldd in the main unrolled loop. Thus the pipe is always full.
@@ -169,20 +162,20 @@ cpout: retl ! get outta here
 * please check the fixup code below as well.
 */
 #define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
-ldd [src + off + 0x00], t0; \
-ldd [src + off + 0x08], t2; \
+EX(ldd [src + off + 0x00], t0); \
+EX(ldd [src + off + 0x08], t2); \
 addxcc t0, sum, sum; \
-ldd [src + off + 0x10], t4; \
+EX(ldd [src + off + 0x10], t4); \
 addxcc t1, sum, sum; \
-ldd [src + off + 0x18], t6; \
+EX(ldd [src + off + 0x18], t6); \
 addxcc t2, sum, sum; \
-std t0, [dst + off + 0x00]; \
+EX(std t0, [dst + off + 0x00]); \
 addxcc t3, sum, sum; \
-std t2, [dst + off + 0x08]; \
+EX(std t2, [dst + off + 0x08]); \
 addxcc t4, sum, sum; \
-std t4, [dst + off + 0x10]; \
+EX(std t4, [dst + off + 0x10]); \
 addxcc t5, sum, sum; \
-std t6, [dst + off + 0x18]; \
+EX(std t6, [dst + off + 0x18]); \
 addxcc t6, sum, sum; \
 addxcc t7, sum, sum;
@@ -191,39 +184,39 @@ cpout: retl ! get outta here
 * Viking MXCC into streaming mode. Ho hum...
 */
 #define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
-ldd [src + off + 0x00], t0; \
-ldd [src + off + 0x08], t2; \
-ldd [src + off + 0x10], t4; \
-ldd [src + off + 0x18], t6; \
-st t0, [dst + off + 0x00]; \
+EX(ldd [src + off + 0x00], t0); \
+EX(ldd [src + off + 0x08], t2); \
+EX(ldd [src + off + 0x10], t4); \
+EX(ldd [src + off + 0x18], t6); \
+EX(st t0, [dst + off + 0x00]); \
 addxcc t0, sum, sum; \
-st t1, [dst + off + 0x04]; \
+EX(st t1, [dst + off + 0x04]); \
 addxcc t1, sum, sum; \
-st t2, [dst + off + 0x08]; \
+EX(st t2, [dst + off + 0x08]); \
 addxcc t2, sum, sum; \
-st t3, [dst + off + 0x0c]; \
+EX(st t3, [dst + off + 0x0c]); \
 addxcc t3, sum, sum; \
-st t4, [dst + off + 0x10]; \
+EX(st t4, [dst + off + 0x10]); \
 addxcc t4, sum, sum; \
-st t5, [dst + off + 0x14]; \
+EX(st t5, [dst + off + 0x14]); \
 addxcc t5, sum, sum; \
-st t6, [dst + off + 0x18]; \
+EX(st t6, [dst + off + 0x18]); \
 addxcc t6, sum, sum; \
-st t7, [dst + off + 0x1c]; \
+EX(st t7, [dst + off + 0x1c]); \
 addxcc t7, sum, sum;
 /* Yuck, 6 superscalar cycles... */
 #define CSUMCOPY_LASTCHUNK(src, dst, sum, off, t0, t1, t2, t3) \
-ldd [src - off - 0x08], t0; \
-ldd [src - off - 0x00], t2; \
+EX(ldd [src - off - 0x08], t0); \
+EX(ldd [src - off - 0x00], t2); \
 addxcc t0, sum, sum; \
-st t0, [dst - off - 0x08]; \
+EX(st t0, [dst - off - 0x08]); \
 addxcc t1, sum, sum; \
-st t1, [dst - off - 0x04]; \
+EX(st t1, [dst - off - 0x04]); \
 addxcc t2, sum, sum; \
-st t2, [dst - off - 0x00]; \
+EX(st t2, [dst - off - 0x00]); \
 addxcc t3, sum, sum; \
-st t3, [dst - off + 0x04];
+EX(st t3, [dst - off + 0x04]);
 /* Handle the end cruft code out of band for better cache patterns. */
 cc_end_cruft:
@@ -331,7 +324,6 @@ __csum_partial_copy_sparc_generic:
 CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
-10: EXT(5b, 10b) ! note for exception handling
 sub %g1, 128, %g1 ! detract from length
 addx %g0, %g7, %g7 ! add in last carry bit
 andcc %g1, 0xffffff80, %g0 ! more to csum?
@@ -356,8 +348,7 @@ cctbl: CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5)
 CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5)
 CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5)
 CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5)
-12: EXT(cctbl, 12b) ! note for exception table handling
-addx %g0, %g7, %g7
+12: addx %g0, %g7, %g7
 andcc %o3, 0xf, %g0 ! check for low bits set
 ccte: bne cc_end_cruft ! something left, handle it out of band
 andcc %o3, 8, %g0 ! begin checks for that code
@@ -367,7 +358,6 @@ ccdbl: CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o
 CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
-11: EXT(ccdbl, 11b) ! note for exception table handling
 sub %g1, 128, %g1 ! detract from length
 addx %g0, %g7, %g7 ! add in last carry bit
 andcc %g1, 0xffffff80, %g0 ! more to csum?
...
This diff is collapsed.
@@ -19,7 +19,7 @@
 98: x,y; \
 .section .fixup,ALLOC,EXECINSTR; \
 .align 4; \
-99: ba 30f; \
+99: retl; \
 a, b, %o0; \
 .section __ex_table,ALLOC; \
 .align 4; \
@@ -27,35 +27,44 @@
 .text; \
 .align 4
-#define EXT(start,end,handler) \
+#define STORE(source, base, offset, n) \
+98: std source, [base + offset + n]; \
+.section .fixup,ALLOC,EXECINSTR; \
+.align 4; \
+99: ba 30f; \
+sub %o3, n - offset, %o3; \
 .section __ex_table,ALLOC; \
 .align 4; \
-.word start, 0, end, handler; \
+.word 98b, 99b; \
 .text; \
-.align 4
+.align 4;
+#define STORE_LAST(source, base, offset, n) \
+EX(std source, [base - offset - n], \
+add %o1, offset + n);
 /* Please don't change these macros, unless you change the logic
 * in the .fixup section below as well.
 * Store 64 bytes at (BASE + OFFSET) using value SOURCE. */
 #define ZERO_BIG_BLOCK(base, offset, source) \
-std source, [base + offset + 0x00]; \
-std source, [base + offset + 0x08]; \
-std source, [base + offset + 0x10]; \
-std source, [base + offset + 0x18]; \
-std source, [base + offset + 0x20]; \
-std source, [base + offset + 0x28]; \
-std source, [base + offset + 0x30]; \
-std source, [base + offset + 0x38];
+STORE(source, base, offset, 0x00); \
+STORE(source, base, offset, 0x08); \
+STORE(source, base, offset, 0x10); \
+STORE(source, base, offset, 0x18); \
+STORE(source, base, offset, 0x20); \
+STORE(source, base, offset, 0x28); \
+STORE(source, base, offset, 0x30); \
+STORE(source, base, offset, 0x38);
 #define ZERO_LAST_BLOCKS(base, offset, source) \
-std source, [base - offset - 0x38]; \
-std source, [base - offset - 0x30]; \
-std source, [base - offset - 0x28]; \
-std source, [base - offset - 0x20]; \
-std source, [base - offset - 0x18]; \
-std source, [base - offset - 0x10]; \
-std source, [base - offset - 0x08]; \
-std source, [base - offset - 0x00];
+STORE_LAST(source, base, offset, 0x38); \
+STORE_LAST(source, base, offset, 0x30); \
+STORE_LAST(source, base, offset, 0x28); \
+STORE_LAST(source, base, offset, 0x20); \
+STORE_LAST(source, base, offset, 0x18); \
+STORE_LAST(source, base, offset, 0x10); \
+STORE_LAST(source, base, offset, 0x08); \
+STORE_LAST(source, base, offset, 0x00);
 .text
 .align 4
@@ -68,8 +77,6 @@ __bzero_begin:
 .globl memset
 EXPORT_SYMBOL(__bzero)
 EXPORT_SYMBOL(memset)
-.globl __memset_start, __memset_end
-__memset_start:
 memset:
 mov %o0, %g1
 mov 1, %g4
@@ -122,8 +129,6 @@ __bzero:
 ZERO_BIG_BLOCK(%o0, 0x00, %g2)
 subcc %o3, 128, %o3
 ZERO_BIG_BLOCK(%o0, 0x40, %g2)
-11:
-EXT(10b, 11b, 20f)
 bne 10b
 add %o0, 128, %o0
@@ -138,11 +143,9 @@ __bzero:
 jmp %o4
 add %o0, %o2, %o0
-12:
 ZERO_LAST_BLOCKS(%o0, 0x48, %g2)
 ZERO_LAST_BLOCKS(%o0, 0x08, %g2)
 13:
-EXT(12b, 13b, 21f)
 be 8f
 andcc %o1, 4, %g0
@@ -182,37 +185,13 @@ __bzero:
 5:
 retl
 clr %o0
-__memset_end:
 .section .fixup,#alloc,#execinstr
 .align 4
-20:
-cmp %g2, 8
-bleu 1f
+30:
 and %o1, 0x7f, %o1
-sub %g2, 9, %g2
-add %o3, 64, %o3
-1:
-sll %g2, 3, %g2
+retl
 add %o3, %o1, %o0
-b 30f
-sub %o0, %g2, %o0
-21:
-mov 8, %o0
-and %o1, 7, %o1
-sub %o0, %g2, %o0
-sll %o0, 3, %o0
-b 30f
-add %o0, %o1, %o0
-30:
-/* %o4 is faulting address, %o5 is %pc where fault occurred */
-save %sp, -104, %sp
-mov %i5, %o0
-mov %i7, %o1
-call lookup_fault
-mov %i4, %o2
-ret
-restore
 .globl __bzero_end
 __bzero_end:
@@ -8,7 +8,7 @@ ccflags-y := -Werror
 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
 obj-y += fault_$(BITS).o
 obj-y += init_$(BITS).o
-obj-$(CONFIG_SPARC32) += extable.o srmmu.o iommu.o io-unit.o
+obj-$(CONFIG_SPARC32) += srmmu.o iommu.o io-unit.o
 obj-$(CONFIG_SPARC32) += srmmu_access.o
 obj-$(CONFIG_SPARC32) += hypersparc.o viking.o tsunami.o swift.o
 obj-$(CONFIG_SPARC32) += leon_mm.o
...
-// SPDX-License-Identifier: GPL-2.0
-/*
-* linux/arch/sparc/mm/extable.c
-*/
-#include <linux/module.h>
-#include <linux/extable.h>
-#include <linux/uaccess.h>
-void sort_extable(struct exception_table_entry *start,
-struct exception_table_entry *finish)
-{
-}
-/* Caller knows they are in a range if ret->fixup == 0 */
-const struct exception_table_entry *
-search_extable(const struct exception_table_entry *base,
-const size_t num,
-unsigned long value)
-{
-int i;
-/* Single insn entries are encoded as:
-* word 1: insn address
-* word 2: fixup code address
-*
-* Range entries are encoded as:
-* word 1: first insn address
-* word 2: 0
-* word 3: last insn address + 4 bytes
-* word 4: fixup code address
-*
-* Deleted entries are encoded as:
-* word 1: unused
-* word 2: -1
-*
-* See asm/uaccess.h for more details.
-*/
-/* 1. Try to find an exact match. */
-for (i = 0; i < num; i++) {
-if (base[i].fixup == 0) {
-/* A range entry, skip both parts. */
-i++;
-continue;
-}
-/* A deleted entry; see trim_init_extable */
-if (base[i].fixup == -1)
-continue;
-if (base[i].insn == value)
-return &base[i];
-}
-/* 2. Try to find a range match. */
-for (i = 0; i < (num - 1); i++) {
-if (base[i].fixup)
-continue;
-if (base[i].insn <= value && base[i + 1].insn > value)
-return &base[i];
-i++;
-}
-return NULL;
-}
-#ifdef CONFIG_MODULES
-/* We could memmove them around; easier to mark the trimmed ones. */
-void trim_init_extable(struct module *m)
-{
-unsigned int i;
-bool range;
-for (i = 0; i < m->num_exentries; i += range ? 2 : 1) {
-range = m->extable[i].fixup == 0;
-if (within_module_init(m->extable[i].insn, m)) {
-m->extable[i].fixup = -1;
-if (range)
-m->extable[i+1].fixup = -1;
-}
-if (range)
-i++;
-}
-}
-#endif /* CONFIG_MODULES */
-/* Special extable search, which handles ranges. Returns fixup */
-unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
-{
-const struct exception_table_entry *entry;
-entry = search_exception_tables(addr);
-if (!entry)
-return 0;
-/* Inside range? Fix g2 and return correct fixup */
-if (!entry->fixup) {
-*g2 = (addr - entry->insn) / 4;
-return (entry + 1)->fixup;
-}
-return entry->fixup;
-}
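
With arch/sparc/mm/extable.c gone (the all-minus block above), sparc32 relies on the generic code in lib/extable.c that the final hunk of this diff re-enables: the table is sorted once by sort_extable() and then searched with an ordinary binary search over entry->insn, with no range or deleted-entry special cases. A rough sketch of that generic lookup, reusing the search_extable()/cmp_ex_search() names visible in the last hunk (exact kernel internals may differ slightly):

```c
/* Rough sketch of the generic lookup sparc32 now falls back to. */
#include <linux/bsearch.h>
#include <linux/extable.h>

static int cmp_ex_search(const void *key, const void *elt)
{
	const struct exception_table_entry *e = elt;
	unsigned long addr = *(const unsigned long *)key;

	if (addr < e->insn)
		return -1;
	return addr > e->insn;	/* 0 on exact match */
}

const struct exception_table_entry *
search_extable(const struct exception_table_entry *base, const size_t num,
	       unsigned long value)
{
	/* base[] is sorted by insn address before any lookups happen. */
	return bsearch(&value, base, num, sizeof(*base), cmp_ex_search);
}
```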
@@ -23,6 +23,7 @@
 #include <linux/interrupt.h>
 #include <linux/kdebug.h>
 #include <linux/uaccess.h>
+#include <linux/extable.h>
 #include <asm/page.h>
 #include <asm/openprom.h>
@@ -54,54 +55,6 @@ static void __noreturn unhandled_fault(unsigned long address,
 die_if_kernel("Oops", regs);
 }
-asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
-unsigned long address)
-{
-struct pt_regs regs;
-unsigned long g2;
-unsigned int insn;
-int i;
-i = search_extables_range(ret_pc, &g2);
-switch (i) {
-case 3:
-/* load & store will be handled by fixup */
-return 3;
-case 1:
-/* store will be handled by fixup, load will bump out */
-/* for _to_ macros */
-insn = *((unsigned int *) pc);
-if ((insn >> 21) & 1)
-return 1;
-break;
-case 2:
-/* load will be handled by fixup, store will bump out */
-/* for _from_ macros */
-insn = *((unsigned int *) pc);
-if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15)
-return 2;
-break;
-default:
-break;
-}
-memset(&regs, 0, sizeof(regs));
-regs.pc = pc;
-regs.npc = pc + 4;
-__asm__ __volatile__(
-"rd %%psr, %0\n\t"
-"nop\n\t"
-"nop\n\t"
-"nop\n" : "=r" (regs.psr));
-unhandled_fault(address, current, &regs);
-/* Not reached */
-return 0;
-}
 static inline void
 show_signal_msg(struct pt_regs *regs, int sig, int code,
 unsigned long address, struct task_struct *tsk)
@@ -162,8 +115,6 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 struct vm_area_struct *vma;
 struct task_struct *tsk = current;
 struct mm_struct *mm = tsk->mm;
-unsigned int fixup;
-unsigned long g2;
 int from_user = !(regs->psr & PSR_PS);
 int code;
 vm_fault_t fault;
@@ -281,31 +232,20 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 /* Is this in ex_table? */
 no_context:
-g2 = regs->u_regs[UREG_G2];
 if (!from_user) {
-fixup = search_extables_range(regs->pc, &g2);
-/* Values below 10 are reserved for other things */
-if (fixup > 10) {
-extern const unsigned int __memset_start[];
-extern const unsigned int __memset_end[];
+const struct exception_table_entry *entry;
+entry = search_exception_tables(regs->pc);
 #ifdef DEBUG_EXCEPTIONS
 printk("Exception: PC<%08lx> faddr<%08lx>\n",
 regs->pc, address);
-printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
-regs->pc, fixup, g2);
+printk("EX_TABLE: insn<%08lx> fixup<%08x>\n",
+regs->pc, entry->fixup);
 #endif
-if ((regs->pc >= (unsigned long)__memset_start &&
-regs->pc < (unsigned long)__memset_end)) {
-regs->u_regs[UREG_I4] = address;
-regs->u_regs[UREG_I5] = regs->pc;
-}
-regs->u_regs[UREG_G2] = g2;
-regs->pc = fixup;
+regs->pc = entry->fixup;
 regs->npc = regs->pc + 4;
 return;
 }
-}
 unhandled_fault(address, tsk, regs);
 do_exit(SIGKILL);
...
 /* SPDX-License-Identifier: GPL-2.0 */
 /* fault_32.c - visible as they are called from assembler */
-asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
-unsigned long address);
 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 unsigned long address);
...
@@ -21,7 +21,6 @@ static inline unsigned long ex_to_insn(const struct exception_table_entry *x)
 }
 #endif
-#ifndef ARCH_HAS_SORT_EXTABLE
 #ifndef ARCH_HAS_RELATIVE_EXTABLE
 #define swap_ex NULL
 #else
@@ -88,9 +87,6 @@ void trim_init_extable(struct module *m)
 m->num_exentries--;
 }
 #endif /* CONFIG_MODULES */
-#endif /* !ARCH_HAS_SORT_EXTABLE */
-#ifndef ARCH_HAS_SEARCH_EXTABLE
 static int cmp_ex_search(const void *key, const void *elt)
 {
@@ -120,4 +116,3 @@ search_extable(const struct exception_table_entry *base,
 return bsearch(&value, base, num,
 sizeof(struct exception_table_entry), cmp_ex_search);
 }
-#endif