Commit 45b74a65 authored by Linus Torvalds

Merge branch 'parisc-4.19-2' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull more parisc updates from Helge Deller:

 - fix a boot failure of the 64-bit kernel; it was broken by the unwind
   optimization commit in the merge window.

 - fix 64-bit userspace support (static 64-bit applications only; we
   don't yet have 64-bit userspace support in glibc).

 - consolidate unwind initialization code.

 - add machine model description to stack trace.

* 'parisc-4.19-2' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: Add hardware description to stack traces
  parisc: Fix boot failure of 64-bit kernel
  parisc: Consolidate unwind initialization calls
  parisc: Update comments in syscall.S regarding wide userland
  parisc: Fix ptraced 64-bit applications to call 64-bit syscalls
  parisc: Restore possibility to execute 64-bit applications
parents 433bcf67 dbf2a4b1
@@ -235,6 +235,7 @@ typedef unsigned long elf_greg_t;
 #define SET_PERSONALITY(ex) \
 ({ \
 	set_personality((current->personality & ~PER_MASK) | PER_LINUX); \
+	clear_thread_flag(TIF_32BIT); \
 	current->thread.map_base = DEFAULT_MAP_BASE; \
 	current->thread.task_size = DEFAULT_TASK_SIZE; \
 })
@@ -243,9 +244,11 @@ typedef unsigned long elf_greg_t;
 #define COMPAT_SET_PERSONALITY(ex) \
 ({ \
+	if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \
 	set_thread_flag(TIF_32BIT); \
 	current->thread.map_base = DEFAULT_MAP_BASE32; \
 	current->thread.task_size = DEFAULT_TASK_SIZE32; \
+	} else clear_thread_flag(TIF_32BIT); \
 })
 
 /*
......
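The elf.h change above keys the process personality off the ELF class of the binary instead of unconditionally assuming a narrow (32-bit) userland on a 64-bit kernel. A minimal C sketch of that decision, using a hypothetical helper name rather than code from the patch:

#include <linux/elf.h>
#include <linux/types.h>

/* Hypothetical helper, for illustration only: mirrors the check that
 * COMPAT_SET_PERSONALITY now performs on a 64-bit parisc kernel. */
static bool elf_wants_wide_personality(const struct elf64_hdr *ehdr)
{
	/* ELFCLASS32 -> narrow process (TIF_32BIT set, 32-bit map base and
	 * task size); anything else -> wide process (TIF_32BIT cleared). */
	return ehdr->e_ident[EI_CLASS] != ELFCLASS32;
}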
@@ -22,15 +22,6 @@
 name:		ASM_NL\
 	.export name
 
-#ifdef CONFIG_64BIT
-#define ENDPROC(name) \
-	END(name)
-#else
-#define ENDPROC(name) \
-	.type name, @function !\
-	END(name)
-#endif
-
 #define ENTRY_CFI(name, ...) \
 	ENTRY(name)	ASM_NL\
 	.proc		ASM_NL\
......
@@ -256,11 +256,7 @@ on downward growing arches, it looks like this:
  * it in here from the current->personality
  */
 
-#ifdef CONFIG_64BIT
-#define USER_WIDE_MODE	(!test_thread_flag(TIF_32BIT))
-#else
-#define USER_WIDE_MODE	0
-#endif
+#define USER_WIDE_MODE	(!is_32bit_task())
 
 #define start_thread(regs, new_pc, new_sp) do { \
 	elf_addr_t *sp = (elf_addr_t *)new_sp; \
......
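The new USER_WIDE_MODE one-liner leans on is_32bit_task(), whose definition is not part of this hunk. Assuming the usual parisc arrangement, it is expected to reduce to a TIF_32BIT check roughly like the sketch below (an assumption, not a quote of the header):

/* Assumed shape of is_32bit_task() on parisc; illustrative only. */
#ifdef CONFIG_64BIT
# define is_32bit_task()	test_thread_flag(TIF_32BIT)
#else
# define is_32bit_task()	(1)	/* a 32-bit kernel only runs narrow tasks */
#endif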
@@ -2,7 +2,9 @@
 #ifndef __ASM_TRAPS_H
 #define __ASM_TRAPS_H
 
-#ifdef __KERNEL__
+#define PARISC_ITLB_TRAP	6 /* defined by architecture. Do not change. */
+
+#if !defined(__ASSEMBLY__)
 struct pt_regs;
 
 /* traps.c */
......
@@ -73,8 +73,10 @@ unwind_table_remove(struct unwind_table *table);
 void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
 		       struct pt_regs *regs);
-void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t);
-void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs);
+void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info,
+		struct task_struct *t);
+void unwind_frame_init_task(struct unwind_frame_info *info,
+		struct task_struct *task, struct pt_regs *regs);
 int unwind_once(struct unwind_frame_info *info);
 int unwind_to_user(struct unwind_frame_info *info);
......
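The consolidated unwind_frame_init_task() declared above accepts NULL for both of its optional arguments: a NULL task means current, and a NULL regs asks the helper to synthesize a starting pt_regs from the caller's own frame. This mirrors how stacktrace.c and traps.c use it later in this series. A hedged usage sketch (the function below is illustrative, not part of the patch):

#include <linux/printk.h>
#include <asm/unwind.h>

/* Illustrative caller: dump a backtrace for "task" (NULL = current),
 * optionally starting from an explicit register set. */
static void print_backtrace_of(struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_frame_info info;

	unwind_frame_init_task(&info, task, regs);

	while (unwind_once(&info) >= 0 && info.ip != 0)
		printk(KERN_CRIT " %pS\n", (void *)info.ip);
}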
@@ -36,6 +36,7 @@
 #include <asm/signal.h>
 #include <asm/unistd.h>
 #include <asm/ldcw.h>
+#include <asm/traps.h>
 #include <asm/thread_info.h>
 
 #include <linux/linkage.h>
@@ -692,7 +693,7 @@ ENTRY(fault_vector_20)
 	def		 3
 	extint		 4
 	def		 5
-	itlb_20		 6
+	itlb_20		 PARISC_ITLB_TRAP
 	def		 7
 	def		 8
 	def		 9
@@ -735,7 +736,7 @@ ENTRY(fault_vector_11)
 	def		 3
 	extint		 4
 	def		 5
-	itlb_11		 6
+	itlb_11		 PARISC_ITLB_TRAP
 	def		 7
 	def		 8
 	def		 9
@@ -776,7 +777,7 @@ END(fault_vector_11)
 	 * copy_thread moved args into task save area.
 	 */
 
-ENTRY_CFI(ret_from_kernel_thread)
+ENTRY(ret_from_kernel_thread)
 	/* Call schedule_tail first though */
 	BL	schedule_tail, %r2
 	nop
@@ -791,7 +792,7 @@ ENTRY_CFI(ret_from_kernel_thread)
 	copy	%r31, %r2
 	b	finish_child_return
 	nop
-ENDPROC_CFI(ret_from_kernel_thread)
+END(ret_from_kernel_thread)
 
 	/*
@@ -815,9 +816,8 @@ ENTRY_CFI(_switch_to)
 	LDREG	TASK_THREAD_INFO(%r25), %r25
 	bv	%r0(%r2)
 	mtctl   %r25,%cr30
-ENDPROC_CFI(_switch_to)
 
-ENTRY_CFI(_switch_to_ret)
+ENTRY(_switch_to_ret)
 	mtctl	%r0, %cr0		/* Needed for single stepping */
 	callee_rest
 	callee_rest_float
@@ -825,7 +825,7 @@ ENTRY_CFI(_switch_to_ret)
 	LDREG	-RP_OFFSET(%r30), %r2
 	bv	%r0(%r2)
 	copy	%r26, %r28
-ENDPROC_CFI(_switch_to_ret)
+ENDPROC_CFI(_switch_to)
 
 	/*
 	 * Common rfi return path for interruptions, kernel execve, and
@@ -886,14 +886,12 @@ ENTRY_CFI(syscall_exit_rfi)
 	STREG   %r19,PT_SR5(%r16)
 	STREG   %r19,PT_SR6(%r16)
 	STREG   %r19,PT_SR7(%r16)
-ENDPROC_CFI(syscall_exit_rfi)
 
-ENTRY_CFI(intr_return)
+ENTRY(intr_return)
 	/* check for reschedule */
 	mfctl   %cr30,%r1
 	LDREG   TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
 	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
-ENDPROC_CFI(intr_return)
 
 	.import do_notify_resume,code
 intr_check_sig:
@@ -1049,6 +1047,7 @@ intr_extint:
 	b	do_cpu_irq_mask
 	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
+ENDPROC_CFI(syscall_exit_rfi)
 
 	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */
@@ -1068,21 +1067,12 @@ ENTRY_CFI(intr_save)		/* for os_hpmc */
 	save_specials	%r29
 
 	/* If this trap is a itlb miss, skip saving/adjusting isr/ior */
+	cmpib,COND(=),n        PARISC_ITLB_TRAP,%r26,skip_save_ior
 
-	/*
-	 * FIXME: 1) Use a #define for the hardwired "6" below (and in
-	 *          traps.c.
-	 *        2) Once we start executing code above 4 Gb, we need
-	 *          to adjust iasq/iaoq here in the same way we
-	 *          adjust isr/ior below.
-	 */
-	cmpib,COND(=),n        6,%r26,skip_save_ior
-
-	mfctl           %cr20, %r16 /* isr */
+	mfctl           %isr, %r16
 	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
-	mfctl           %cr21, %r17 /* ior */
+	mfctl           %ior, %r17
 
 #ifdef CONFIG_64BIT
@@ -1094,22 +1084,34 @@ ENTRY_CFI(intr_save)		/* for os_hpmc */
 	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
 	depdi           0,1,2,%r17
 
-	/*
-	 * FIXME: This code has hardwired assumptions about the split
-	 *        between space bits and offset bits. This will change
-	 *        when we allow alternate page sizes.
-	 */
-
-	/* adjust isr/ior. */
-	extrd,u         %r16,63,SPACEID_SHIFT,%r1	/* get high bits from isr for ior */
-	depd            %r1,31,SPACEID_SHIFT,%r17	/* deposit them into ior */
-	depdi           0,63,SPACEID_SHIFT,%r16	/* clear them from isr */
+	/* adjust isr/ior: get high bits from isr and deposit in ior */
+	space_adjust	%r16,%r17,%r1
 #endif
 	STREG           %r16, PT_ISR(%r29)
 	STREG           %r17, PT_IOR(%r29)
 
+#if 0 && defined(CONFIG_64BIT)
+	/* Revisit when we have 64-bit code above 4Gb */
+	b,n		intr_save2
 
 skip_save_ior:
+	/* We have a itlb miss, and when executing code above 4 Gb on ILP64, we
+	 * need to adjust iasq/iaoq here in the same way we adjusted isr/ior
+	 * above.
+	 */
+	extrd,u,*	%r8,PSW_W_BIT,1,%r1
+	cmpib,COND(=),n	1,%r1,intr_save2
+	LDREG		PT_IASQ0(%r29), %r16
+	LDREG		PT_IAOQ0(%r29), %r17
+	/* adjust iasq/iaoq */
+	space_adjust	%r16,%r17,%r1
+	STREG           %r16, PT_IASQ0(%r29)
+	STREG           %r17, PT_IAOQ0(%r29)
+#else
+skip_save_ior:
+#endif
 
+intr_save2:
 	virt_map
 	save_general	%r29
@@ -1747,7 +1749,7 @@ fork_like fork
 fork_like	vfork
 
 	/* Set the return value for the child */
-ENTRY_CFI(child_return)
+ENTRY(child_return)
 	BL	schedule_tail, %r2
 	nop
 finish_child_return:
@@ -1759,7 +1761,7 @@ finish_child_return:
 	reg_restore %r1
 	b	syscall_exit
 	copy	%r0,%r28
-ENDPROC_CFI(child_return)
+END(child_return)
 
 ENTRY_CFI(sys_rt_sigreturn_wrapper)
 	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
@@ -1791,7 +1793,7 @@ ENTRY_CFI(sys_rt_sigreturn_wrapper)
 	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
 ENDPROC_CFI(sys_rt_sigreturn_wrapper)
 
-ENTRY_CFI(syscall_exit)
+ENTRY(syscall_exit)
 	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
 	 * via syscall_exit_rfi if the signal was received while the process
 	 * was running.
@@ -1990,15 +1992,13 @@ syscall_do_resched:
 #else
 	nop
 #endif
-ENDPROC_CFI(syscall_exit)
+END(syscall_exit)
 
 
 #ifdef CONFIG_FUNCTION_TRACER
 
 	.import ftrace_function_trampoline,code
 	.align L1_CACHE_BYTES
-	.globl mcount
-	.type  mcount, @function
 ENTRY_CFI(mcount, caller)
 _mcount:
 	.export _mcount,data
@@ -2027,8 +2027,6 @@ ENDPROC_CFI(mcount)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	.align 8
-	.globl return_to_handler
-	.type  return_to_handler, @function
 ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
 	.export parisc_return_to_handler,data
 parisc_return_to_handler:
@@ -2078,6 +2076,7 @@ ENDPROC_CFI(return_to_handler)
 	/* void call_on_stack(unsigned long param1, void *func,
 	   unsigned long new_stack) */
 ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
+ENTRY(_call_on_stack)
 	copy	%sp, %r1
 
 	/* Regarding the HPPA calling conventions for function pointers,
......
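The intr_save rework above replaces the open-coded isr/ior fix-up with the space_adjust macro and reuses it for iasq/iaoq in the (currently disabled) above-4GB path. My reading of the extrd/depd/depdi sequence it replaces, written out as C for clarity (an interpretation, not code from the patch):

/* Interpretation of the old extrd,u/depd/depdi triple: move the low
 * SPACEID_SHIFT bits of the space register into bits 32..32+SPACEID_SHIFT-1
 * of the offset register, then clear them from the space register. */
static inline void space_adjust_c(unsigned long *spc, unsigned long *off)
{
	unsigned long mask = (1UL << SPACEID_SHIFT) - 1;
	unsigned long spaceid = *spc & mask;	/* extrd,u  spc,63,SPACEID_SHIFT */

	*off = (*off & ~(mask << 32)) | (spaceid << 32);	/* depd  spaceid,31,SPACEID_SHIFT,off */
	*spc &= ~mask;					/* depdi 0,63,SPACEID_SHIFT,spc */
}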
@@ -288,6 +288,8 @@ void __init collect_boot_cpu_data(void)
 		printk(KERN_INFO "model %s\n",
 			boot_cpu_data.pdc.sys_model_name);
 
+	dump_stack_set_arch_desc("%s", boot_cpu_data.pdc.sys_model_name);
+
 	boot_cpu_data.hversion =  boot_cpu_data.pdc.model.hversion;
 	boot_cpu_data.sversion =  boot_cpu_data.pdc.model.sversion;
......
@@ -16,20 +16,7 @@ static void dump_trace(struct task_struct *task, struct stack_trace *trace)
 {
 	struct unwind_frame_info info;
 
-	/* initialize unwind info */
-	if (task == current) {
-		unsigned long sp;
-		struct pt_regs r;
-HERE:
-		asm volatile ("copy %%r30, %0" : "=r"(sp));
-		memset(&r, 0, sizeof(struct pt_regs));
-		r.iaoq[0] = (unsigned long)&&HERE;
-		r.gr[2] = (unsigned long)__builtin_return_address(0);
-		r.gr[30] = sp;
-		unwind_frame_init(&info, task, &r);
-	} else {
-		unwind_frame_init_from_blocked_task(&info, task);
-	}
+	unwind_frame_init_task(&info, task, NULL);
 
 	/* unwind stack and save entries in stack_trace struct */
 	trace->nr_entries = 0;
......
@@ -156,11 +156,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	int do_color_align, last_mmap;
 	struct vm_unmapped_area_info info;
 
-#ifdef CONFIG_64BIT
-	/* This should only ever run for 32-bit processes.  */
-	BUG_ON(!test_thread_flag(TIF_32BIT));
-#endif
-
 	/* requested length too big for entire address space */
 	if (len > TASK_SIZE)
 		return -ENOMEM;
......
@@ -108,12 +108,8 @@ linux_gateway_entry:
 	mtsp	%r0,%sr6			/* get kernel space into sr6 */
 
 #ifdef CONFIG_64BIT
-	/* for now we can *always* set the W bit on entry to the syscall
-	 * since we don't support wide userland processes.  We could
-	 * also save the current SM other than in r0 and restore it on
-	 * exit from the syscall, and also use that value to know
-	 * whether to do narrow or wide syscalls. -PB
-	 */
+	/* Store W bit on entry to the syscall in case it's a wide userland
+	 * process. */
 	ssm	PSW_SM_W, %r1
 	extrd,u	%r1,PSW_W_BIT,1,%r1
 	/* sp must be aligned on 4, so deposit the W bit setting into
@@ -227,8 +223,7 @@ linux_gateway_entry:
 	or,=	%r2,%r2,%r2
 	ldo	R%sys_call_table64(%r1), %r19
 #else
-	ldil	L%sys_call_table, %r1
-	ldo	R%sys_call_table(%r1), %r19
+	load32	sys_call_table, %r19
 #endif
 	comiclr,>>	__NR_Linux_syscalls, %r20, %r0
 	b,n	.Lsyscall_nosys
@@ -331,8 +326,6 @@ tracesys_next:
 	 * task->thread.regs.gr[20] above.
 	 */
 	copy	%ret0,%r20
-	ldil	L%sys_call_table,%r1
-	ldo     R%sys_call_table(%r1), %r19
 
 	ldo     -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1      /* get task ptr */
 	LDREG	TI_TASK(%r1), %r1
@@ -354,6 +347,23 @@ tracesys_next:
 	comiclr,>>	__NR_Linux_syscalls, %r20, %r0
 	b,n	.Ltracesys_nosys
 
+	/* Note!  We cannot use the syscall table that is mapped
+	nearby since the gateway page is mapped execute-only. */
+
+#ifdef CONFIG_64BIT
+	LDREG	TASK_PT_GR30(%r1), %r19		/* get users sp back */
+	extrd,u	%r19,63,1,%r2			/* W hidden in bottom bit */
+
+	ldil	L%sys_call_table, %r1
+	or,=	%r2,%r2,%r2
+	addil	L%(sys_call_table64-sys_call_table), %r1
+	ldo	R%sys_call_table(%r1), %r19
+	or,=	%r2,%r2,%r2
+	ldo	R%sys_call_table64(%r1), %r19
+#else
+	load32	sys_call_table, %r19
+#endif
+
 	LDREGX  %r20(%r19), %r19
 
 	/* If this is a sys_rt_sigreturn call, and the signal was received
@@ -464,16 +474,13 @@ tracesys_sigexit:
 
 lws_start:
 
 #ifdef CONFIG_64BIT
-	/* FIXME: If we are a 64-bit kernel just
-	 *        turn this on unconditionally.
-	 */
 	ssm	PSW_SM_W, %r1
 	extrd,u	%r1,PSW_W_BIT,1,%r1
 	/* sp must be aligned on 4, so deposit the W bit setting into
 	 * the bottom of sp temporarily */
 	or,ev	%r1,%r30,%r30
 
-	/* Clip LWS number to a 32-bit value always */
+	/* Clip LWS number to a 32-bit value for 32-bit processes */
 	depdi	0, 31, 32, %r20
 #endif
......
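Both the regular gateway path and the ptrace path above now pick the syscall table from the W bit that the entry code hides in the bottom bit of the saved user sp. The same selection, sketched in C with illustrative names (the table symbols come from the patch; the function, the typedef, and the declared element type are assumptions made for this sketch):

typedef long (*syscall_fn_t)(unsigned long, unsigned long, unsigned long,
			     unsigned long, unsigned long, unsigned long);

extern const syscall_fn_t sys_call_table[];	/* narrow (32-bit) entry points */
extern const syscall_fn_t sys_call_table64[];	/* wide (64-bit) entry points */

static syscall_fn_t pick_syscall(unsigned long saved_user_sp, unsigned long nr)
{
	int wide = saved_user_sp & 1;		/* W bit stashed in bit 0 of sp */

	if (nr >= __NR_Linux_syscalls)
		return NULL;			/* falls through to the ENOSYS path */
	return wide ? sys_call_table64[nr] : sys_call_table[nr];
}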
@@ -45,7 +45,7 @@
 
 #include "../math-emu/math-emu.h"	/* for handle_fpe() */
 
-static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
+static void parisc_show_stack(struct task_struct *task,
 	struct pt_regs *regs);
 
 static int printbinary(char *buf, unsigned long x, int nbits)
@@ -152,7 +152,7 @@ void show_regs(struct pt_regs *regs)
 		printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
 		printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);
 
-		parisc_show_stack(current, NULL, regs);
+		parisc_show_stack(current, regs);
 	}
 }
@@ -185,44 +185,19 @@ static void do_show_stack(struct unwind_frame_info *info)
 	printk(KERN_CRIT "\n");
 }
 
-static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
+static void parisc_show_stack(struct task_struct *task,
 	struct pt_regs *regs)
 {
 	struct unwind_frame_info info;
-	struct task_struct *t;
-
-	t = task ? task : current;
-	if (regs) {
-		unwind_frame_init(&info, t, regs);
-		goto show_stack;
-	}
-
-	if (t == current) {
-		unsigned long sp;
-
-HERE:
-		asm volatile ("copy %%r30, %0" : "=r"(sp));
-		{
-			struct pt_regs r;
 
-			memset(&r, 0, sizeof(struct pt_regs));
-			r.iaoq[0] = (unsigned long)&&HERE;
-			r.gr[2] = (unsigned long)__builtin_return_address(0);
-			r.gr[30] = sp;
-			unwind_frame_init(&info, current, &r);
-		}
-	} else {
-		unwind_frame_init_from_blocked_task(&info, t);
-	}
+	unwind_frame_init_task(&info, task, regs);
 
-show_stack:
 	do_show_stack(&info);
 }
 
 void show_stack(struct task_struct *t, unsigned long *sp)
 {
-	return parisc_show_stack(t, sp, NULL);
+	parisc_show_stack(t, NULL);
 }
 
 int is_valid_bugaddr(unsigned long iaoq)
@@ -557,7 +532,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
 		cpu_lpmc(5, regs);
 		return;
 
-	case  6:
+	case  PARISC_ITLB_TRAP:
 		/* Instruction TLB miss fault/Instruction page fault */
 		fault_address = regs->iaoq[0];
 		fault_space   = regs->iasq[0];
......
@@ -209,6 +209,8 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int
 	 * We have to use void * instead of a function pointer, because
 	 * function pointers aren't a pointer to the function on 64-bit.
 	 * Make them const so the compiler knows they live in .text
+	 * Note: We could use dereference_kernel_function_descriptor()
+	 * instead but we want to keep it simple here.
 	 */
 	extern void * const handle_interruption;
 	extern void * const ret_from_kernel_thread;
@@ -216,7 +218,7 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int
 	extern void * const intr_return;
 	extern void * const _switch_to_ret;
 #ifdef CONFIG_IRQSTACKS
-	extern void * const call_on_stack;
+	extern void * const _call_on_stack;
 #endif /* CONFIG_IRQSTACKS */
 
 	if (pc == (unsigned long) &handle_interruption) {
@@ -251,7 +253,7 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int
 	}
 
 #ifdef CONFIG_IRQSTACKS
-	if (pc == (unsigned long) &call_on_stack) {
+	if (pc == (unsigned long) &_call_on_stack) {
 		info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ);
 		info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET);
 		return 1;
@@ -403,9 +405,31 @@ void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct
 	kfree(r2);
 }
 
-void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs)
+#define get_parisc_stackpointer() ({ \
+	unsigned long sp; \
+	__asm__("copy %%r30, %0" : "=r"(sp)); \
+	(sp); \
+})
+
+void unwind_frame_init_task(struct unwind_frame_info *info,
+	struct task_struct *task, struct pt_regs *regs)
 {
-	unwind_frame_init(info, current, regs);
+	task = task ? task : current;
+
+	if (task == current) {
+		struct pt_regs r;
+
+		if (!regs) {
+			memset(&r, 0, sizeof(r));
+			r.iaoq[0] = _THIS_IP_;
+			r.gr[2] = _RET_IP_;
+			r.gr[30] = get_parisc_stackpointer();
+			regs = &r;
+		}
+		unwind_frame_init(info, task, regs);
+	} else {
+		unwind_frame_init_from_blocked_task(info, task);
+	}
 }
 
 int unwind_once(struct unwind_frame_info *next_frame)
@@ -442,19 +466,12 @@ int unwind_to_user(struct unwind_frame_info *info)
 unsigned long return_address(unsigned int level)
 {
 	struct unwind_frame_info info;
-	struct pt_regs r;
-	unsigned long sp;
 
 	/* initialize unwind info */
-	asm volatile ("copy %%r30, %0" : "=r"(sp));
-	memset(&r, 0, sizeof(struct pt_regs));
-	r.iaoq[0] = _THIS_IP_;
-	r.gr[2] = _RET_IP_;
-	r.gr[30] = sp;
-	unwind_frame_init(&info, current, &r);
+	unwind_frame_init_task(&info, current, NULL);
 
 	/* unwind stack */
-	++level;
+	level += 2;
 	do {
 		if (unwind_once(&info) < 0 || info.ip == 0)
 			return 0;
......
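return_address() above now bumps the level by 2 rather than 1, presumably because the starting pt_regs is synthesized one frame deeper, inside unwind_frame_init_task(), so one extra unwind step is needed before caller-visible levels line up. A hypothetical caller, for illustration only:

#include <linux/printk.h>
#include <asm/ftrace.h>

/* Illustrative only: ask the unwinder who sits a given number of frames
 * up the current call chain. */
static noinline void report_frame(unsigned int level)
{
	printk(KERN_DEBUG "frame %u: %pS\n", level,
	       (void *)return_address(level));
}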