Commit c8921d72 authored by Helge Deller

parisc: Fix and improve kernel stack unwinding

This patch set fixes and substantially improves kernel stack unwinding:
1. Show backtraces with up to 30 callsites
2. Add callinfo to ENTRY_CFI() so that every assembler function gets an
   entry in the unwind table
3. Use constants instead of hard-coded numbers in call_on_stack()
4. Do not depend on CONFIG_KALLSYMS to generate backtraces (see the
   sketch below)
5. Speed up backtrace generation

Make sure you have this patch for GNU as (the GNU assembler) installed:
https://sourceware.org/ml/binutils/2018-07/msg00474.html
Without that patch, the unwind info emitted for various kernel functions
is often wrong.
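
For illustration, the backtrace loop after this patch looks roughly like
this (a sketch modeled on the do_show_stack() hunk below, not verbatim
kernel source): at most MAX_UNWIND_ENTRIES frames are walked, and special
frames are recognized in unwind_special() by address instead of by a
kallsyms name lookup.

  /* Sketch: bounded, kallsyms-free backtrace loop */
  #include <linux/kernel.h>
  #include <asm/unwind.h>

  static void show_backtrace(struct unwind_frame_info *info)
  {
  	int i = 1;

  	printk(KERN_CRIT "Backtrace:\n");
  	while (i <= MAX_UNWIND_ENTRIES) {
  		if (unwind_once(info) < 0 || info->ip == 0)
  			break;
  		/* the real code also filters on __kernel_text_address() */
  		printk(KERN_CRIT " [<%px>] %pS\n",
  		       (void *) info->ip, (void *) info->ip);
  		i++;
  	}
  }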
Signed-off-by: Helge Deller <deller@gmx.de>
parent 3b885ac1
@@ -36,6 +36,7 @@
 #define RP_OFFSET	16
 #define FRAME_SIZE	128
 #define CALLEE_REG_FRAME_SIZE	144
+#define REG_SZ		8
 #define ASM_ULONG_INSN	.dword
 #else	/* CONFIG_64BIT */
 #define LDREG	ldw
@@ -50,6 +51,7 @@
 #define RP_OFFSET	20
 #define FRAME_SIZE	64
 #define CALLEE_REG_FRAME_SIZE	128
+#define REG_SZ		4
 #define ASM_ULONG_INSN	.word
 #endif

@@ -18,9 +18,9 @@
 #ifdef __ASSEMBLY__

 #define ENTRY(name) \
-	.export name !\
 	ALIGN	!\
-name:
+name:	ASM_NL\
+	.export name

 #ifdef CONFIG_64BIT
 #define ENDPROC(name) \
@@ -31,13 +31,18 @@
 	END(name)
 #endif

-#define ENTRY_CFI(name) \
+#define ENTRY_CFI(name, ...) \
 	ENTRY(name)	ASM_NL\
+	.proc	ASM_NL\
+	.callinfo __VA_ARGS__	ASM_NL\
+	.entry	ASM_NL\
 	CFI_STARTPROC

 #define ENDPROC_CFI(name) \
-	ENDPROC(name)	ASM_NL\
-	CFI_ENDPROC
+	CFI_ENDPROC	ASM_NL\
+	.exit	ASM_NL\
+	.procend	ASM_NL\
+	ENDPROC(name)

 #endif /* __ASSEMBLY__ */

@@ -4,6 +4,9 @@

 #include <linux/list.h>

+/* Max number of levels to backtrace */
+#define MAX_UNWIND_ENTRIES	30
+
 /* From ABI specifications */
 struct unwind_table_entry {
 	unsigned int region_start;

@@ -766,7 +766,6 @@ END(fault_vector_11)
 #endif

 	/* Fault vector is separately protected and *must* be on its own page */
 	.align		PAGE_SIZE
-ENTRY(end_fault_vector)

 	.import	handle_interruption,code
 	.import	do_cpu_irq_mask,code
@@ -778,7 +777,6 @@ ENTRY(end_fault_vector)
 	 */

 ENTRY_CFI(ret_from_kernel_thread)
-
 	/* Call schedule_tail first though */
 	BL	schedule_tail, %r2
 	nop
@@ -817,8 +815,9 @@ ENTRY_CFI(_switch_to)
 	LDREG	TASK_THREAD_INFO(%r25), %r25
 	bv	%r0(%r2)
 	mtctl   %r25,%cr30
+ENDPROC_CFI(_switch_to)

-_switch_to_ret:
+ENTRY_CFI(_switch_to_ret)
 	mtctl	%r0, %cr0		/* Needed for single stepping */
 	callee_rest
 	callee_rest_float
@@ -826,7 +825,7 @@ _switch_to_ret:
 	LDREG	-RP_OFFSET(%r30), %r2
 	bv	%r0(%r2)
 	copy	%r26, %r28
-ENDPROC_CFI(_switch_to)
+ENDPROC_CFI(_switch_to_ret)

 /*
  * Common rfi return path for interruptions, kernel execve, and
@@ -887,12 +886,14 @@ ENTRY_CFI(syscall_exit_rfi)
 	STREG	%r19,PT_SR5(%r16)
 	STREG	%r19,PT_SR6(%r16)
 	STREG	%r19,PT_SR7(%r16)
+ENDPROC_CFI(syscall_exit_rfi)

-intr_return:
+ENTRY_CFI(intr_return)
 	/* check for reschedule */
 	mfctl   %cr30,%r1
 	LDREG   TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
 	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
+ENDPROC_CFI(intr_return)

 	.import do_notify_resume,code
 intr_check_sig:
@@ -1048,7 +1049,6 @@ intr_extint:

 	b	do_cpu_irq_mask
 	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
-ENDPROC_CFI(syscall_exit_rfi)

 	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */
@@ -1999,12 +1999,9 @@ ENDPROC_CFI(syscall_exit)
 	.align	L1_CACHE_BYTES
 	.globl mcount
 	.type  mcount, @function
-ENTRY(mcount)
+ENTRY_CFI(mcount, caller)
 _mcount:
 	.export _mcount,data
-	.proc
-	.callinfo caller,frame=0
-	.entry
 	/*
 	 * The 64bit mcount() function pointer needs 4 dwords, of which the
 	 * first two are free.  We optimize it here and put 2 instructions for
@@ -2026,18 +2023,13 @@ ftrace_stub:
 	.dword mcount
 	.dword 0 /* code in head.S puts value of global gp here */
 #endif
-	.exit
-	.procend
-ENDPROC(mcount)
+ENDPROC_CFI(mcount)

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	.align 8
 	.globl return_to_handler
 	.type  return_to_handler, @function
-ENTRY_CFI(return_to_handler)
-	.proc
-	.callinfo caller,frame=FRAME_SIZE
-	.entry
+ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
 	.export parisc_return_to_handler,data
 parisc_return_to_handler:
 	copy %r3,%r1
@@ -2076,8 +2068,6 @@ parisc_return_to_handler:
 	bv	%r0(%rp)
 #endif
 	LDREGM	-FRAME_SIZE(%sp),%r3
-	.exit
-	.procend
 ENDPROC_CFI(return_to_handler)

 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -2087,31 +2077,30 @@ ENDPROC_CFI(return_to_handler)
 #ifdef CONFIG_IRQSTACKS
 /* void call_on_stack(unsigned long param1, void *func,
 		      unsigned long new_stack) */
-ENTRY_CFI(call_on_stack)
+ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
 	copy	%sp, %r1

 	/* Regarding the HPPA calling conventions for function pointers,
 	   we assume the PIC register is not changed across call.  For
 	   CONFIG_64BIT, the argument pointer is left to point at the
 	   argument region allocated for the call to call_on_stack. */
+
+	/* Switch to new stack.  We allocate two frames.  */
+	ldo	2*FRAME_SIZE(%arg2), %sp
 # ifdef CONFIG_64BIT
-	/* Switch to new stack.  We allocate two 128 byte frames.  */
-	ldo	256(%arg2), %sp
 	/* Save previous stack pointer and return pointer in frame marker */
-	STREG	%rp, -144(%sp)
+	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
 	/* Calls always use function descriptor */
 	LDREG	16(%arg1), %arg1
 	bve,l	(%arg1), %rp
-	STREG	%r1, -136(%sp)
-	LDREG	-144(%sp), %rp
+	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
+	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
 	bve	(%rp)
-	LDREG	-136(%sp), %sp
+	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
 # else
-	/* Switch to new stack.  We allocate two 64 byte frames.  */
-	ldo	128(%arg2), %sp
 	/* Save previous stack pointer and return pointer in frame marker */
-	STREG	%r1, -68(%sp)
-	STREG	%rp, -84(%sp)
+	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
+	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
 	/* Calls use function descriptor if PLABEL bit is set */
 	bb,>=,n	%arg1, 30, 1f
 	depwi	0,31,2, %arg1
@@ -2119,9 +2108,9 @@ ENTRY_CFI(call_on_stack)
 1:
 	be,l	0(%sr4,%arg1), %sr0, %r31
 	copy	%r31, %rp
-	LDREG	-84(%sp), %rp
+	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
 	bv	(%rp)
-	LDREG	-68(%sp), %sp
+	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
 # endif /* CONFIG_64BIT */
 ENDPROC_CFI(call_on_stack)
 #endif /* CONFIG_IRQSTACKS */
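
The symbolic offsets above can be checked against the magic numbers they
replace. A standalone sketch (plain userspace C, constants copied from the
assembly.h hunk of this patch; not kernel code):

  #include <assert.h>

  int main(void)
  {
  	/* 64-bit ABI: FRAME_SIZE=128, RP_OFFSET=16, REG_SZ=8 */
  	assert(2 * 128 == 256);         /* ldo 2*FRAME_SIZE(%arg2) == old ldo 256 */
  	assert(-(128 + 16) == -144);    /* -FRAME_SIZE-RP_OFFSET  == old -144    */
  	assert(-(128 + 8) == -136);     /* -FRAME_SIZE-REG_SZ     == old -136    */

  	/* 32-bit ABI: FRAME_SIZE=64, RP_OFFSET=20, REG_SZ=4 */
  	assert(2 * 64 == 128);          /* ldo 2*FRAME_SIZE(%arg2) == old ldo 128 */
  	assert(-(64 + 20) == -84);      /* -FRAME_SIZE-RP_OFFSET  == old -84     */
  	assert(-(64 + 4) == -68);       /* -FRAME_SIZE-REG_SZ     == old -68     */

  	return 0;
  }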
@@ -44,10 +44,6 @@
 	.align	16

 ENTRY_CFI(flush_tlb_all_local)
-	.proc
-	.callinfo NO_CALLS
-	.entry
-
 	/*
 	 * The pitlbe and pdtlbe instructions should only be used to
 	 * flush the entire tlb. Also, there needs to be no intervening
@@ -189,18 +185,11 @@ fdtdone:
 2:      bv	%r0(%r2)
 	nop
-
-	.exit
-	.procend
 ENDPROC_CFI(flush_tlb_all_local)

 	.import cache_info,data

 ENTRY_CFI(flush_instruction_cache_local)
-	.proc
-	.callinfo NO_CALLS
-	.entry
-
 	load32	cache_info, %r1

 	/* Flush Instruction Cache */
@@ -256,18 +245,11 @@ fisync:
 	mtsm	%r22			/* restore I-bit */
 	bv	%r0(%r2)
 	nop
-
-	.exit
-	.procend
 ENDPROC_CFI(flush_instruction_cache_local)

 	.import cache_info, data

 ENTRY_CFI(flush_data_cache_local)
-	.proc
-	.callinfo NO_CALLS
-	.entry
-
 	load32	cache_info, %r1

 	/* Flush Data Cache */
@@ -324,9 +306,6 @@ fdsync:
 	mtsm	%r22			/* restore I-bit */
 	bv	%r0(%r2)
 	nop
-
-	.exit
-	.procend
 ENDPROC_CFI(flush_data_cache_local)

 /* Macros to serialize TLB purge operations on SMP.  */
@@ -362,10 +341,6 @@ ENDPROC_CFI(flush_data_cache_local)
 /* Clear page using kernel mapping.  */

 ENTRY_CFI(clear_page_asm)
-	.proc
-	.callinfo NO_CALLS
-	.entry
-
 #ifdef CONFIG_64BIT

 	/* Unroll the loop.  */
@@ -424,18 +399,11 @@ ENTRY_CFI(clear_page_asm)
 #endif
 	bv	%r0(%r2)
 	nop
-
-	.exit
-	.procend
 ENDPROC_CFI(clear_page_asm)

 /* Copy page using kernel mapping.  */

 ENTRY_CFI(copy_page_asm)
-	.proc
-	.callinfo NO_CALLS
-	.entry
-
 #ifdef CONFIG_64BIT
 	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
 	 * Unroll the loop by hand and arrange insn appropriately.
@@ -542,9 +510,6 @@ ENTRY_CFI(copy_page_asm)
 #endif
 	bv	%r0(%r2)
 	nop
-
-	.exit
-	.procend
 ENDPROC_CFI(copy_page_asm)

 /*
@@ -598,10 +563,6 @@ ENDPROC_CFI(copy_page_asm)
  */

 ENTRY_CFI(copy_user_page_asm)
-	.proc
-	.callinfo NO_CALLS
-	.entry
-
 	/* Convert virtual `to' and `from' addresses to physical addresses.
 	   Move `from' physical address to non shadowed register.  */
 	ldil	L%(__PAGE_OFFSET), %r1
@@ -750,16 +711,9 @@ ENTRY_CFI(copy_user_page_asm)
 	bv	%r0(%r2)
 	nop
-
-	.exit
-	.procend
 ENDPROC_CFI(copy_user_page_asm)

 ENTRY_CFI(clear_user_page_asm)
-	.proc
-	.callinfo NO_CALLS
-	.entry
-
 	tophys_r1 %r26

 	ldil	L%(TMPALIAS_MAP_START), %r28
@@ -836,16 +790,9 @@ ENTRY_CFI(clear_user_page_asm)
 	bv	%r0(%r2)
 	nop
-
-	.exit
-	.procend
 ENDPROC_CFI(clear_user_page_asm)

 ENTRY_CFI(flush_dcache_page_asm)
-	.proc
-	.callinfo NO_CALLS
-	.entry
-
 	ldil	L%(TMPALIAS_MAP_START), %r28
 #ifdef CONFIG_64BIT
 #if (TMPALIAS_MAP_START >= 0x80000000)
@@ -903,16 +850,9 @@ ENTRY_CFI(flush_dcache_page_asm)
 	sync
 	bv	%r0(%r2)
 	nop
-
-	.exit
-	.procend
 ENDPROC_CFI(flush_dcache_page_asm)

 ENTRY_CFI(flush_icache_page_asm)
-	.proc
-	.callinfo NO_CALLS
-	.entry
-
 	ldil	L%(TMPALIAS_MAP_START), %r28
 #ifdef CONFIG_64BIT
 #if (TMPALIAS_MAP_START >= 0x80000000)
@@ -977,16 +917,9 @@ ENTRY_CFI(flush_icache_page_asm)
 	sync
 	bv	%r0(%r2)
 	nop
-
-	.exit
-	.procend
 ENDPROC_CFI(flush_icache_page_asm)

 ENTRY_CFI(flush_kernel_dcache_page_asm)
-	.proc
-	.callinfo NO_CALLS
-	.entry
-
 	ldil	L%dcache_stride, %r1
 	ldw	R%dcache_stride(%r1), %r23
@@ -1020,16 +953,9 @@ ENTRY_CFI(flush_kernel_dcache_page_asm)
 	sync
 	bv	%r0(%r2)
 	nop
-
-	.exit
-	.procend
 ENDPROC_CFI(flush_kernel_dcache_page_asm)

 ENTRY_CFI(purge_kernel_dcache_page_asm)
-	.proc
-	.callinfo NO_CALLS
-	.entry
-
 	ldil	L%dcache_stride, %r1
 	ldw	R%dcache_stride(%r1), %r23
@@ -1062,16 +988,9 @@ ENTRY_CFI(purge_kernel_dcache_page_asm)
 	sync
 	bv	%r0(%r2)
 	nop
-
-	.exit
-	.procend
 ENDPROC_CFI(purge_kernel_dcache_page_asm)

 ENTRY_CFI(flush_user_dcache_range_asm)
-	.proc
-	.callinfo NO_CALLS
-	.entry
-
 	ldil	L%dcache_stride, %r1
 	ldw	R%dcache_stride(%r1), %r23
 	ldo	-1(%r23), %r21
@@ -1083,16 +1002,9 @@ ENTRY_CFI(flush_user_dcache_range_asm)
 	sync
 	bv	%r0(%r2)
 	nop
-
-	.exit
-	.procend
 ENDPROC_CFI(flush_user_dcache_range_asm)

 ENTRY_CFI(flush_kernel_dcache_range_asm)
-	.proc
-	.callinfo NO_CALLS
-	.entry
-
 	ldil	L%dcache_stride, %r1
 	ldw	R%dcache_stride(%r1), %r23
 	ldo	-1(%r23), %r21
@@ -1105,16 +1017,9 @@ ENTRY_CFI(flush_kernel_dcache_range_asm)
 	syncdma
 	bv	%r0(%r2)
 	nop
-
-	.exit
-	.procend
 ENDPROC_CFI(flush_kernel_dcache_range_asm)

 ENTRY_CFI(purge_kernel_dcache_range_asm)
-	.proc
-	.callinfo NO_CALLS
-	.entry
-
 	ldil	L%dcache_stride, %r1
 	ldw	R%dcache_stride(%r1), %r23
 	ldo	-1(%r23), %r21
@@ -1127,16 +1032,9 @@ ENTRY_CFI(purge_kernel_dcache_range_asm)
 	syncdma
 	bv	%r0(%r2)
 	nop
-
-	.exit
-	.procend
 ENDPROC_CFI(purge_kernel_dcache_range_asm)

 ENTRY_CFI(flush_user_icache_range_asm)
-	.proc
-	.callinfo NO_CALLS
-	.entry
-
 	ldil	L%icache_stride, %r1
 	ldw	R%icache_stride(%r1), %r23
 	ldo	-1(%r23), %r21
@@ -1148,16 +1046,9 @@ ENTRY_CFI(flush_user_icache_range_asm)
 	sync
 	bv	%r0(%r2)
 	nop
-
-	.exit
-	.procend
 ENDPROC_CFI(flush_user_icache_range_asm)

 ENTRY_CFI(flush_kernel_icache_page)
-	.proc
-	.callinfo NO_CALLS
-	.entry
-
 	ldil	L%icache_stride, %r1
 	ldw	R%icache_stride(%r1), %r23
@@ -1191,16 +1082,9 @@ ENTRY_CFI(flush_kernel_icache_page)
 	sync
 	bv	%r0(%r2)
 	nop
-
-	.exit
-	.procend
 ENDPROC_CFI(flush_kernel_icache_page)

 ENTRY_CFI(flush_kernel_icache_range_asm)
-	.proc
-	.callinfo NO_CALLS
-	.entry
-
 	ldil	L%icache_stride, %r1
 	ldw	R%icache_stride(%r1), %r23
 	ldo	-1(%r23), %r21
@@ -1212,8 +1096,6 @@ ENTRY_CFI(flush_kernel_icache_range_asm)
 	sync
 	bv	%r0(%r2)
 	nop
-	.exit
-	.procend
 ENDPROC_CFI(flush_kernel_icache_range_asm)

 	__INIT
@@ -1223,10 +1105,6 @@ ENDPROC_CFI(flush_kernel_icache_range_asm)
  */
 	.align	256
 ENTRY_CFI(disable_sr_hashing_asm)
-	.proc
-	.callinfo NO_CALLS
-	.entry
-
 	/*
 	 * Switch to real mode
 	 */
@@ -1308,9 +1186,6 @@ srdis_done:
 2:      bv	%r0(%r2)
 	nop
-
-	.exit
-	.procend
 ENDPROC_CFI(disable_sr_hashing_asm)

 	.end

@@ -302,7 +302,7 @@ get_wchan(struct task_struct *p)
 		ip = info.ip;
 		if (!in_sched_functions(ip))
 			return ip;
-	} while (count++ < 16);
+	} while (count++ < MAX_UNWIND_ENTRIES);
 	return 0;
 }

@@ -35,12 +35,6 @@ real32_stack:
 real64_stack:
 	.block	8192

-#ifdef CONFIG_64BIT
-# define REG_SZ 8
-#else
-# define REG_SZ 4
-#endif
-
 #define N_SAVED_REGS 9

 save_cr_space:

@@ -172,7 +172,7 @@ static void do_show_stack(struct unwind_frame_info *info)
 	int i = 1;

 	printk(KERN_CRIT "Backtrace:\n");
-	while (i <= 16) {
+	while (i <= MAX_UNWIND_ENTRIES) {
 		if (unwind_once(info) < 0 || info->ip == 0)
 			break;

@@ -13,7 +13,6 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
-#include <linux/kallsyms.h>
 #include <linux/sort.h>
 #include <linux/uaccess.h>
@@ -117,7 +116,8 @@ unwind_table_init(struct unwind_table *table, const char *name,
 	for (; start <= end; start++) {
 		if (start < end &&
 		    start->region_end > (start+1)->region_start) {
-			printk("WARNING: Out of order unwind entry! %p and %p\n", start, start+1);
+			pr_warn("Out of order unwind entry! %px and %px\n",
+				start, start+1);
 		}

 		start->region_start += base_addr;
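
Side note on the format-string change: since v4.15, plain %p hashes
pointer values in the log, while %px prints the raw address. For this
warning the raw addresses of the two overlapping entries are the whole
point, hence %px. A hypothetical helper for illustration (not part of
the patch):

  #include <linux/printk.h>
  #include <asm/unwind.h>

  static void warn_overlap(const struct unwind_table_entry *start)
  {
  	/* %px deliberately prints unhashed addresses; they are the diagnostic */
  	pr_warn("Out of order unwind entry! %px and %px\n",
  		start, start + 1);
  }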
@@ -203,26 +203,61 @@ int __init unwind_init(void)
 	return 0;
 }

-#ifdef CONFIG_64BIT
-#define get_func_addr(fptr) fptr[2]
-#else
-#define get_func_addr(fptr) fptr[0]
-#endif
-
 static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
 {
-	extern void handle_interruption(int, struct pt_regs *);
-	static unsigned long *hi = (unsigned long *)&handle_interruption;
-
-	if (pc == get_func_addr(hi)) {
+	/*
+	 * We have to use void * instead of a function pointer, because
+	 * function pointers aren't a pointer to the function on 64-bit.
+	 * Make them const so the compiler knows they live in .text
+	 */
+	extern void * const handle_interruption;
+	extern void * const ret_from_kernel_thread;
+	extern void * const syscall_exit;
+	extern void * const intr_return;
+	extern void * const _switch_to_ret;
+#ifdef CONFIG_IRQSTACKS
+	extern void * const call_on_stack;
+#endif /* CONFIG_IRQSTACKS */
+
+	if (pc == (unsigned long) &handle_interruption) {
 		struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
 		dbg("Unwinding through handle_interruption()\n");
 		info->prev_sp = regs->gr[30];
 		info->prev_ip = regs->iaoq[0];
 		return 1;
 	}

+	if (pc == (unsigned long) &ret_from_kernel_thread ||
+	    pc == (unsigned long) &syscall_exit) {
+		info->prev_sp = info->prev_ip = 0;
+		return 1;
+	}
+
+	if (pc == (unsigned long) &intr_return) {
+		struct pt_regs *regs;
+
+		dbg("Found intr_return()\n");
+		regs = (struct pt_regs *)(info->sp - PT_SZ_ALGN);
+		info->prev_sp = regs->gr[30];
+		info->prev_ip = regs->iaoq[0];
+		info->rp = regs->gr[2];
+		return 1;
+	}
+
+	if (pc == (unsigned long) &_switch_to_ret) {
+		info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
+		info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
+		return 1;
+	}
+
+#ifdef CONFIG_IRQSTACKS
+	if (pc == (unsigned long) &call_on_stack) {
+		info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ);
+		info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET);
+		return 1;
+	}
+#endif
+
 	return 0;
 }
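
The `extern void * const` declarations deserve a note: on 64-bit parisc a
C function pointer points at a function descriptor, not at the code, which
is exactly what the deleted get_func_addr() indexing compensated for. A
hedged sketch contrasting the two schemes (kernel-style C, illustration
only; the #if 0 arm is the old code):

  #include <asm/ptrace.h>		/* struct pt_regs */

  #if 0	/* old: function pointers reference descriptors on 64-bit */
  extern void handle_interruption(int, struct pt_regs *);
  static unsigned long *hi = (unsigned long *) &handle_interruption;
  # ifdef CONFIG_64BIT
  #  define get_func_addr(fptr)	fptr[2]	/* code address dug out of the descriptor */
  # else
  #  define get_func_addr(fptr)	fptr[0]
  # endif
  #else	/* new: a const-object declaration makes &sym the .text address itself */
  extern void * const handle_interruption;
  # define get_func_addr(sym)	((unsigned long) &(sym))
  #endif

The const qualifier additionally tells the compiler the symbol lives in
read-only storage, as the comment in the hunk notes.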
@@ -238,34 +273,8 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
 	if (e == NULL) {
 		unsigned long sp;

-		dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);
-
-#ifdef CONFIG_KALLSYMS
-		/* Handle some frequent special cases.... */
-		{
-			char symname[KSYM_NAME_LEN];
-			char *modname;
-
-			kallsyms_lookup(info->ip, NULL, NULL, &modname,
-				symname);
-
-			dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname);
-
-			if (strcmp(symname, "_switch_to_ret") == 0) {
-				info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
-				info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
-				dbg("_switch_to_ret @ %lx - setting "
-				    "prev_sp=%lx prev_ip=%lx\n",
-				    info->ip, info->prev_sp,
-				    info->prev_ip);
-				return;
-			} else if (strcmp(symname, "ret_from_kernel_thread") == 0 ||
-				   strcmp(symname, "syscall_exit") == 0) {
-				info->prev_ip = info->prev_sp = 0;
-				return;
-			}
-		}
-#endif
+		dbg("Cannot find unwind entry for %pS; forced unwinding\n",
+			(void *) info->ip);

 		/* Since we are doing the unwinding blind, we don't know if
 		   we are adjusting the stack correctly or extracting the rp
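
Taken together with the unwind_special() hunk above, deleting this
CONFIG_KALLSYMS block is a straight swap of name matching for address
matching. A condensed sketch of the two (the _switch_to_ret case only;
old_match/new_match are illustrative names, not verbatim source):

  #include <linux/kallsyms.h>
  #include <linux/string.h>
  #include <asm/assembly.h>	/* CALLEE_SAVE_FRAME_SIZE, RP_OFFSET */
  #include <asm/unwind.h>

  /* old: identify the frame by symbol name - needs CONFIG_KALLSYMS=y
   * and a string lookup on every unknown frame */
  static int old_match(struct unwind_frame_info *info)
  {
  	char symname[KSYM_NAME_LEN];
  	char *modname;

  	kallsyms_lookup(info->ip, NULL, NULL, &modname, symname);
  	return strcmp(symname, "_switch_to_ret") == 0;
  }

  /* new: compare against the symbol address directly - no kallsyms and
   * no per-frame string work, hence the speedup noted in the changelog */
  static int new_match(struct unwind_frame_info *info)
  {
  	extern void * const _switch_to_ret;

  	return info->ip == (unsigned long) &_switch_to_ret;
  }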
@@ -64,9 +64,6 @@
  */

 ENTRY_CFI(lclear_user)
-	.proc
-	.callinfo NO_CALLS
-	.entry
 	comib,=,n   0,%r25,$lclu_done
 	get_sr
 $lclu_loop:
@@ -81,13 +78,9 @@ $lclu_done:
 	ldo        1(%r25),%r25

 	ASM_EXCEPTIONTABLE_ENTRY(1b,2b)
-	.exit
 ENDPROC_CFI(lclear_user)

-	.procend
-
 /*
  * long lstrnlen_user(char *s, long n)
  *
@@ -97,9 +90,6 @@ ENDPROC_CFI(lclear_user)
  */

 ENTRY_CFI(lstrnlen_user)
-	.proc
-	.callinfo NO_CALLS
-	.entry
 	comib,=     0,%r25,$lslen_nzero
 	copy	    %r26,%r24
 	get_sr
@@ -111,7 +101,6 @@ $lslen_loop:
 $lslen_done:
 	bv          %r0(%r2)
 	sub	    %r26,%r24,%r28
-	.exit

 $lslen_nzero:
 	b           $lslen_done
@@ -125,9 +114,6 @@ $lslen_nzero:

 ENDPROC_CFI(lstrnlen_user)

-	.procend
-
 /*
  * unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
@@ -186,10 +172,6 @@ ENDPROC_CFI(lstrnlen_user)
 	save_len = r31

 ENTRY_CFI(pa_memcpy)
-	.proc
-	.callinfo NO_CALLS
-	.entry
-
 	/* Last destination address */
 	add	dst,len,end
@@ -439,9 +421,6 @@ ENTRY_CFI(pa_memcpy)
 	b	.Lcopy_done
 10:	stw,ma	t1,4(dstspc,dst)
 	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
-	.exit
 ENDPROC_CFI(pa_memcpy)
-	.procend

 	.end