Commit 997b611b authored by Linus Torvalds

Merge branch 'parisc-4.9-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc updates from Helge Deller:
 "Changes include:

   - Fix boot of 32bit SMP kernel (initial kernel mapping was too small)

   - Added hardened usercopy checks

   - Drop bootmem and switch to memblock and NO_BOOTMEM implementation

   - Drop the BROKEN_RODATA config option (and thus remove the relevant
     code from the generic headers and files because parisc was the last
     architecture which used this config option)

   - Improve segfault reporting by printing human readable error strings

   - Various smaller changes, e.g. dwarf debug support for assembly
     code, update comments regarding copy_user_page_asm, switch to
     kmalloc_array()"

* 'parisc-4.9-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: Increase KERNEL_INITIAL_SIZE for 32-bit SMP kernels
  parisc: Drop bootmem and switch to memblock
  parisc: Add hardened usercopy feature
  parisc: Add cfi_startproc and cfi_endproc to assembly code
  parisc: Move hpmc stack into page aligned bss section
  parisc: Fix self-detected CPU stall warnings on Mako machines
  parisc: Report trap type as human readable string
  parisc: Update comment regarding implementation of copy_user_page_asm
  parisc: Use kmalloc_array() in add_system_map_addresses()
  parisc: Check return value of smp_boot_one_cpu()
  parisc: Drop BROKEN_RODATA config option
parents 2c34ff14 690d097c
@@ -10,12 +10,13 @@ config PARISC
 	select RTC_CLASS
 	select RTC_DRV_GENERIC
 	select INIT_ALL_POSSIBLE
+	select HAVE_MEMBLOCK
+	select NO_BOOTMEM
 	select BUG
 	select BUILDTIME_EXTABLE_SORT
 	select HAVE_PERF_EVENTS
 	select GENERIC_ATOMIC64 if !64BIT
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
-	select BROKEN_RODATA
 	select GENERIC_IRQ_PROBE
 	select GENERIC_PCI_IOMAP
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -24,6 +25,7 @@ config PARISC
 	select SYSCTL_ARCH_UNALIGN_ALLOW
 	select SYSCTL_EXCEPTION_TRACE
 	select HAVE_MOD_ARCH_SPECIFIC
+	select HAVE_ARCH_HARDENED_USERCOPY
 	select VIRT_TO_BUS
 	select MODULES_USE_ELF_RELA
 	select CLONE_BACKWARDS
...
/*
* Copyright (C) 2016 Helge Deller <deller@gmx.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASM_PARISC_DWARF_H
#define _ASM_PARISC_DWARF_H
#ifdef __ASSEMBLY__
#define CFI_STARTPROC .cfi_startproc
#define CFI_ENDPROC .cfi_endproc
#define CFI_DEF_CFA .cfi_def_cfa
#define CFI_REGISTER .cfi_register
#define CFI_REL_OFFSET .cfi_rel_offset
#define CFI_UNDEFINED .cfi_undefined
#endif /* __ASSEMBLY__ */
#endif /* _ASM_PARISC_DWARF_H */
 #ifndef __ASM_PARISC_LINKAGE_H
 #define __ASM_PARISC_LINKAGE_H

+#include <asm/dwarf.h>
+
 #ifndef __ALIGN
 #define __ALIGN	.align 4
 #define __ALIGN_STR	".align 4"
@@ -10,6 +12,8 @@
  * In parisc assembly a semicolon marks a comment while a
  * exclamation mark is used to separate independent lines.
  */
+#define ASM_NL	!
+
 #ifdef __ASSEMBLY__

 #define ENTRY(name) \
@@ -26,6 +30,14 @@
 	END(name)
 #endif

+#define ENTRY_CFI(name) \
+	ENTRY(name) ASM_NL\
+	CFI_STARTPROC
+
+#define ENDPROC_CFI(name) \
+	ENDPROC(name) ASM_NL\
+	CFI_ENDPROC
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_PARISC_LINKAGE_H */
@@ -83,7 +83,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))

 /* This is the size of the initially mapped kernel memory */
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) || defined(CONFIG_SMP)
 #define KERNEL_INITIAL_ORDER	25	/* 1<<25 = 32MB */
 #else
 #define KERNEL_INITIAL_ORDER	24	/* 1<<24 = 16MB */
...
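For scale: KERNEL_INITIAL_SIZE is derived from this order further down the header (assumed here to be 1 << KERNEL_INITIAL_ORDER for the sketch), so a 32-bit SMP kernel now gets 32 MB of initial mapping instead of 16 MB. A tiny userspace check of the arithmetic:

#include <stdio.h>

/* Sketch only: mirrors the header's derivation of the initially
 * mapped size from the order (assumption: size = 1 << order). */
#define KERNEL_INITIAL_ORDER    25
#define KERNEL_INITIAL_SIZE     (1UL << KERNEL_INITIAL_ORDER)

int main(void)
{
        /* order 25 -> 33554432 bytes = 32 MB; order 24 -> 16 MB */
        printf("%lu MB\n", KERNEL_INITIAL_SIZE >> 20);
        return 0;
}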
@@ -11,6 +11,7 @@
 #include <linux/bug.h>
 #include <linux/string.h>
+#include <linux/thread_info.h>

 #define VERIFY_READ 0
 #define VERIFY_WRITE 1
@@ -201,10 +202,12 @@ extern long lstrnlen_user(const char __user *, long);
 #define clear_user lclear_user
 #define __clear_user lclear_user

-unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len);
-#define __copy_to_user copy_to_user
-unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long len);
-unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len);
+unsigned long __must_check __copy_to_user(void __user *dst, const void *src,
+					  unsigned long len);
+unsigned long __must_check __copy_from_user(void *dst, const void __user *src,
+					  unsigned long len);
+unsigned long copy_in_user(void __user *dst, const void __user *src,
+					  unsigned long len);
 #define __copy_in_user copy_in_user
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
@@ -217,23 +220,40 @@ static inline void copy_user_overflow(int size, unsigned long count)
 	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
 }

-static inline unsigned long __must_check copy_from_user(void *to,
-					  const void __user *from,
-					  unsigned long n)
+static __always_inline unsigned long __must_check
+copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	int sz = __compiletime_object_size(to);
 	unsigned long ret = n;

-	if (likely(sz == -1 || sz >= n))
-		ret = __copy_from_user(to, from, n);
-	else if (!__builtin_constant_p(n))
+	if (likely(sz < 0 || sz >= n)) {
+		check_object_size(to, n, false);
+		ret = __copy_from_user(to, from, n);
+	} else if (!__builtin_constant_p(n))
 		copy_user_overflow(sz, n);
 	else
 		__bad_copy_user();

 	if (unlikely(ret))
 		memset(to + (n - ret), 0, ret);

 	return ret;
 }

+static __always_inline unsigned long __must_check
+copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	int sz = __compiletime_object_size(from);
+
+	if (likely(sz < 0 || sz >= n)) {
+		check_object_size(from, n, true);
+		n = __copy_to_user(to, from, n);
+	} else if (!__builtin_constant_p(n))
+		copy_user_overflow(sz, n);
+	else
+		__bad_copy_user();
+
+	return n;
+}

 struct pt_regs;
...
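The hardened copy_from_user()/copy_to_user() wrappers above hinge on the compiler reporting the source or destination object's size at compile time; the copy proceeds only when that size is unknown (negative) or large enough. A minimal userspace sketch of the same pattern, using __builtin_object_size() directly (toy_copy() and its callers are hypothetical, for illustration only; build with -O1 or higher so the builtin can see object sizes):

#include <stdio.h>
#include <string.h>

/* Toy analogue of __compiletime_object_size(): -1 means "unknown". */
#define object_size(p) ((long)__builtin_object_size(p, 0))

/* Copy only when the object size is unknown or large enough,
 * mirroring the sz < 0 || sz >= n test in the hunk above. */
static void toy_copy(void *to, long sz, const void *from, unsigned long n)
{
        if (sz < 0 || (unsigned long)sz >= n)
                memcpy(to, from, n);    /* would be __copy_from_user() */
        else
                fprintf(stderr, "blocked: overflow (%ld < %lu)\n", sz, n);
}

int main(void)
{
        char small[8];
        char src[64] = "source bytes";

        toy_copy(small, object_size(small), src, sizeof(small)); /* copies */
        toy_copy(small, object_size(small), src, sizeof(src));   /* blocked */
        return 0;
}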
@@ -766,7 +766,7 @@ ENTRY(end_fault_vector)
	 * copy_thread moved args into task save area.
	 */

-ENTRY(ret_from_kernel_thread)
+ENTRY_CFI(ret_from_kernel_thread)

	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
@@ -782,7 +782,7 @@ ENTRY(ret_from_kernel_thread)
	copy	%r31, %r2
	b	finish_child_return
	nop
-ENDPROC(ret_from_kernel_thread)
+ENDPROC_CFI(ret_from_kernel_thread)

 /*
@@ -790,7 +790,7 @@ ENDPROC(ret_from_kernel_thread)
  *	struct task_struct *next)
  *
  * switch kernel stacks and return prev */
-ENTRY(_switch_to)
+ENTRY_CFI(_switch_to)
	STREG	%r2, -RP_OFFSET(%r30)

	callee_save_float
@@ -815,7 +815,7 @@ _switch_to_ret:
	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28
-ENDPROC(_switch_to)
+ENDPROC_CFI(_switch_to)

 /*
  * Common rfi return path for interruptions, kernel execve, and
@@ -833,7 +833,7 @@ ENDPROC(_switch_to)
	.align	PAGE_SIZE

-ENTRY(syscall_exit_rfi)
+ENTRY_CFI(syscall_exit_rfi)
	mfctl	%cr30,%r16
	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
	ldo	TASK_REGS(%r16),%r16
@@ -1037,12 +1037,12 @@ intr_extint:
	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
-ENDPROC(syscall_exit_rfi)
+ENDPROC_CFI(syscall_exit_rfi)

 /* Generic interruptions (illegal insn, unaligned, page fault, etc) */

-ENTRY(intr_save)		/* for os_hpmc */
+ENTRY_CFI(intr_save)		/* for os_hpmc */
	mfsp	%sr7,%r16
	cmpib,COND(=),n 0,%r16,1f
	get_stack_use_cr30
@@ -1117,7 +1117,7 @@ skip_save_ior:
	b	handle_interruption
	ldo	R%intr_check_sig(%r2), %r2
-ENDPROC(intr_save)
+ENDPROC_CFI(intr_save)

 /*
@@ -1720,7 +1720,7 @@ dtlb_fault:
	.endm

	.macro	fork_like name
-ENTRY(sys_\name\()_wrapper)
+ENTRY_CFI(sys_\name\()_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1
	reg_save %r1
@@ -1728,7 +1728,7 @@ ENTRY(sys_\name\()_wrapper)
	ldil	L%sys_\name, %r31
	be	R%sys_\name(%sr4,%r31)
	STREG	%r28, PT_CR27(%r1)
-ENDPROC(sys_\name\()_wrapper)
+ENDPROC_CFI(sys_\name\()_wrapper)
	.endm

 fork_like clone
@@ -1736,7 +1736,7 @@ fork_like fork
 fork_like vfork

	/* Set the return value for the child */
-ENTRY(child_return)
+ENTRY_CFI(child_return)
	BL	schedule_tail, %r2
	nop
 finish_child_return:
@@ -1748,9 +1748,9 @@ finish_child_return:
	reg_restore %r1
	b	syscall_exit
	copy	%r0,%r28
-ENDPROC(child_return)
+ENDPROC_CFI(child_return)

-ENTRY(sys_rt_sigreturn_wrapper)
+ENTRY_CFI(sys_rt_sigreturn_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
@@ -1778,9 +1778,9 @@ ENTRY(sys_rt_sigreturn_wrapper)
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
-ENDPROC(sys_rt_sigreturn_wrapper)
+ENDPROC_CFI(sys_rt_sigreturn_wrapper)

-ENTRY(syscall_exit)
+ENTRY_CFI(syscall_exit)
	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
@@ -1979,7 +1979,7 @@ syscall_do_resched:
 #else
	nop
 #endif
-ENDPROC(syscall_exit)
+ENDPROC_CFI(syscall_exit)

 #ifdef CONFIG_FUNCTION_TRACER
@@ -2023,7 +2023,7 @@ ENDPROC(mcount)
	.align 8
	.globl return_to_handler
	.type return_to_handler, @function
-ENTRY(return_to_handler)
+ENTRY_CFI(return_to_handler)
	.proc
	.callinfo caller,frame=FRAME_SIZE
	.entry
@@ -2067,7 +2067,7 @@ parisc_return_to_handler:
	LDREGM	-FRAME_SIZE(%sp),%r3
	.exit
	.procend
-ENDPROC(return_to_handler)
+ENDPROC_CFI(return_to_handler)

 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -2076,7 +2076,7 @@ ENDPROC(return_to_handler)
 #ifdef CONFIG_IRQSTACKS
 /* void call_on_stack(unsigned long param1, void *func,
		       unsigned long new_stack) */
-ENTRY(call_on_stack)
+ENTRY_CFI(call_on_stack)
	copy	%sp, %r1

	/* Regarding the HPPA calling conventions for function pointers,
@@ -2112,10 +2112,10 @@ ENTRY(call_on_stack)
	bv	(%rp)
	LDREG	-68(%sp), %sp
 # endif /* CONFIG_64BIT */
-ENDPROC(call_on_stack)
+ENDPROC_CFI(call_on_stack)
 #endif /* CONFIG_IRQSTACKS */

-get_register:
+ENTRY_CFI(get_register)
	/*
	 * get_register is used by the non access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
@@ -2192,9 +2192,10 @@ get_register:
	copy	%r30,%r1
	bv	%r0(%r25)    /* r31 */
	copy	%r31,%r1
+ENDPROC_CFI(get_register)

-set_register:
+ENTRY_CFI(set_register)
	/*
	 * set_register is used by the non access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
@@ -2266,4 +2267,5 @@ set_register:
	copy	%r1,%r30
	bv	%r0(%r25)    /* r31 */
	copy	%r1,%r31
+ENDPROC_CFI(set_register)
@@ -41,12 +41,12 @@
  */

	.level		1.1
-	.data

 #include <asm/assembly.h>
 #include <asm/pdc.h>

 #include <linux/linkage.h>
+#include <linux/init.h>

 /*
  * stack for os_hpmc, the HPMC handler.
@@ -55,22 +55,26 @@
  * IODC requires 7K byte stack.  That leaves 1K byte for os_hpmc.
  */

+	__PAGE_ALIGNED_BSS
	.align 4096
 hpmc_stack:
	.block 16384

 #define HPMC_IODC_BUF_SIZE 0x8000

+	__PAGE_ALIGNED_BSS
	.align 4096
 hpmc_iodc_buf:
	.block HPMC_IODC_BUF_SIZE

+	.section .bss
	.align 8
 hpmc_raddr:
	.block 128

 #define HPMC_PIM_DATA_SIZE 896	/* Enough to hold all architected 2.0 state */

+	.section .bss
	.align 8
 ENTRY(hpmc_pim_data)
	.block HPMC_PIM_DATA_SIZE
@@ -79,7 +83,7 @@ END(hpmc_pim_data)

	.text
	.import intr_save, code
-ENTRY(os_hpmc)
+ENTRY_CFI(os_hpmc)
 .os_hpmc:

	/*
@@ -295,11 +299,11 @@ os_hpmc_6:
	b .
	nop
-ENDPROC(os_hpmc)
+ENDPROC_CFI(os_hpmc)
 .os_hpmc_end:
-	nop
-	.data
-	.align 4
+
+	__INITRODATA
	.export os_hpmc_size
 os_hpmc_size:
	.word .os_hpmc_end-.os_hpmc
@@ -506,7 +506,7 @@ add_system_map_addresses(struct parisc_device *dev, int num_addrs,
 	long status;
 	struct pdc_system_map_addr_info addr_result;

-	dev->addr = kmalloc(num_addrs * sizeof(unsigned long), GFP_KERNEL);
+	dev->addr = kmalloc_array(num_addrs, sizeof(*dev->addr), GFP_KERNEL);
 	if(!dev->addr) {
 		printk(KERN_ERR "%s %s(): memory allocation failure\n",
 			__FILE__, __func__);
...
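Switching to kmalloc_array() is more than a style fix: unlike the open-coded num_addrs * sizeof(...) multiplication, kmalloc_array() fails cleanly when the product would overflow. A userspace sketch of that guard (xmalloc_array() is a hypothetical stand-in, not the kernel implementation):

#include <stdint.h>
#include <stdlib.h>

/* Fail instead of silently wrapping when n * size overflows size_t,
 * the way an unchecked kmalloc(n * size, ...) could. */
static void *xmalloc_array(size_t n, size_t size)
{
        if (size != 0 && n > SIZE_MAX / size)
                return NULL;            /* n * size would overflow */
        return malloc(n * size);
}

int main(void)
{
        unsigned long *ok = xmalloc_array(16, sizeof(*ok));   /* fine */
        void *bad = xmalloc_array(SIZE_MAX / 2, 4);           /* NULL */

        free(ok);
        return bad == NULL ? 0 : 1;
}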
@@ -41,7 +41,7 @@
	.text
	.align	128

-ENTRY(flush_tlb_all_local)
+ENTRY_CFI(flush_tlb_all_local)
	.proc
	.callinfo NO_CALLS
	.entry
@@ -190,11 +190,11 @@ fdtdone:
	.exit
	.procend
-ENDPROC(flush_tlb_all_local)
+ENDPROC_CFI(flush_tlb_all_local)

	.import cache_info,data

-ENTRY(flush_instruction_cache_local)
+ENTRY_CFI(flush_instruction_cache_local)
	.proc
	.callinfo NO_CALLS
	.entry
@@ -257,11 +257,11 @@ fisync:
	.exit
	.procend
-ENDPROC(flush_instruction_cache_local)
+ENDPROC_CFI(flush_instruction_cache_local)

	.import cache_info, data
-ENTRY(flush_data_cache_local)
+ENTRY_CFI(flush_data_cache_local)
	.proc
	.callinfo NO_CALLS
	.entry
@@ -325,7 +325,7 @@ fdsync:
	.exit
	.procend
-ENDPROC(flush_data_cache_local)
+ENDPROC_CFI(flush_data_cache_local)

	.align	16
@@ -356,7 +356,7 @@ ENDPROC(flush_data_cache_local)

	/* Clear page using kernel mapping.  */

-ENTRY(clear_page_asm)
+ENTRY_CFI(clear_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry
@@ -422,11 +422,11 @@ ENTRY(clear_page_asm)
	.exit
	.procend
-ENDPROC(clear_page_asm)
+ENDPROC_CFI(clear_page_asm)

	/* Copy page using kernel mapping.  */

-ENTRY(copy_page_asm)
+ENTRY_CFI(copy_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry
@@ -540,7 +540,7 @@ ENTRY(copy_page_asm)
	.exit
	.procend
-ENDPROC(copy_page_asm)
+ENDPROC_CFI(copy_page_asm)

 /*
  * NOTE: Code in clear_user_page has a hard coded dependency on the
@@ -573,11 +573,17 @@ ENDPROC(copy_page_asm)
	.endm

 /*
- * We can't do this since copy_user_page is used to bring in
- * file data that might have instructions. Since the data would
- * then need to be flushed out so the i-fetch can see it, it
- * makes more sense to just copy through the kernel translation
- * and flush it.
+ * copy_user_page_asm() performs a page copy using mappings
+ * equivalent to the user page mappings.  It can be used to
+ * implement copy_user_page() but unfortunately both the `from'
+ * and `to' pages need to be flushed through mappings equivalent
+ * to the user mappings after the copy because the kernel accesses
+ * the `from' page through the kmap kernel mapping and the `to'
+ * page needs to be flushed since code can be copied.  As a
+ * result, this implementation is less efficient than the simpler
+ * copy using the kernel mapping.  It only needs the `from' page
+ * to flushed via the user mapping.  The kunmap routines handle
+ * the flushes needed for the kernel mapping.
  *
  * I'm still keeping this around because it may be possible to
  * use it if more information is passed into copy_user_page().
@@ -586,7 +592,7 @@ ENDPROC(copy_page_asm)
  *
  */

-ENTRY(copy_user_page_asm)
+ENTRY_CFI(copy_user_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry
@@ -742,9 +748,9 @@ ENTRY(copy_user_page_asm)
	.exit
	.procend
-ENDPROC(copy_user_page_asm)
+ENDPROC_CFI(copy_user_page_asm)

-ENTRY(clear_user_page_asm)
+ENTRY_CFI(clear_user_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry
@@ -828,9 +834,9 @@ ENTRY(clear_user_page_asm)
	.exit
	.procend
-ENDPROC(clear_user_page_asm)
+ENDPROC_CFI(clear_user_page_asm)

-ENTRY(flush_dcache_page_asm)
+ENTRY_CFI(flush_dcache_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry
@@ -904,9 +910,9 @@ ENTRY(flush_dcache_page_asm)
	.exit
	.procend
-ENDPROC(flush_dcache_page_asm)
+ENDPROC_CFI(flush_dcache_page_asm)

-ENTRY(flush_icache_page_asm)
+ENTRY_CFI(flush_icache_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry
@@ -982,9 +988,9 @@ ENTRY(flush_icache_page_asm)
	.exit
	.procend
-ENDPROC(flush_icache_page_asm)
+ENDPROC_CFI(flush_icache_page_asm)

-ENTRY(flush_kernel_dcache_page_asm)
+ENTRY_CFI(flush_kernel_dcache_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry
@@ -1025,9 +1031,9 @@ ENTRY(flush_kernel_dcache_page_asm)
	.exit
	.procend
-ENDPROC(flush_kernel_dcache_page_asm)
+ENDPROC_CFI(flush_kernel_dcache_page_asm)

-ENTRY(purge_kernel_dcache_page_asm)
+ENTRY_CFI(purge_kernel_dcache_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry
@@ -1067,9 +1073,9 @@ ENTRY(purge_kernel_dcache_page_asm)
	.exit
	.procend
-ENDPROC(purge_kernel_dcache_page_asm)
+ENDPROC_CFI(purge_kernel_dcache_page_asm)

-ENTRY(flush_user_dcache_range_asm)
+ENTRY_CFI(flush_user_dcache_range_asm)
	.proc
	.callinfo NO_CALLS
	.entry
@@ -1088,9 +1094,9 @@ ENTRY(flush_user_dcache_range_asm)
	.exit
	.procend
-ENDPROC(flush_user_dcache_range_asm)
+ENDPROC_CFI(flush_user_dcache_range_asm)

-ENTRY(flush_kernel_dcache_range_asm)
+ENTRY_CFI(flush_kernel_dcache_range_asm)
	.proc
	.callinfo NO_CALLS
	.entry
@@ -1110,9 +1116,9 @@ ENTRY(flush_kernel_dcache_range_asm)
	.exit
	.procend
-ENDPROC(flush_kernel_dcache_range_asm)
+ENDPROC_CFI(flush_kernel_dcache_range_asm)

-ENTRY(flush_user_icache_range_asm)
+ENTRY_CFI(flush_user_icache_range_asm)
	.proc
	.callinfo NO_CALLS
	.entry
@@ -1131,9 +1137,9 @@ ENTRY(flush_user_icache_range_asm)
	.exit
	.procend
-ENDPROC(flush_user_icache_range_asm)
+ENDPROC_CFI(flush_user_icache_range_asm)

-ENTRY(flush_kernel_icache_page)
+ENTRY_CFI(flush_kernel_icache_page)
	.proc
	.callinfo NO_CALLS
	.entry
@@ -1174,9 +1180,9 @@ ENTRY(flush_kernel_icache_page)
	.exit
	.procend
-ENDPROC(flush_kernel_icache_page)
+ENDPROC_CFI(flush_kernel_icache_page)

-ENTRY(flush_kernel_icache_range_asm)
+ENTRY_CFI(flush_kernel_icache_range_asm)
	.proc
	.callinfo NO_CALLS
	.entry
@@ -1194,13 +1200,13 @@ ENTRY(flush_kernel_icache_range_asm)
	nop
	.exit
	.procend
-ENDPROC(flush_kernel_icache_range_asm)
+ENDPROC_CFI(flush_kernel_icache_range_asm)

	/* align should cover use of rfi in disable_sr_hashing_asm and
	 * srdis_done.
	 */
	.align	256
-ENTRY(disable_sr_hashing_asm)
+ENTRY_CFI(disable_sr_hashing_asm)
	.proc
	.callinfo NO_CALLS
	.entry
@@ -1289,6 +1295,6 @@ srdis_done:
	.exit
	.procend
-ENDPROC(disable_sr_hashing_asm)
+ENDPROC_CFI(disable_sr_hashing_asm)

	.end
@@ -61,7 +61,7 @@ save_cr_end:
  * iodc_fn is the IODC function to call
  */

-ENTRY(real32_call_asm)
+ENTRY_CFI(real32_call_asm)
	STREG	%rp, -RP_OFFSET(%sp)	/* save RP */
 #ifdef CONFIG_64BIT
	callee_save
@@ -119,14 +119,14 @@ ric_ret:
	LDREG	-RP_OFFSET(%sp), %rp	/* restore RP */
	bv	0(%rp)
	nop
-ENDPROC(real32_call_asm)
+ENDPROC_CFI(real32_call_asm)

 #	define PUSH_CR(r, where) mfctl r, %r1 ! STREG,ma %r1, REG_SZ(where)
 #	define POP_CR(r, where) LDREG,mb -REG_SZ(where), %r1 ! mtctl %r1, r

	.text
-save_control_regs:
+ENTRY_CFI(save_control_regs)
	load32	PA(save_cr_space), %r28
	PUSH_CR(%cr24, %r28)
	PUSH_CR(%cr25, %r28)
@@ -139,8 +139,9 @@ save_control_regs:
	PUSH_CR(%cr15, %r28)
	bv 0(%r2)
	nop
+ENDPROC_CFI(save_control_regs)

-restore_control_regs:
+ENTRY_CFI(restore_control_regs)
	load32	PA(save_cr_end), %r26
	POP_CR(%cr15, %r26)
	POP_CR(%cr31, %r26)
@@ -153,13 +154,14 @@ restore_control_regs:
	POP_CR(%cr24, %r26)
	bv 0(%r2)
	nop
+ENDPROC_CFI(restore_control_regs)

 /* rfi_virt2real() and rfi_real2virt() could perhaps be adapted for
  * more general-purpose use by the several places which need RFIs
  */
	.text
	.align 128
-rfi_virt2real:
+ENTRY_CFI(rfi_virt2real)
	/* switch to real mode... */
	rsm	PSW_SM_I,%r0
	load32	PA(rfi_v2r_1), %r1
@@ -191,10 +193,11 @@ rfi_v2r_1:
	tophys_r1 %r2
	bv	0(%r2)
	nop
+ENDPROC_CFI(rfi_virt2real)

	.text
	.align 128
-rfi_real2virt:
+ENTRY_CFI(rfi_real2virt)
	rsm	PSW_SM_I,%r0
	load32	(rfi_r2v_1), %r1
	nop
@@ -225,6 +228,7 @@ rfi_r2v_1:
	tovirt_r1 %r2
	bv	0(%r2)
	nop
+ENDPROC_CFI(rfi_real2virt)

 #ifdef CONFIG_64BIT
@@ -238,7 +242,7 @@ rfi_r2v_1:
  * arg0p points to where saved arg values may be found
  * iodc_fn is the IODC function to call
  */

-ENTRY(real64_call_asm)
+ENTRY_CFI(real64_call_asm)
	std	%rp, -0x10(%sp)		/* save RP */
	std	%sp, -8(%arg0)		/* save SP on real-mode stack */
	copy	%arg0, %sp		/* adopt the real-mode SP */
@@ -284,7 +288,7 @@ r64_ret:
	ldd	-0x10(%sp), %rp		/* restore RP */
	bv	0(%rp)
	nop
-ENDPROC(real64_call_asm)
+ENDPROC_CFI(real64_call_asm)

 #endif
@@ -293,12 +297,12 @@ ENDPROC(real64_call_asm)
 ** GCC 3.3 and later has a new function in libgcc.a for
 ** comparing function pointers.
 */

-ENTRY(__canonicalize_funcptr_for_compare)
+ENTRY_CFI(__canonicalize_funcptr_for_compare)
 #ifdef CONFIG_64BIT
	bve (%r2)
 #else
	bv %r0(%r2)
 #endif
	copy %r26,%r28
-ENDPROC(__canonicalize_funcptr_for_compare)
+ENDPROC_CFI(__canonicalize_funcptr_for_compare)
@@ -38,6 +38,7 @@
 #include <linux/export.h>

 #include <asm/processor.h>
+#include <asm/sections.h>
 #include <asm/pdc.h>
 #include <asm/led.h>
 #include <asm/machdep.h>	/* for pa7300lc_init() proto */
@@ -140,6 +141,13 @@ void __init setup_arch(char **cmdline_p)
 #endif
	printk(KERN_CONT ".\n");

+	/*
+	 * Check if initial kernel page mappings are sufficient.
+	 * panic early if not, else we may access kernel functions
+	 * and variables which can't be reached.
+	 */
+	if (__pa((unsigned long) &_end) >= KERNEL_INITIAL_SIZE)
+		panic("KERNEL_INITIAL_ORDER too small!");
+
	pdc_console_init();
...
@@ -412,8 +412,8 @@ void smp_cpus_done(unsigned int cpu_max)

 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
-	if (cpu != 0 && cpu < parisc_max_cpus)
-		smp_boot_one_cpu(cpu, tidle);
+	if (cpu != 0 && cpu < parisc_max_cpus && smp_boot_one_cpu(cpu, tidle))
+		return -ENOSYS;

 	return cpu_online(cpu) ? 0 : -ENOSYS;
 }
...
@@ -226,12 +226,6 @@ void __init start_cpu_itimer(void)
 	unsigned int cpu = smp_processor_id();
 	unsigned long next_tick = mfctl(16) + clocktick;

-#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
-	/* With multiple 64bit CPUs online, the cr16's are not syncronized. */
-	if (cpu != 0)
-		clear_sched_clock_stable();
-#endif
-
 	mtctl(next_tick, 16);		/* kick off Interval Timer (CR16) */

 	per_cpu(cpu_data, cpu).it_value = next_tick;
...
@@ -138,8 +138,6 @@ SECTIONS
 	/* BSS */
 	BSS_SECTION(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE)

-	/* bootmap is allocated in setup_bootmem() directly behind bss. */
-
 	. = ALIGN(HUGEPAGE_SIZE);
 	_end = . ;
...
@@ -65,34 +65,34 @@
	.section .fixup, "ax"

	/* get_user() fixups, store -EFAULT in r8, and 0 in r9 */
-ENTRY(fixup_get_user_skip_1)
+ENTRY_CFI(fixup_get_user_skip_1)
	get_fault_ip %r1,%r8
	ldo 4(%r1), %r1
	ldi -EFAULT, %r8
	bv %r0(%r1)
	copy %r0, %r9
-ENDPROC(fixup_get_user_skip_1)
+ENDPROC_CFI(fixup_get_user_skip_1)

-ENTRY(fixup_get_user_skip_2)
+ENTRY_CFI(fixup_get_user_skip_2)
	get_fault_ip %r1,%r8
	ldo 8(%r1), %r1
	ldi -EFAULT, %r8
	bv %r0(%r1)
	copy %r0, %r9
-ENDPROC(fixup_get_user_skip_2)
+ENDPROC_CFI(fixup_get_user_skip_2)

	/* put_user() fixups, store -EFAULT in r8 */
-ENTRY(fixup_put_user_skip_1)
+ENTRY_CFI(fixup_put_user_skip_1)
	get_fault_ip %r1,%r8
	ldo 4(%r1), %r1
	bv %r0(%r1)
	ldi -EFAULT, %r8
-ENDPROC(fixup_put_user_skip_1)
+ENDPROC_CFI(fixup_put_user_skip_1)

-ENTRY(fixup_put_user_skip_2)
+ENTRY_CFI(fixup_put_user_skip_2)
	get_fault_ip %r1,%r8
	ldo 8(%r1), %r1
	bv %r0(%r1)
	ldi -EFAULT, %r8
-ENDPROC(fixup_put_user_skip_2)
+ENDPROC_CFI(fixup_put_user_skip_2)
@@ -67,7 +67,7 @@
 * otherwise, returns number of bytes not transferred.
 */

-ENTRY(lclear_user)
+ENTRY_CFI(lclear_user)
	.proc
	.callinfo NO_CALLS
	.entry
@@ -81,7 +81,7 @@ $lclu_done:
	bv	%r0(%r2)
	copy	%r25,%r28
	.exit
-ENDPROC(lclear_user)
+ENDPROC_CFI(lclear_user)

	.section .fixup,"ax"
 2:	fixup_branch $lclu_done
@@ -100,7 +100,7 @@ ENDPROC(lclear_user)
 * else strlen + 1 (i.e. includes zero byte).
 */

-ENTRY(lstrnlen_user)
+ENTRY_CFI(lstrnlen_user)
	.proc
	.callinfo NO_CALLS
	.entry
@@ -120,7 +120,7 @@ $lslen_done:
 $lslen_nzero:
	b	$lslen_done
	ldo	1(%r26),%r26	/* special case for N == 0 */
-ENDPROC(lstrnlen_user)
+ENDPROC_CFI(lstrnlen_user)

	.section .fixup,"ax"
 3:	fixup_branch $lslen_done
...
@@ -489,20 +489,23 @@ static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
 }

 #ifdef __KERNEL__
-unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len)
+unsigned long __copy_to_user(void __user *dst, const void *src,
+			     unsigned long len)
 {
	mtsp(get_kernel_space(), 1);
	mtsp(get_user_space(), 2);
	return pa_memcpy((void __force *)dst, src, len);
 }
+EXPORT_SYMBOL(__copy_to_user);

-EXPORT_SYMBOL(__copy_from_user);
-unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long len)
+unsigned long __copy_from_user(void *dst, const void __user *src,
+			       unsigned long len)
 {
	mtsp(get_user_space(), 1);
	mtsp(get_kernel_space(), 2);
	return pa_memcpy(dst, (void __force *)src, len);
 }
+EXPORT_SYMBOL(__copy_from_user);

 unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len)
 {
@@ -520,8 +523,6 @@ void * memcpy(void * dst,const void *src, size_t count)
	return dst;
 }

-EXPORT_SYMBOL(copy_to_user);
-EXPORT_SYMBOL(copy_from_user);
 EXPORT_SYMBOL(copy_in_user);
 EXPORT_SYMBOL(memcpy);
...
@@ -167,6 +167,43 @@ int fixup_exception(struct pt_regs *regs)
 	return 0;
 }

+/*
+ * parisc hardware trap list
+ *
+ * Documented in section 3 "Addressing and Access Control" of the
+ * "PA-RISC 1.1 Architecture and Instruction Set Reference Manual"
+ * https://parisc.wiki.kernel.org/index.php/File:Pa11_acd.pdf
+ *
+ * For implementation see handle_interruption() in traps.c
+ */
+static const char * const trap_description[] = {
+	[1] "High-priority machine check (HPMC)",
+	[2] "Power failure interrupt",
+	[3] "Recovery counter trap",
+	[5] "Low-priority machine check",
+	[6] "Instruction TLB miss fault",
+	[7] "Instruction access rights / protection trap",
+	[8] "Illegal instruction trap",
+	[9] "Break instruction trap",
+	[10] "Privileged operation trap",
+	[11] "Privileged register trap",
+	[12] "Overflow trap",
+	[13] "Conditional trap",
+	[14] "FP Assist Exception trap",
+	[15] "Data TLB miss fault",
+	[16] "Non-access ITLB miss fault",
+	[17] "Non-access DTLB miss fault",
+	[18] "Data memory protection/unaligned access trap",
+	[19] "Data memory break trap",
+	[20] "TLB dirty bit trap",
+	[21] "Page reference trap",
+	[22] "Assist emulation trap",
+	[25] "Taken branch trap",
+	[26] "Data memory access rights trap",
+	[27] "Data memory protection ID trap",
+	[28] "Unaligned data reference trap",
+};
+
 /*
  * Print out info about fatal segfaults, if the show_unhandled_signals
  * sysctl is set:
@@ -176,6 +213,8 @@ show_signal_msg(struct pt_regs *regs, unsigned long code,
 			unsigned long address, struct task_struct *tsk,
 			struct vm_area_struct *vma)
 {
+	const char *trap_name = NULL;
+
 	if (!unhandled_signal(tsk, SIGSEGV))
 		return;

@@ -186,8 +225,15 @@ show_signal_msg(struct pt_regs *regs, unsigned long code,
 	pr_warn("do_page_fault() command='%s' type=%lu address=0x%08lx",
 		tsk->comm, code, address);
 	print_vma_addr(KERN_CONT " in ", regs->iaoq[0]);
+
+	if (code < ARRAY_SIZE(trap_description))
+		trap_name = trap_description[code];
+	pr_warn(KERN_CONT " trap #%lu: %s%c", code,
+		trap_name ? trap_name : "unknown",
+		vma ? ',':'\n');
+
 	if (vma)
-		pr_warn(" vm_start = 0x%08lx, vm_end = 0x%08lx\n",
+		pr_warn(KERN_CONT " vm_start = 0x%08lx, vm_end = 0x%08lx\n",
 			vma->vm_start, vma->vm_end);

 	show_regs(regs);
...
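The lookup above combines a sparse designated-initializer table, an ARRAY_SIZE() bounds check, and an "unknown" fallback for the gaps. A self-contained C sketch of the same idiom (the table contents here are illustrative, not the kernel's):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Sparse table: indices without an initializer stay NULL. */
static const char *const names[] = {
        [1] = "one",
        [3] = "three",
};

static const char *lookup(unsigned long code)
{
        const char *n = NULL;

        if (code < ARRAY_SIZE(names))
                n = names[code];
        return n ? n : "unknown";
}

int main(void)
{
        /* prints: one unknown unknown */
        printf("%s %s %s\n", lookup(1), lookup(2), lookup(99));
        return 0;
}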
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/gfp.h>
 #include <linux/delay.h>
 #include <linux/init.h>
@@ -79,6 +80,34 @@ static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;
 physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
 int npmem_ranges __read_mostly;

+/*
+ * get_memblock() allocates pages via memblock.
+ * We can't use memblock_find_in_range(0, KERNEL_INITIAL_SIZE) here since it
+ * doesn't allocate from bottom to top which is needed because we only created
+ * the initial mapping up to KERNEL_INITIAL_SIZE in the assembly bootup code.
+ */
+static void * __init get_memblock(unsigned long size)
+{
+	static phys_addr_t search_addr __initdata;
+	phys_addr_t phys;
+
+	if (!search_addr)
+		search_addr = PAGE_ALIGN(__pa((unsigned long) &_end));
+	search_addr = ALIGN(search_addr, size);
+	while (!memblock_is_region_memory(search_addr, size) ||
+		memblock_is_region_reserved(search_addr, size)) {
+		search_addr += size;
+	}
+	phys = search_addr;
+
+	if (phys)
+		memblock_reserve(phys, size);
+	else
+		panic("get_memblock() failed.\n");
+
+	return __va(phys);
+}
+
 #ifdef CONFIG_64BIT
 #define MAX_MEM         (~0UL)
 #else /* !CONFIG_64BIT */
@@ -118,11 +147,7 @@ static void __init mem_limit_func(void)

 static void __init setup_bootmem(void)
 {
-	unsigned long bootmap_size;
 	unsigned long mem_max;
-	unsigned long bootmap_pages;
-	unsigned long bootmap_start_pfn;
-	unsigned long bootmap_pfn;
 #ifndef CONFIG_DISCONTIGMEM
 	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
 	int npmem_holes;
@@ -178,33 +203,29 @@ static void __init setup_bootmem(void)
 	}
 #endif

-	if (npmem_ranges > 1) {
-
-		/* Print the memory ranges */
-
-		printk(KERN_INFO "Memory Ranges:\n");
-
-		for (i = 0; i < npmem_ranges; i++) {
-			unsigned long start;
-			unsigned long size;
-
-			size = (pmem_ranges[i].pages << PAGE_SHIFT);
-			start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
-			printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
-				i,start, start + (size - 1), size >> 20);
-		}
-	}
-
-	sysram_resource_count = npmem_ranges;
-	for (i = 0; i < sysram_resource_count; i++) {
-		struct resource *res = &sysram_resources[i];
+	/* Print the memory ranges */
+	pr_info("Memory Ranges:\n");
+
+	for (i = 0; i < npmem_ranges; i++) {
+		struct resource *res = &sysram_resources[i];
+		unsigned long start;
+		unsigned long size;
+
+		size = (pmem_ranges[i].pages << PAGE_SHIFT);
+		start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
+		pr_info("%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
+			i, start, start + (size - 1), size >> 20);
+
+		/* request memory resource */
 		res->name = "System RAM";
-		res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
-		res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT)-1;
+		res->start = start;
+		res->end = start + size - 1;
 		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 		request_resource(&iomem_resource, res);
 	}

+	sysram_resource_count = npmem_ranges;
+
 	/*
 	 * For 32 bit kernels we limit the amount of memory we can
 	 * support, in order to preserve enough kernel address space
@@ -263,16 +284,9 @@ static void __init setup_bootmem(void)
 	}
 #endif

-	bootmap_pages = 0;
-	for (i = 0; i < npmem_ranges; i++)
-		bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);
-
-	bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;
-
 #ifdef CONFIG_DISCONTIGMEM
 	for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
 		memset(NODE_DATA(i), 0, sizeof(pg_data_t));
-		NODE_DATA(i)->bdata = &bootmem_node_data[i];
 	}
 	memset(pfnnid_map, 0xff, sizeof(pfnnid_map));

@@ -284,28 +298,24 @@ static void __init setup_bootmem(void)

 	/*
 	 * Initialize and free the full range of memory in each range.
-	 * Note that the only writing these routines do are to the bootmap,
-	 * and we've made sure to locate the bootmap properly so that they
-	 * won't be writing over anything important.
 	 */

-	bootmap_pfn = bootmap_start_pfn;
 	max_pfn = 0;
 	for (i = 0; i < npmem_ranges; i++) {
 		unsigned long start_pfn;
 		unsigned long npages;
+		unsigned long start;
+		unsigned long size;

 		start_pfn = pmem_ranges[i].start_pfn;
 		npages = pmem_ranges[i].pages;

-		bootmap_size = init_bootmem_node(NODE_DATA(i),
-						bootmap_pfn,
-						start_pfn,
-						(start_pfn + npages) );
-		free_bootmem_node(NODE_DATA(i),
-				(start_pfn << PAGE_SHIFT),
-				(npages << PAGE_SHIFT) );
-		bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		start = start_pfn << PAGE_SHIFT;
+		size = npages << PAGE_SHIFT;
+
+		/* add system RAM memblock */
+		memblock_add(start, size);
+
 		if ((start_pfn + npages) > max_pfn)
 			max_pfn = start_pfn + npages;
 	}
@@ -317,32 +327,22 @@ static void __init setup_bootmem(void)
 	 */
 	max_low_pfn = max_pfn;

-	/* bootmap sizing messed up? */
-	BUG_ON((bootmap_pfn - bootmap_start_pfn) != bootmap_pages);
-
 	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

 #define PDC_CONSOLE_IO_IODC_SIZE 32768

-	reserve_bootmem_node(NODE_DATA(0), 0UL,
-			(unsigned long)(PAGE0->mem_free +
-				PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT);
-	reserve_bootmem_node(NODE_DATA(0), __pa(KERNEL_BINARY_TEXT_START),
-			(unsigned long)(_end - KERNEL_BINARY_TEXT_START),
-			BOOTMEM_DEFAULT);
-	reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
-			((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT),
-			BOOTMEM_DEFAULT);
+	memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free +
+				PDC_CONSOLE_IO_IODC_SIZE));
+	memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),
+			(unsigned long)(_end - KERNEL_BINARY_TEXT_START));

 #ifndef CONFIG_DISCONTIGMEM

 	/* reserve the holes */

 	for (i = 0; i < npmem_holes; i++) {
-		reserve_bootmem_node(NODE_DATA(0),
-				(pmem_holes[i].start_pfn << PAGE_SHIFT),
-				(pmem_holes[i].pages << PAGE_SHIFT),
-				BOOTMEM_DEFAULT);
+		memblock_reserve((pmem_holes[i].start_pfn << PAGE_SHIFT),
+				(pmem_holes[i].pages << PAGE_SHIFT));
 	}
 #endif

@@ -360,8 +360,7 @@ static void __init setup_bootmem(void)
 			initrd_below_start_ok = 1;
 			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);

-			reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start),
-					initrd_reserve, BOOTMEM_DEFAULT);
+			memblock_reserve(__pa(initrd_start), initrd_reserve);
 		}
 	}
 #endif
@@ -439,7 +438,7 @@ static void __init map_pages(unsigned long start_vaddr,
 		 */

 		if (!pmd) {
-			pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE << PMD_ORDER);
+			pmd = (pmd_t *) get_memblock(PAGE_SIZE << PMD_ORDER);
 			pmd = (pmd_t *) __pa(pmd);
 		}

@@ -458,8 +457,7 @@ static void __init map_pages(unsigned long start_vaddr,
 			pg_table = (pte_t *)pmd_address(*pmd);
 			if (!pg_table) {
-				pg_table = (pte_t *)
-					alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
+				pg_table = (pte_t *) get_memblock(PAGE_SIZE);
 				pg_table = (pte_t *) __pa(pg_table);
 			}

@@ -737,7 +735,7 @@ static void __init pagetable_init(void)
 	}
 #endif

-	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
+	empty_zero_page = get_memblock(PAGE_SIZE);
 }

 static void __init gateway_init(void)
...
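get_memblock() above scans upward from _end, rounding its cursor with ALIGN() before each probe so every allocation stays naturally aligned inside the initially mapped region. The kernel's ALIGN() rounds up to a power-of-two boundary with the usual mask trick; a small sketch of that arithmetic (ALIGN_UP is a local name for illustration):

#include <stdio.h>

/* Round x up to a multiple of a, where a is a power of two --
 * the same mask arithmetic the kernel's ALIGN() macro uses. */
#define ALIGN_UP(x, a)  (((x) + ((a) - 1)) & ~((a) - 1))

int main(void)
{
        /* 0x1234 rounded up to a 0x1000 (4 KB page) boundary is 0x2000 */
        printf("0x%lx\n", ALIGN_UP(0x1234UL, 0x1000UL));
        return 0;
}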
@@ -41,21 +41,10 @@
    discard it in modules) */
 #define __init		__section(.init.text) __cold notrace
 #define __initdata	__section(.init.data)
-#define __initconst	__constsection(.init.rodata)
+#define __initconst	__section(.init.rodata)
 #define __exitdata	__section(.exit.data)
 #define __exit_call	__used __section(.exitcall.exit)

-/*
- * Some architecture have tool chains which do not handle rodata attributes
- * correctly. For those disable special sections for const, so that other
- * architectures can annotate correctly.
- */
-#ifdef CONFIG_BROKEN_RODATA
-#define __constsection(x)
-#else
-#define __constsection(x) __section(x)
-#endif
-
 /*
  * modpost check for section mismatches during the kernel build.
  * A section mismatch happens when there are references from a
@@ -75,7 +64,7 @@
  */
 #define __ref            __section(.ref.text) noinline
 #define __refdata        __section(.ref.data)
-#define __refconst       __constsection(.ref.rodata)
+#define __refconst       __section(.ref.rodata)

 #ifdef MODULE
 #define __exitused
@@ -88,10 +77,10 @@
 /* Used for MEMORY_HOTPLUG */
 #define __meminit        __section(.meminit.text) __cold notrace
 #define __meminitdata    __section(.meminit.data)
-#define __meminitconst   __constsection(.meminit.rodata)
+#define __meminitconst   __section(.meminit.rodata)
 #define __memexit        __section(.memexit.text) __exitused __cold notrace
 #define __memexitdata    __section(.memexit.data)
-#define __memexitconst   __constsection(.memexit.rodata)
+#define __memexitconst   __section(.memexit.rodata)

 /* For assembly routines */
 #define __HEAD		.section	".head.text","ax"
...
@@ -2118,12 +2118,6 @@ config PADATA
 	depends on SMP
 	bool

-# Can be selected by architectures with broken toolchains
-# that get confused by correct const<->read_only section
-# mappings
-config BROKEN_RODATA
-	bool
-
 config ASN1
 	tristate
 	help
...