Commit 5ce2955e authored by Linus Torvalds

Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus

Pull MIPS fixes from Ralf Baechle:
 "Various fixes across the tree.  The modpost error due to
  virt_addr_valid() not being usable from modules required a number of
  preparatory cleanups so a clean fix was possible."

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus:
  MIPS: 64-bit: Fix build if !CONFIG_MODULES
  MIPS: Wire up finit_module syscall.
  MIPS: Fix modpost error in modules attempting to use virt_addr_valid().
  MIPS: page.h: Remove now unnecessary #ifndef __ASSEMBLY__ wrapper.
  MIPS: Switch remaining assembler PAGE_SIZE users to <asm/asm-offsets.h>.
  MIPS: Include PAGE_S{IZE,HIFT} in <asm/offset.h>.
  MIPS: Don't include <asm/page.h> unnecessarily.
  MIPS: Fix comment.
  Revert "MIPS: Optimise TLB handlers for MIPS32/64 R2 cores."
  MIPS: perf: Fix build failure in XLP perf support.
  MIPS: Alchemy: Make 32kHz and r4k timer coexist peacefully
parents d0631c6e 2f12fb20
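The headline fix in this pull is the virt_addr_valid() change: on MIPS it was a macro expanding to pfn_valid(PFN_DOWN(virt_to_phys(kaddr))), and the MIPS pfn_valid() reaches into mm internals (min_low_pfn among them) that are not exported, so modpost rejected any module using it. The hunks below make virt_addr_valid() call an out-of-line, exported __virt_addr_valid() instead. A minimal sketch of a module that would have hit the modpost error and now builds — the module itself is hypothetical, purely for illustration, and because the new helper is exported with EXPORT_SYMBOL_GPL it needs a GPL-compatible license:

#include <linux/module.h>
#include <linux/mm.h>		/* virt_addr_valid() */
#include <linux/slab.h>

static int __init vav_demo_init(void)
{
	void *p = kmalloc(64, GFP_KERNEL);

	if (!p)
		return -ENOMEM;

	/*
	 * Before this series the macro expansion below is what made
	 * modpost fail on MIPS; with virt_addr_valid() backed by the
	 * exported __virt_addr_valid() the module links cleanly.
	 */
	pr_info("kmalloc buffer is %sa valid kernel address\n",
		virt_addr_valid(p) ? "" : "not ");

	kfree(p);
	return 0;
}

static void __exit vav_demo_exit(void)
{
}

module_init(vav_demo_init);
module_exit(vav_demo_exit);
MODULE_LICENSE("GPL");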
@@ -39,8 +39,8 @@ config MIPS
select GENERIC_CLOCKEVENTS
select GENERIC_CMOS_UPDATE
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_REL
select MODULES_USE_ELF_RELA if 64BIT
select MODULES_USE_ELF_REL if MODULES
select MODULES_USE_ELF_RELA if MODULES && 64BIT
menu "Machine selection"
......
@@ -53,7 +53,7 @@ static struct clocksource au1x_counter1_clocksource = {
.read = au1x_counter1_read,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.rating = 100,
.rating = 1500,
};
static int au1x_rtcmatch2_set_next_event(unsigned long delta,
@@ -84,7 +84,7 @@ static irqreturn_t au1x_rtcmatch2_irq(int irq, void *dev_id)
static struct clock_event_device au1x_rtcmatch2_clockdev = {
.name = "rtcmatch2",
.features = CLOCK_EVT_FEAT_ONESHOT,
.rating = 100,
.rating = 1500,
.set_next_event = au1x_rtcmatch2_set_next_event,
.set_mode = au1x_rtcmatch2_set_mode,
.cpumask = cpu_all_mask,
@@ -158,20 +158,6 @@ static int __init alchemy_time_init(unsigned int m2int)
return -1;
}
static void __init alchemy_setup_c0timer(void)
{
/*
* MIPS kernel assigns 'au1k_wait' to 'cpu_wait' before this
* function is called. Because the Alchemy counters are unusable
* the C0 timekeeping code is installed and use of the 'wait'
* instruction must be prohibited, which is done most easily by
* assigning NULL to cpu_wait.
*/
cpu_wait = NULL;
r4k_clockevent_init();
init_r4k_clocksource();
}
static int alchemy_m2inttab[] __initdata = {
AU1000_RTC_MATCH2_INT,
AU1500_RTC_MATCH2_INT,
@@ -186,8 +172,7 @@ void __init plat_time_init(void)
int t;
t = alchemy_get_cputype();
if (t == ALCHEMY_CPU_UNKNOWN)
alchemy_setup_c0timer();
else if (alchemy_time_init(alchemy_m2inttab[t]))
alchemy_setup_c0timer();
if (t == ALCHEMY_CPU_UNKNOWN ||
alchemy_time_init(alchemy_m2inttab[t]))
cpu_wait = NULL; /* wait doesn't work with r4k timer */
}
@@ -45,8 +45,6 @@
#define HUGETLB_PAGE_ORDER ({BUILD_BUG(); 0; })
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
#ifndef __ASSEMBLY__
#include <linux/pfn.h>
#include <asm/io.h>
@@ -139,8 +137,6 @@ typedef struct { unsigned long pgprot; } pgprot_t;
*/
#define ptep_buddy(x) ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))
#endif /* !__ASSEMBLY__ */
/*
* __pa()/__va() should be used only during mem init.
*/
@@ -202,7 +198,10 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#endif
#define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(virt_to_phys(kaddr)))
#define virt_addr_valid(kaddr) pfn_valid(PFN_DOWN(virt_to_phys(kaddr)))
extern int __virt_addr_valid(const volatile void *kaddr);
#define virt_addr_valid(kaddr) \
__virt_addr_valid((const volatile void *) (kaddr))
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
......
@@ -29,9 +29,10 @@ struct thread_info {
__u32 cpu; /* current CPU */
int preempt_count; /* 0 => preemptable, <0 => BUG */
mm_segment_t addr_limit; /* thread address space:
0-0xBFFFFFFF for user-thead
0-0xFFFFFFFF for kernel-thread
mm_segment_t addr_limit; /*
* thread address space limit:
* 0x7fffffff for user-thead
* 0xffffffff for kernel-thread
*/
struct restart_block restart_block;
struct pt_regs *regs;
......
@@ -368,16 +368,17 @@
#define __NR_process_vm_readv (__NR_Linux + 345)
#define __NR_process_vm_writev (__NR_Linux + 346)
#define __NR_kcmp (__NR_Linux + 347)
#define __NR_finit_module (__NR_Linux + 348)
/*
* Offset of the last Linux o32 flavoured syscall
*/
#define __NR_Linux_syscalls 347
#define __NR_Linux_syscalls 348
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
#define __NR_O32_Linux_syscalls 347
#define __NR_O32_Linux_syscalls 348
#if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -692,16 +693,17 @@
#define __NR_process_vm_readv (__NR_Linux + 304)
#define __NR_process_vm_writev (__NR_Linux + 305)
#define __NR_kcmp (__NR_Linux + 306)
#define __NR_finit_module (__NR_Linux + 307)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
#define __NR_Linux_syscalls 306
#define __NR_Linux_syscalls 307
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
#define __NR_64_Linux_syscalls 306
#define __NR_64_Linux_syscalls 307
#if _MIPS_SIM == _MIPS_SIM_NABI32
@@ -1021,15 +1023,16 @@
#define __NR_process_vm_readv (__NR_Linux + 309)
#define __NR_process_vm_writev (__NR_Linux + 310)
#define __NR_kcmp (__NR_Linux + 311)
#define __NR_finit_module (__NR_Linux + 312)
/*
* Offset of the last N32 flavoured syscall
*/
#define __NR_Linux_syscalls 311
#define __NR_Linux_syscalls 312
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
#define __NR_N32_Linux_syscalls 311
#define __NR_N32_Linux_syscalls 312
#endif /* _UAPI_ASM_UNISTD_H */
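With the three __NR_finit_module numbers reserved above (4348 for o32, 5307 for 64-bit, 6312 for N32) and the table entries added further down, the new syscall becomes reachable from userspace. A rough userspace sketch for exercising it — error handling is minimal, root/CAP_SYS_MODULE is required, and on a pre-3.8 toolchain __NR_finit_module may need to be defined by hand since libc shipped no wrapper at the time:

#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>	/* __NR_finit_module via <asm/unistd.h> */
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <module.ko>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* finit_module(fd, param_values, flags) */
	if (syscall(__NR_finit_module, fd, "", 0) != 0) {
		perror("finit_module");
		return 1;
	}

	return 0;
}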
@@ -200,6 +200,9 @@ void output_mm_defines(void)
DEFINE(_PTRS_PER_PMD, PTRS_PER_PMD);
DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE);
BLANK();
DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
DEFINE(_PAGE_SIZE, PAGE_SIZE);
BLANK();
}
#ifdef CONFIG_32BIT
......
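The two DEFINE() lines added to output_mm_defines() above are what give assembler sources and the linker script a page size they can use without <asm/page.h>: asm-offsets.c is compiled to assembly, each DEFINE() plants a marker carrying the constant's numeric value, and a Kbuild sed pass turns the markers into plain #defines in the generated asm-offsets.h. A rough sketch of the mechanism (the helpers follow include/linux/kbuild.h; the example value assumes 4 kB pages):

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

#define BLANK() \
	asm volatile("\n->" : : )

/*
 * DEFINE(_PAGE_SHIFT, PAGE_SHIFT) thus emits a "->_PAGE_SHIFT 12 PAGE_SHIFT"
 * marker into the intermediate assembly, which the Kbuild post-processing
 * step rewrites as "#define _PAGE_SHIFT 12" in asm-offsets.h, so the .S
 * files and vmlinux.lds patched below can drop their <asm/page.h> includes.
 */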
@@ -19,7 +19,6 @@
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/war.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#define PANIC_PIC(msg) \
@@ -483,8 +482,8 @@ NESTED(nmi_handler, PT_SIZE, sp)
MFC0 k1, CP0_ENTRYHI
andi k1, 0xff /* ASID_MASK */
MFC0 k0, CP0_EPC
PTR_SRL k0, PAGE_SHIFT + 1
PTR_SLL k0, PAGE_SHIFT + 1
PTR_SRL k0, _PAGE_SHIFT + 1
PTR_SLL k0, _PAGE_SHIFT + 1
or k1, k0
MTC0 k1, CP0_ENTRYHI
mtc0_tlbw_hazard
......
@@ -21,7 +21,6 @@
#include <asm/asmmacro.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
......
@@ -15,7 +15,6 @@
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
......
@@ -847,7 +847,6 @@ static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CACHE_MISSES] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x1b, CNTR_ALL }, /* PAPI_BR_CN */
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */
[PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
};
/* 24K/34K/1004K cores can share the same cache event map. */
@@ -1115,24 +1114,12 @@ static const struct mips_perf_event xlp_cache_map
[C(RESULT_ACCESS)] = { 0x2f, CNTR_ALL }, /* PAPI_L1_DCW */
[C(RESULT_MISS)] = { 0x2e, CNTR_ALL }, /* PAPI_L1_STM */
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
[C(RESULT_MISS)] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
[C(LL)] = {
[C(OP_READ)] = {
@@ -1143,10 +1130,6 @@ static const struct mips_perf_event xlp_cache_map
[C(RESULT_ACCESS)] = { 0x34, CNTR_ALL }, /* PAPI_L2_DCA */
[C(RESULT_MISS)] = { 0x36, CNTR_ALL }, /* PAPI_L2_DCM */
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
[C(DTLB)] = {
/*
@@ -1154,45 +1137,24 @@ static const struct mips_perf_event xlp_cache_map
* read and write.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { 0x25, CNTR_ALL },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
};
......
@@ -15,7 +15,6 @@
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/thread_info.h>
......
@@ -15,7 +15,6 @@
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
......
@@ -9,7 +9,6 @@
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>
#include <asm/page.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/addrspace.h>
@@ -50,7 +49,7 @@ process_entry:
and s3, s2, 0x8
beq s3, zero, process_entry
and s2, s2, ~0x8
li s6, (1 << PAGE_SHIFT) / SZREG
li s6, (1 << _PAGE_SHIFT) / SZREG
copy_word:
/* copy page word by word */
......
@@ -583,6 +583,7 @@ einval: li v0, -ENOSYS
sys sys_process_vm_readv 6 /* 4345 */
sys sys_process_vm_writev 6
sys sys_kcmp 5
sys sys_finit_module 3
.endm
/* We pre-compute the number of _instruction_ bytes needed to
......
@@ -422,4 +422,5 @@ sys_call_table:
PTR sys_process_vm_readv
PTR sys_process_vm_writev /* 5305 */
PTR sys_kcmp
PTR sys_finit_module
.size sys_call_table,.-sys_call_table
@@ -416,4 +416,5 @@ EXPORT(sysn32_call_table)
PTR compat_sys_process_vm_readv
PTR compat_sys_process_vm_writev /* 6310 */
PTR sys_kcmp
PTR sys_finit_module
.size sysn32_call_table,.-sysn32_call_table
@@ -540,4 +540,5 @@ sys_call_table:
PTR compat_sys_process_vm_readv /* 4345 */
PTR compat_sys_process_vm_writev
PTR sys_kcmp
PTR sys_finit_module
.size sys_call_table,.-sys_call_table
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#define PAGE_SIZE _PAGE_SIZE
/*
* Put .bss..swapper_pg_dir as the first thing in .bss. This will
* ensure that it has .bss alignment (64K).
......
@@ -190,3 +190,9 @@ void __iounmap(const volatile void __iomem *addr)
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__iounmap);
int __virt_addr_valid(const volatile void *kaddr)
{
return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);
@@ -7,7 +7,6 @@
* Copyright (C) 1999 Silicon Graphics, Inc.
*/
#include <asm/mipsregs.h>
#include <asm/page.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
......
@@ -976,13 +976,6 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
#endif
uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
if (cpu_has_mips_r2) {
uasm_i_ext(p, tmp, tmp, PGDIR_SHIFT, (32 - PGDIR_SHIFT));
uasm_i_ins(p, ptr, tmp, PGD_T_LOG2, (32 - PGDIR_SHIFT));
return;
}
uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
@@ -1018,15 +1011,6 @@ static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
if (cpu_has_mips_r2) {
/* PTE ptr offset is obtained from BadVAddr */
UASM_i_MFC0(p, tmp, C0_BADVADDR);
UASM_i_LW(p, ptr, 0, ptr);
uasm_i_ext(p, tmp, tmp, PAGE_SHIFT+1, PGDIR_SHIFT-PAGE_SHIFT-1);
uasm_i_ins(p, ptr, tmp, PTE_T_LOG2+1, PGDIR_SHIFT-PAGE_SHIFT-1);
return;
}
/*
* Bug workaround for the Nevada. It seems as if under certain
* circumstances the move from cp0_context might produce a
......
@@ -8,7 +8,6 @@
* Wu Zhangjin <wuzhangjin@gmail.com>
*/
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/regdef.h>
#include <asm/asm.h>
@@ -35,7 +34,7 @@ LEAF(swsusp_arch_resume)
0:
PTR_L t1, PBE_ADDRESS(t0) /* source */
PTR_L t2, PBE_ORIG_ADDRESS(t0) /* destination */
PTR_ADDU t3, t1, PAGE_SIZE
PTR_ADDU t3, t1, _PAGE_SIZE
1:
REG_L t8, (t1)
REG_S t8, (t2)
......