Commit 6fd4ce88 authored by Linus Torvalds

Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/upstream-linus

* 'upstream' of git://git.linux-mips.org/pub/scm/upstream-linus: (31 commits)
  MIPS: Close races in TLB modify handlers.
  MIPS: Add uasm UASM_i_SRL_SAFE macro.
  MIPS: RB532: Use hex_to_bin()
  MIPS: Enable cpu_has_clo_clz for MIPS Technologies' platforms
  MIPS: PowerTV: Provide cpu-feature-overrides.h
  MIPS: Remove pointless return statement from empty void functions.
  MIPS: Limit fixrange_init() to the FIXMAP region
  MIPS: Install handlers for software IRQs
  MIPS: Move FIXADDR_TOP into spaces.h
  MIPS: Add SYNC after cacheflush
  MIPS: pfn_valid() is broken on low memory HIGHMEM systems
  MIPS: HIGHMEM DMA on noncoherent MIPS32 processors
  MIPS: topdown mmap support
  MIPS: Remove redundant addr_limit assignment on exec.
  MIPS: AR7: Replace __attribute__((__packed__)) with __packed
  MIPS: AR7: Remove 'space before tabs' in platform.c
  MIPS: Lantiq: Add missing clk_enable and clk_disable functions.
  MIPS: AR7: Fix trailing semicolon bug in clock.c
  MAINTAINERS: Update MIPS entry.
  MIPS: BCM63xx: Remove duplicate PERF_IRQSTAT_REG definition
  ...
parents ba5b56cb bf28607f
......@@ -4217,9 +4217,10 @@ F: drivers/usb/image/microtek.*
MIPS
M: Ralf Baechle <ralf@linux-mips.org>
W: http://www.linux-mips.org/
L: linux-mips@linux-mips.org
W: http://www.linux-mips.org/
T: git git://git.linux-mips.org/pub/scm/linux.git
Q: http://patchwork.linux-mips.org/project/linux-mips/list/
S: Supported
F: Documentation/mips/
F: arch/mips/
......
......@@ -16,6 +16,7 @@ platforms += lasat
platforms += loongson
platforms += mipssim
platforms += mti-malta
platforms += netlogic
platforms += pmc-sierra
platforms += pnx833x
platforms += pnx8550
......
......@@ -191,18 +191,6 @@ endif
#
include $(srctree)/arch/mips/Kbuild.platforms
#
# NETLOGIC SOC Common (common)
#
cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/mach-netlogic
cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/netlogic
#
# NETLOGIC XLR/XLS SoC, Simulator and boards
#
core-$(CONFIG_NLM_XLR) += arch/mips/netlogic/xlr/
load-$(CONFIG_NLM_XLR_BOARD) += 0xffffffff84000000
cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic
drivers-$(CONFIG_PCI) += arch/mips/pci/
......
......@@ -443,7 +443,7 @@ struct clk *clk_get(struct device *dev, const char *id)
return &vbus_clk;
if (!strcmp(id, "cpu"))
return &cpu_clk;
if (!strcmp(id, "dsp"));
if (!strcmp(id, "dsp"))
return &dsp_clk;
if (!strcmp(id, "vbus"))
return &vbus_clk;
......
......@@ -77,7 +77,7 @@ struct psp_env_chunk {
u16 csum;
u8 len;
char data[11];
} __attribute__ ((packed));
} __packed;
struct psp_var_map_entry {
u8 num;
......
......@@ -14,6 +14,7 @@
#define _ASM_FIXMAP_H
#include <asm/page.h>
#include <spaces.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
#include <asm/kmap_types.h>
......@@ -67,15 +68,6 @@ enum fixed_addresses {
* the start of the fixmap, and leave one page empty
* at the top of mem..
*/
#ifdef CONFIG_BCM63XX
#define FIXADDR_TOP ((unsigned long)(long)(int)0xff000000)
#else
#if defined(CONFIG_CPU_TX39XX) || defined(CONFIG_CPU_TX49XX)
#define FIXADDR_TOP ((unsigned long)(long)(int)(0xff000000 - 0x20000))
#else
#define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000)
#endif
#endif
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
......
......@@ -21,8 +21,6 @@
#ifndef _ASM_GT64120_H
#define _ASM_GT64120_H
#include <linux/clocksource.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
......
......@@ -18,7 +18,6 @@
static inline void irq_dispose_mapping(unsigned int virq)
{
return;
}
#ifdef CONFIG_I8259
......
......@@ -89,7 +89,6 @@
/* Interrupt Mask register */
#define PERF_IRQMASK_REG 0xc
#define PERF_IRQSTAT_REG 0x10
/* Interrupt Status register */
#define PERF_IRQSTAT_REG 0x10
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle
* Copyright (C) 2000, 2002 Maciej W. Rozycki
* Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc.
*/
#ifndef _ASM_BCM63XX_SPACES_H
#define _ASM_BCM63XX_SPACES_H
#define FIXADDR_TOP ((unsigned long)(long)(int)0xff000000)
#include <asm/mach-generic/spaces.h>
#endif /* __ASM_BCM63XX_SPACES_H */
......@@ -49,7 +49,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
static inline void plat_extra_sync_for_device(struct device *dev)
{
return;
}
static inline int plat_dma_mapping_error(struct device *dev,
......
......@@ -82,4 +82,8 @@
#define PAGE_OFFSET (CAC_BASE + PHYS_OFFSET)
#endif
#ifndef FIXADDR_TOP
#define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000)
#endif
#endif /* __ASM_MACH_GENERIC_SPACES_H */
......@@ -60,7 +60,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
static inline void plat_extra_sync_for_device(struct device *dev)
{
return;
}
static inline int plat_dma_mapping_error(struct device *dev,
......
......@@ -50,7 +50,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
static inline void plat_extra_sync_for_device(struct device *dev)
{
return;
}
static inline int plat_dma_mapping_error(struct device *dev,
......
......@@ -55,7 +55,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
static inline void plat_extra_sync_for_device(struct device *dev)
{
return;
}
static inline int plat_dma_mapping_error(struct device *dev,
......
......@@ -32,6 +32,7 @@
/* #define cpu_has_vtag_icache ? */
/* #define cpu_has_dc_aliases ? */
/* #define cpu_has_ic_fills_f_dc ? */
#define cpu_has_clo_clz 1
#define cpu_has_nofpuex 0
/* #define cpu_has_64bits ? */
/* #define cpu_has_64bit_zero_reg ? */
......@@ -58,6 +59,7 @@
/* #define cpu_has_vtag_icache ? */
/* #define cpu_has_dc_aliases ? */
/* #define cpu_has_ic_fills_f_dc ? */
#define cpu_has_clo_clz 1
#define cpu_has_nofpuex 0
/* #define cpu_has_64bits ? */
/* #define cpu_has_64bit_zero_reg ? */
......
......@@ -31,6 +31,7 @@
/* #define cpu_has_vtag_icache ? */
/* #define cpu_has_dc_aliases ? */
/* #define cpu_has_ic_fills_f_dc ? */
#define cpu_has_clo_clz 1
#define cpu_has_nofpuex 0
/* #define cpu_has_64bits ? */
/* #define cpu_has_64bit_zero_reg ? */
......@@ -56,6 +57,7 @@
/* #define cpu_has_vtag_icache ? */
/* #define cpu_has_dc_aliases ? */
/* #define cpu_has_ic_fills_f_dc ? */
#define cpu_has_clo_clz 1
#define cpu_has_nofpuex 0
/* #define cpu_has_64bits ? */
/* #define cpu_has_64bit_zero_reg ? */
......
/*
* Copyright (C) 2010 Cisco Systems, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _ASM_MACH_POWERTV_CPU_FEATURE_OVERRIDES_H_
#define _ASM_MACH_POWERTV_CPU_FEATURE_OVERRIDES_H_
#define cpu_has_tlb 1
#define cpu_has_4kex 1
#define cpu_has_3k_cache 0
#define cpu_has_4k_cache 1
#define cpu_has_tx39_cache 0
#define cpu_has_fpu 0
#define cpu_has_counter 1
#define cpu_has_watch 1
#define cpu_has_divec 1
#define cpu_has_vce 0
#define cpu_has_cache_cdex_p 0
#define cpu_has_cache_cdex_s 0
#define cpu_has_mcheck 1
#define cpu_has_ejtag 1
#define cpu_has_llsc 1
#define cpu_has_mips16 0
#define cpu_has_mdmx 0
#define cpu_has_mips3d 0
#define cpu_has_smartmips 0
#define cpu_has_vtag_icache 0
#define cpu_has_dc_aliases 0
#define cpu_has_ic_fills_f_dc 0
#define cpu_has_mips32r1 0
#define cpu_has_mips32r2 1
#define cpu_has_mips64r1 0
#define cpu_has_mips64r2 0
#define cpu_has_dsp 0
#define cpu_has_mipsmt 0
#define cpu_has_userlocal 0
#define cpu_has_nofpuex 0
#define cpu_has_64bits 0
#define cpu_has_64bit_zero_reg 0
#define cpu_has_vint 1
#define cpu_has_veic 1
#define cpu_has_inclusive_pcaches 0
#define cpu_dcache_line_size() 32
#define cpu_icache_line_size() 32
#endif
......@@ -102,7 +102,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
static inline void plat_extra_sync_for_device(struct device *dev)
{
return;
}
static inline int plat_dma_mapping_error(struct device *dev,
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle
* Copyright (C) 2000, 2002 Maciej W. Rozycki
* Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc.
*/
#ifndef _ASM_TX39XX_SPACES_H
#define _ASM_TX39XX_SPACES_H
#define FIXADDR_TOP ((unsigned long)(long)(int)0xfefe0000)
#include <asm/mach-generic/spaces.h>
#endif /* __ASM_TX39XX_SPACES_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle
* Copyright (C) 2000, 2002 Maciej W. Rozycki
* Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc.
*/
#ifndef _ASM_TX49XX_SPACES_H
#define _ASM_TX49XX_SPACES_H
#define FIXADDR_TOP ((unsigned long)(long)(int)0xfefe0000)
#include <asm/mach-generic/spaces.h>
#endif /* __ASM_TX49XX_SPACES_H */
......@@ -414,6 +414,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
* constraints placed on us by the cache architecture.
*/
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
/*
* No page table caches to initialise
......
......@@ -11,6 +11,8 @@
#ifndef __ASM_SMP_OPS_H
#define __ASM_SMP_OPS_H
#include <linux/errno.h>
#ifdef CONFIG_SMP
#include <linux/cpumask.h>
......@@ -56,8 +58,43 @@ static inline void register_smp_ops(struct plat_smp_ops *ops)
#endif /* !CONFIG_SMP */
extern struct plat_smp_ops up_smp_ops;
extern struct plat_smp_ops cmp_smp_ops;
extern struct plat_smp_ops vsmp_smp_ops;
static inline int register_up_smp_ops(void)
{
#ifdef CONFIG_SMP_UP
extern struct plat_smp_ops up_smp_ops;
register_smp_ops(&up_smp_ops);
return 0;
#else
return -ENODEV;
#endif
}
static inline int register_cmp_smp_ops(void)
{
#ifdef CONFIG_MIPS_CMP
extern struct plat_smp_ops cmp_smp_ops;
register_smp_ops(&cmp_smp_ops);
return 0;
#else
return -ENODEV;
#endif
}
static inline int register_vsmp_smp_ops(void)
{
#ifdef CONFIG_MIPS_MT_SMP
extern struct plat_smp_ops vsmp_smp_ops;
register_smp_ops(&vsmp_smp_ops);
return 0;
#else
return -ENODEV;
#endif
}
#endif /* __ASM_SMP_OPS_H */
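Note: the three register_*_smp_ops() helpers above return 0 on success and -ENODEV when the corresponding SMP flavour is compiled out, so platform code can try them in priority order without nesting #ifdefs. A minimal caller sketch, assuming the usual CMP > vSMP > UP preference (the function name here is illustrative; the Malta and SIM hunks further down use exactly this pattern):

static void __init plat_smp_ops_sketch(void)
{
	if (!register_cmp_smp_ops())	/* CMP, if compiled in */
		return;
	if (!register_vsmp_smp_ops())	/* MT vSMP next */
		return;
	register_up_smp_ops();		/* uniprocessor fallback */
}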
......@@ -46,6 +46,7 @@ extern void smtc_prepare_cpus(int cpus);
extern void smtc_smp_finish(void);
extern void smtc_boot_secondary(int cpu, struct task_struct *t);
extern void smtc_cpus_done(void);
extern void smtc_init_secondary(void);
/*
......
......@@ -150,6 +150,7 @@ static inline void __uasminit uasm_l##lb(struct uasm_label **lab, u32 *addr) \
# define UASM_i_SLL(buf, rs, rt, sh) uasm_i_dsll(buf, rs, rt, sh)
# define UASM_i_SRA(buf, rs, rt, sh) uasm_i_dsra(buf, rs, rt, sh)
# define UASM_i_SRL(buf, rs, rt, sh) uasm_i_dsrl(buf, rs, rt, sh)
# define UASM_i_SRL_SAFE(buf, rs, rt, sh) uasm_i_dsrl_safe(buf, rs, rt, sh)
# define UASM_i_ROTR(buf, rs, rt, sh) uasm_i_drotr(buf, rs, rt, sh)
# define UASM_i_MFC0(buf, rt, rd...) uasm_i_dmfc0(buf, rt, rd)
# define UASM_i_MTC0(buf, rt, rd...) uasm_i_dmtc0(buf, rt, rd)
......@@ -165,6 +166,7 @@ static inline void __uasminit uasm_l##lb(struct uasm_label **lab, u32 *addr) \
# define UASM_i_SLL(buf, rs, rt, sh) uasm_i_sll(buf, rs, rt, sh)
# define UASM_i_SRA(buf, rs, rt, sh) uasm_i_sra(buf, rs, rt, sh)
# define UASM_i_SRL(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh)
# define UASM_i_SRL_SAFE(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh)
# define UASM_i_ROTR(buf, rs, rt, sh) uasm_i_rotr(buf, rs, rt, sh)
# define UASM_i_MFC0(buf, rt, rd...) uasm_i_mfc0(buf, rt, rd)
# define UASM_i_MTC0(buf, rt, rd...) uasm_i_mtc0(buf, rt, rd)
......
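On 64-bit kernels UASM_i_SRL_SAFE maps to uasm_i_dsrl_safe(), which exists because the DSRL immediate field is only 5 bits wide; shift amounts of 32-63 must be emitted as DSRL32 instead. A sketch of that selection, assuming uasm's uasm_i_dsrl()/uasm_i_dsrl32() emitters (the real helper lives in arch/mips/mm/uasm.c):

static inline void dsrl_safe_sketch(u32 **buf, unsigned int rd,
				    unsigned int rt, unsigned int sh)
{
	if (sh < 32)
		uasm_i_dsrl(buf, rd, rt, sh);		/* fits the 5-bit field */
	else
		uasm_i_dsrl32(buf, rd, rt, sh - 32);	/* DSRL32 adds 32 to the amount */
}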
......@@ -363,17 +363,18 @@
#define __NR_open_by_handle_at (__NR_Linux + 340)
#define __NR_clock_adjtime (__NR_Linux + 341)
#define __NR_syncfs (__NR_Linux + 342)
#define __NR_setns (__NR_Linux + 343)
#define __NR_sendmmsg (__NR_Linux + 343)
#define __NR_setns (__NR_Linux + 344)
/*
* Offset of the last Linux o32 flavoured syscall
*/
#define __NR_Linux_syscalls 343
#define __NR_Linux_syscalls 344
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
#define __NR_O32_Linux_syscalls 343
#define __NR_O32_Linux_syscalls 344
#if _MIPS_SIM == _MIPS_SIM_ABI64
......@@ -683,17 +684,18 @@
#define __NR_open_by_handle_at (__NR_Linux + 299)
#define __NR_clock_adjtime (__NR_Linux + 300)
#define __NR_syncfs (__NR_Linux + 301)
#define __NR_setns (__NR_Linux + 302)
#define __NR_sendmmsg (__NR_Linux + 302)
#define __NR_setns (__NR_Linux + 303)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
#define __NR_Linux_syscalls 302
#define __NR_Linux_syscalls 303
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
#define __NR_64_Linux_syscalls 302
#define __NR_64_Linux_syscalls 303
#if _MIPS_SIM == _MIPS_SIM_NABI32
......@@ -1008,17 +1010,18 @@
#define __NR_open_by_handle_at (__NR_Linux + 304)
#define __NR_clock_adjtime (__NR_Linux + 305)
#define __NR_syncfs (__NR_Linux + 306)
#define __NR_setns (__NR_Linux + 307)
#define __NR_sendmmsg (__NR_Linux + 307)
#define __NR_setns (__NR_Linux + 308)
/*
* Offset of the last N32 flavoured syscall
*/
#define __NR_Linux_syscalls 307
#define __NR_Linux_syscalls 308
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
#define __NR_N32_Linux_syscalls 307
#define __NR_N32_Linux_syscalls 308
#ifdef __KERNEL__
......
......@@ -71,7 +71,6 @@ void r4k_wait_irqoff(void)
local_irq_enable();
__asm__(" .globl __pastwait \n"
"__pastwait: \n");
return;
}
/*
......
......@@ -103,13 +103,11 @@ void __init mips_cpu_irq_init(void)
clear_c0_status(ST0_IM);
clear_c0_cause(CAUSEF_IP);
/*
* Only MT is using the software interrupts currently, so we just
* leave them uninitialized for other processors.
*/
if (cpu_has_mipsmt)
/* Software interrupts are used for MT/CMT IPI */
for (i = irq_base; i < irq_base + 2; i++)
irq_set_chip_and_handler(i, &mips_mt_cpu_irq_controller,
irq_set_chip_and_handler(i, cpu_has_mipsmt ?
&mips_mt_cpu_irq_controller :
&mips_cpu_irq_controller,
handle_percpu_irq);
for (i = irq_base + 2; i < irq_base + 8; i++)
......
......@@ -192,8 +192,6 @@ static void mipspmu_event_update(struct perf_event *event,
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
return;
}
static void mipspmu_start(struct perf_event *event, int flags)
......
......@@ -103,7 +103,6 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
__init_dsp();
regs->cp0_epc = pc;
regs->regs[29] = sp;
current_thread_info()->addr_limit = USER_DS;
}
void exit_thread(void)
......
......@@ -589,6 +589,7 @@ einval: li v0, -ENOSYS
sys sys_open_by_handle_at 3 /* 4340 */
sys sys_clock_adjtime 2
sys sys_syncfs 1
sys sys_sendmmsg 4
sys sys_setns 2
.endm
......
......@@ -428,5 +428,6 @@ sys_call_table:
PTR sys_open_by_handle_at
PTR sys_clock_adjtime /* 5300 */
PTR sys_syncfs
PTR sys_sendmmsg
PTR sys_setns
.size sys_call_table,.-sys_call_table
......@@ -428,5 +428,6 @@ EXPORT(sysn32_call_table)
PTR sys_open_by_handle_at
PTR compat_sys_clock_adjtime /* 6305 */
PTR sys_syncfs
PTR compat_sys_sendmmsg
PTR sys_setns
.size sysn32_call_table,.-sysn32_call_table
......@@ -546,5 +546,6 @@ sys_call_table:
PTR compat_sys_open_by_handle_at /* 4340 */
PTR compat_sys_clock_adjtime
PTR sys_syncfs
PTR compat_sys_sendmmsg
PTR sys_setns
.size sys_call_table,.-sys_call_table
......@@ -100,6 +100,19 @@ void clk_put(struct clk *clk)
}
EXPORT_SYMBOL(clk_put);
int clk_enable(struct clk *clk)
{
/* not used */
return 0;
}
EXPORT_SYMBOL(clk_enable);
void clk_disable(struct clk *clk)
{
/* not used */
}
EXPORT_SYMBOL(clk_disable);
static inline u32 ltq_get_counter_resolution(void)
{
u32 res;
......
......@@ -45,8 +45,6 @@ void ec_write(unsigned short addr, unsigned char val)
/* flush the write action */
inb(EC_IO_PORT_DATA);
spin_unlock_irqrestore(&index_access_lock, flags);
return;
}
EXPORT_SYMBOL_GPL(ec_write);
......
......@@ -34,6 +34,7 @@
#include <asm/time.h>
#include <asm/mips-boards/sim.h>
#include <asm/mips-boards/simint.h>
#include <asm/smp-ops.h>
static void __init serial_init(void);
......@@ -59,18 +60,17 @@ void __init prom_init(void)
prom_meminit();
#ifdef CONFIG_MIPS_MT_SMP
if (cpu_has_mipsmt)
register_smp_ops(&vsmp_smp_ops);
else
register_smp_ops(&up_smp_ops);
#endif
if (cpu_has_mipsmt) {
if (!register_vsmp_smp_ops())
return;
#ifdef CONFIG_MIPS_MT_SMTC
if (cpu_has_mipsmt)
register_smp_ops(&ssmtc_smp_ops);
else
register_smp_ops(&up_smp_ops);
return;
#endif
}
register_up_smp_ops();
}
static void __init serial_init(void)
......
......@@ -604,6 +604,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
r4k_blast_scache();
else
blast_scache_range(addr, addr + size);
__sync();
return;
}
......@@ -620,6 +621,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
}
bc_wback_inv(addr, size);
__sync();
}
static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
......@@ -647,6 +649,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
(addr + size - 1) & almask);
blast_inv_scache_range(addr, addr + size);
}
__sync();
return;
}
......@@ -663,6 +666,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
}
bc_inv(addr, size);
__sync();
}
#endif /* CONFIG_DMA_NONCOHERENT */
......
......@@ -15,18 +15,18 @@
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <dma-coherence.h>
static inline unsigned long dma_addr_to_virt(struct device *dev,
static inline struct page *dma_addr_to_page(struct device *dev,
dma_addr_t dma_addr)
{
unsigned long addr = plat_dma_addr_to_phys(dev, dma_addr);
return (unsigned long)phys_to_virt(addr);
return pfn_to_page(
plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
}
/*
......@@ -148,20 +148,20 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
free_pages(addr, get_order(size));
}
static inline void __dma_sync(unsigned long addr, size_t size,
static inline void __dma_sync_virtual(void *addr, size_t size,
enum dma_data_direction direction)
{
switch (direction) {
case DMA_TO_DEVICE:
dma_cache_wback(addr, size);
dma_cache_wback((unsigned long)addr, size);
break;
case DMA_FROM_DEVICE:
dma_cache_inv(addr, size);
dma_cache_inv((unsigned long)addr, size);
break;
case DMA_BIDIRECTIONAL:
dma_cache_wback_inv(addr, size);
dma_cache_wback_inv((unsigned long)addr, size);
break;
default:
......@@ -169,12 +169,49 @@ static inline void __dma_sync(unsigned long addr, size_t size,
}
}
/*
* A single sg entry may refer to multiple physically contiguous
* pages. But we still need to process highmem pages individually.
* If highmem is not configured then the bulk of this loop gets
* optimized out.
*/
static inline void __dma_sync(struct page *page,
unsigned long offset, size_t size, enum dma_data_direction direction)
{
size_t left = size;
do {
size_t len = left;
if (PageHighMem(page)) {
void *addr;
if (offset + len > PAGE_SIZE) {
if (offset >= PAGE_SIZE) {
page += offset >> PAGE_SHIFT;
offset &= ~PAGE_MASK;
}
len = PAGE_SIZE - offset;
}
addr = kmap_atomic(page);
__dma_sync_virtual(addr + offset, len, direction);
kunmap_atomic(addr);
} else
__dma_sync_virtual(page_address(page) + offset,
size, direction);
offset = 0;
page++;
left -= len;
} while (left);
}
static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
{
if (cpu_is_noncoherent_r10000(dev))
__dma_sync(dma_addr_to_virt(dev, dma_addr), size,
direction);
__dma_sync(dma_addr_to_page(dev, dma_addr),
dma_addr & ~PAGE_MASK, size, direction);
plat_unmap_dma_mem(dev, dma_addr, size, direction);
}
......@@ -185,13 +222,11 @@ static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
int i;
for (i = 0; i < nents; i++, sg++) {
unsigned long addr;
addr = (unsigned long) sg_virt(sg);
if (!plat_device_is_coherent(dev) && addr)
__dma_sync(addr, sg->length, direction);
sg->dma_address = plat_map_dma_mem(dev,
(void *)addr, sg->length);
if (!plat_device_is_coherent(dev))
__dma_sync(sg_page(sg), sg->offset, sg->length,
direction);
sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
sg->offset;
}
return nents;
......@@ -201,30 +236,23 @@ static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
unsigned long addr;
addr = (unsigned long) page_address(page) + offset;
if (!plat_device_is_coherent(dev))
__dma_sync(addr, size, direction);
__dma_sync(page, offset, size, direction);
return plat_map_dma_mem(dev, (void *)addr, size);
return plat_map_dma_mem_page(dev, page) + offset;
}
static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nhwentries, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
unsigned long addr;
int i;
for (i = 0; i < nhwentries; i++, sg++) {
if (!plat_device_is_coherent(dev) &&
direction != DMA_TO_DEVICE) {
addr = (unsigned long) sg_virt(sg);
if (addr)
__dma_sync(addr, sg->length, direction);
}
direction != DMA_TO_DEVICE)
__dma_sync(sg_page(sg), sg->offset, sg->length,
direction);
plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
}
}
......@@ -232,24 +260,18 @@ static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
static void mips_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
if (cpu_is_noncoherent_r10000(dev)) {
unsigned long addr;
addr = dma_addr_to_virt(dev, dma_handle);
__dma_sync(addr, size, direction);
}
if (cpu_is_noncoherent_r10000(dev))
__dma_sync(dma_addr_to_page(dev, dma_handle),
dma_handle & ~PAGE_MASK, size, direction);
}
static void mips_dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
plat_extra_sync_for_device(dev);
if (!plat_device_is_coherent(dev)) {
unsigned long addr;
addr = dma_addr_to_virt(dev, dma_handle);
__dma_sync(addr, size, direction);
}
if (!plat_device_is_coherent(dev))
__dma_sync(dma_addr_to_page(dev, dma_handle),
dma_handle & ~PAGE_MASK, size, direction);
}
static void mips_dma_sync_sg_for_cpu(struct device *dev,
......@@ -260,8 +282,8 @@ static void mips_dma_sync_sg_for_cpu(struct device *dev,
/* Make sure that gcc doesn't leave the empty loop body. */
for (i = 0; i < nelems; i++, sg++) {
if (cpu_is_noncoherent_r10000(dev))
__dma_sync((unsigned long)page_address(sg_page(sg)),
sg->length, direction);
__dma_sync(sg_page(sg), sg->offset, sg->length,
direction);
}
}
......@@ -273,8 +295,8 @@ static void mips_dma_sync_sg_for_device(struct device *dev,
/* Make sure that gcc doesn't leave the empty loop body. */
for (i = 0; i < nelems; i++, sg++) {
if (!plat_device_is_coherent(dev))
__dma_sync((unsigned long)page_address(sg_page(sg)),
sg->length, direction);
__dma_sync(sg_page(sg), sg->offset, sg->length,
direction);
}
}
......@@ -295,7 +317,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
plat_extra_sync_for_device(dev);
if (!plat_device_is_coherent(dev))
__dma_sync((unsigned long)vaddr, size, direction);
__dma_sync_virtual(vaddr, size, direction);
}
EXPORT_SYMBOL(dma_cache_sync);
......
......@@ -277,11 +277,11 @@ void __init fixrange_init(unsigned long start, unsigned long end,
k = __pmd_offset(vaddr);
pgd = pgd_base + i;
for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
pud = (pud_t *)pgd;
for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
pmd = (pmd_t *)pud;
for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
if (pmd_none(*pmd)) {
pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
set_pmd(pmd, __pmd((unsigned long)pte));
......@@ -368,7 +368,7 @@ void __init mem_init(void)
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
#endif
max_mapnr = highend_pfn;
max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
max_mapnr = max_low_pfn;
#endif
......
......@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched.h>
......@@ -17,21 +18,65 @@ unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
EXPORT_SYMBOL(shm_align_mask);
/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)
static int mmap_is_legacy(void)
{
if (current->personality & ADDR_COMPAT_LAYOUT)
return 1;
if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
return 1;
return sysctl_legacy_va_layout;
}
static unsigned long mmap_base(unsigned long rnd)
{
unsigned long gap = rlimit(RLIMIT_STACK);
if (gap < MIN_GAP)
gap = MIN_GAP;
else if (gap > MAX_GAP)
gap = MAX_GAP;
return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
unsigned long pgoff)
{
unsigned long base = addr & ~shm_align_mask;
unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
if (base + off <= addr)
return base + off;
return base - off;
}
#define COLOUR_ALIGN(addr,pgoff) \
((((addr) + shm_align_mask) & ~shm_align_mask) + \
(((pgoff) << PAGE_SHIFT) & shm_align_mask))
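COLOUR_ALIGN rounds an address up to the aliasing boundary and then adds the file offset's colour, so every shared mapping of a given page offset lands on the same virtual cache colour. A standalone worked example, assuming an illustrative 64 KB alias mask and 4 KB pages:

#include <stdio.h>

#define SHM_ALIGN_MASK	0xffffUL	/* assumed 64 KB aliasing */
#define PAGE_SHIFT	12

static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	return ((addr + SHM_ALIGN_MASK) & ~SHM_ALIGN_MASK) +
	       ((pgoff << PAGE_SHIFT) & SHM_ALIGN_MASK);
}

int main(void)
{
	/* 0x10123000 rounds up to 0x10130000; pgoff 3 adds colour 0x3000 */
	printf("%#lx\n", colour_align(0x10123000UL, 3));	/* prints 0x10133000 */
	return 0;
}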
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
enum mmap_allocation_direction {UP, DOWN};
static unsigned long arch_get_unmapped_area_foo(struct file *filp,
unsigned long addr0, unsigned long len, unsigned long pgoff,
unsigned long flags, enum mmap_allocation_direction dir)
{
struct vm_area_struct * vmm;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long addr = addr0;
int do_color_align;
if (len > TASK_SIZE)
if (unlikely(len > TASK_SIZE))
return -ENOMEM;
if (flags & MAP_FIXED) {
/* Even MAP_FIXED mappings must reside within TASK_SIZE. */
/* Even MAP_FIXED mappings must reside within TASK_SIZE */
if (TASK_SIZE - len < addr)
return -EINVAL;
......@@ -48,32 +93,128 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
do_color_align = 0;
if (filp || (flags & MAP_SHARED))
do_color_align = 1;
/* requesting a specific address */
if (addr) {
if (do_color_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
vmm = find_vma(current->mm, addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vmm || addr + len <= vmm->vm_start))
(!vma || addr + len <= vma->vm_start))
return addr;
}
addr = current->mm->mmap_base;
if (dir == UP) {
addr = mm->mmap_base;
if (do_color_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
/* At this point: (!vmm || addr < vmm->vm_end). */
for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
/* At this point: (!vma || addr < vma->vm_end). */
if (TASK_SIZE - len < addr)
return -ENOMEM;
if (!vmm || addr + len <= vmm->vm_start)
if (!vma || addr + len <= vma->vm_start)
return addr;
addr = vmm->vm_end;
addr = vma->vm_end;
if (do_color_align)
addr = COLOUR_ALIGN(addr, pgoff);
}
} else {
/* check if free_area_cache is useful for us */
if (len <= mm->cached_hole_size) {
mm->cached_hole_size = 0;
mm->free_area_cache = mm->mmap_base;
}
/* either no address requested or can't fit in requested address hole */
addr = mm->free_area_cache;
if (do_color_align) {
unsigned long base =
COLOUR_ALIGN_DOWN(addr - len, pgoff);
addr = base + len;
}
/* make sure it can fit in the remaining address space */
if (likely(addr > len)) {
vma = find_vma(mm, addr - len);
if (!vma || addr <= vma->vm_start) {
/* remember the address as a hint for next time */
return mm->free_area_cache = addr-len;
}
}
if (unlikely(mm->mmap_base < len))
goto bottomup;
addr = mm->mmap_base-len;
if (do_color_align)
addr = COLOUR_ALIGN_DOWN(addr, pgoff);
do {
/*
* Lookup failure means no vma is above this address,
* else if new region fits below vma->vm_start,
* return with success:
*/
vma = find_vma(mm, addr);
if (likely(!vma || addr+len <= vma->vm_start)) {
/* remember the address as a hint for next time */
return mm->free_area_cache = addr;
}
/* remember the largest hole we saw so far */
if (addr + mm->cached_hole_size < vma->vm_start)
mm->cached_hole_size = vma->vm_start - addr;
/* try just below the current vma->vm_start */
addr = vma->vm_start-len;
if (do_color_align)
addr = COLOUR_ALIGN_DOWN(addr, pgoff);
} while (likely(len < vma->vm_start));
bottomup:
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
* can happen with large stack limits and large mmap()
* allocations.
*/
mm->cached_hole_size = ~0UL;
mm->free_area_cache = TASK_UNMAPPED_BASE;
addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
/*
* Restore the topdown base:
*/
mm->free_area_cache = mm->mmap_base;
mm->cached_hole_size = ~0UL;
return addr;
}
}
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
return arch_get_unmapped_area_foo(filp,
addr0, len, pgoff, flags, UP);
}
/*
* There is no need to export this but sched.h declares the function as
* extern so making it static here results in an error.
*/
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
unsigned long addr0, unsigned long len, unsigned long pgoff,
unsigned long flags)
{
return arch_get_unmapped_area_foo(filp,
addr0, len, pgoff, flags, DOWN);
}
void arch_pick_mmap_layout(struct mm_struct *mm)
......@@ -89,9 +230,15 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
random_factor &= 0xffffffful;
}
if (mmap_is_legacy()) {
mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
mm->get_unmapped_area = arch_get_unmapped_area;
mm->unmap_area = arch_unmap_area;
} else {
mm->mmap_base = mmap_base(random_factor);
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
mm->unmap_area = arch_unmap_area_topdown;
}
}
static inline unsigned long brk_rnd(void)
......
......@@ -52,7 +52,7 @@ void __init pagetable_init(void)
* Fixed mappings:
*/
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
fixrange_init(vaddr, 0, pgd_base);
fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
#ifdef CONFIG_HIGHMEM
/*
......
......@@ -76,5 +76,5 @@ void __init pagetable_init(void)
* Fixed mappings:
*/
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
fixrange_init(vaddr, 0, pgd_base);
fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
}
......@@ -42,6 +42,18 @@
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);
struct work_registers {
int r1;
int r2;
int r3;
};
struct tlb_reg_save {
unsigned long a;
unsigned long b;
} ____cacheline_aligned_in_smp;
static struct tlb_reg_save handler_reg_save[NR_CPUS];
static inline int r45k_bvahwbug(void)
{
......@@ -248,6 +260,73 @@ static int scratch_reg __cpuinitdata;
static int pgd_reg __cpuinitdata;
enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
static struct work_registers __cpuinit build_get_work_registers(u32 **p)
{
struct work_registers r;
int smp_processor_id_reg;
int smp_processor_id_sel;
int smp_processor_id_shift;
if (scratch_reg > 0) {
/* Save in CPU local C0_KScratch? */
UASM_i_MTC0(p, 1, 31, scratch_reg);
r.r1 = K0;
r.r2 = K1;
r.r3 = 1;
return r;
}
if (num_possible_cpus() > 1) {
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
smp_processor_id_shift = 51;
smp_processor_id_reg = 20; /* XContext */
smp_processor_id_sel = 0;
#else
# ifdef CONFIG_32BIT
smp_processor_id_shift = 25;
smp_processor_id_reg = 4; /* Context */
smp_processor_id_sel = 0;
# endif
# ifdef CONFIG_64BIT
smp_processor_id_shift = 26;
smp_processor_id_reg = 4; /* Context */
smp_processor_id_sel = 0;
# endif
#endif
/* Get smp_processor_id */
UASM_i_MFC0(p, K0, smp_processor_id_reg, smp_processor_id_sel);
UASM_i_SRL_SAFE(p, K0, K0, smp_processor_id_shift);
/* handler_reg_save index in K0 */
UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));
UASM_i_LA(p, K1, (long)&handler_reg_save);
UASM_i_ADDU(p, K0, K0, K1);
} else {
UASM_i_LA(p, K0, (long)&handler_reg_save);
}
/* K0 now points to save area, save $1 and $2 */
UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0);
UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0);
r.r1 = K1;
r.r2 = 1;
r.r3 = 2;
return r;
}
static void __cpuinit build_restore_work_registers(u32 **p)
{
if (scratch_reg > 0) {
UASM_i_MFC0(p, 1, 31, scratch_reg);
return;
}
/* K0 already points to save area, restore $1 and $2 */
UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0);
UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0);
}
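When no KScratch register is available, build_get_work_registers() saves $1/$2 into this CPU's handler_reg_save slot, computing the slot address with a shift: sizeof(struct tlb_reg_save), padded out by ____cacheline_aligned_in_smp, is a power of two, so no multiply is needed. A C sketch of the address arithmetic the emitted assembly performs (helper name illustrative):

static struct tlb_reg_save *save_slot_sketch(unsigned int cpu)
{
	unsigned long base = (unsigned long)handler_reg_save;

	/* base + cpu * sizeof(struct tlb_reg_save), done as a shift */
	return (struct tlb_reg_save *)
		(base + (cpu << ilog2(sizeof(struct tlb_reg_save))));
}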
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
/*
......@@ -1160,9 +1239,6 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
memset(relocs, 0, sizeof(relocs));
memset(final_handler, 0, sizeof(final_handler));
if (scratch_reg == 0)
scratch_reg = allocate_kscratch();
if ((scratch_reg > 0 || scratchpad_available()) && use_bbit_insns()) {
htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
scratch_reg);
......@@ -1462,21 +1538,27 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
*/
static void __cpuinit
build_pte_present(u32 **p, struct uasm_reloc **r,
unsigned int pte, unsigned int ptr, enum label_id lid)
int pte, int ptr, int scratch, enum label_id lid)
{
int t = scratch >= 0 ? scratch : pte;
if (kernel_uses_smartmips_rixi) {
if (use_bbit_insns()) {
uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
uasm_i_nop(p);
} else {
uasm_i_andi(p, pte, pte, _PAGE_PRESENT);
uasm_il_beqz(p, r, pte, lid);
uasm_i_andi(p, t, pte, _PAGE_PRESENT);
uasm_il_beqz(p, r, t, lid);
if (pte == t)
/* You lose the SMP race :-(*/
iPTE_LW(p, pte, ptr);
}
} else {
uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
uasm_il_bnez(p, r, pte, lid);
uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ);
uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ);
uasm_il_bnez(p, r, t, lid);
if (pte == t)
/* You lose the SMP race :-(*/
iPTE_LW(p, pte, ptr);
}
}
......@@ -1497,19 +1579,19 @@ build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
*/
static void __cpuinit
build_pte_writable(u32 **p, struct uasm_reloc **r,
unsigned int pte, unsigned int ptr, enum label_id lid)
unsigned int pte, unsigned int ptr, int scratch,
enum label_id lid)
{
if (use_bbit_insns()) {
uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
uasm_i_nop(p);
uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
uasm_i_nop(p);
} else {
uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
uasm_il_bnez(p, r, pte, lid);
int t = scratch >= 0 ? scratch : pte;
uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE);
uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE);
uasm_il_bnez(p, r, t, lid);
if (pte == t)
/* You lose the SMP race :-(*/
iPTE_LW(p, pte, ptr);
}
else
uasm_i_nop(p);
}
/* Make PTE writable, update software status bits as well, then store
......@@ -1531,14 +1613,18 @@ build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
*/
static void __cpuinit
build_pte_modifiable(u32 **p, struct uasm_reloc **r,
unsigned int pte, unsigned int ptr, enum label_id lid)
unsigned int pte, unsigned int ptr, int scratch,
enum label_id lid)
{
if (use_bbit_insns()) {
uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
uasm_i_nop(p);
} else {
uasm_i_andi(p, pte, pte, _PAGE_WRITE);
uasm_il_beqz(p, r, pte, lid);
int t = scratch >= 0 ? scratch : pte;
uasm_i_andi(p, t, pte, _PAGE_WRITE);
uasm_il_beqz(p, r, t, lid);
if (pte == t)
/* You lose the SMP race :-(*/
iPTE_LW(p, pte, ptr);
}
}
......@@ -1619,7 +1705,7 @@ static void __cpuinit build_r3000_tlb_load_handler(void)
memset(relocs, 0, sizeof(relocs));
build_r3000_tlbchange_handler_head(&p, K0, K1);
build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
uasm_i_nop(&p); /* load delay */
build_make_valid(&p, &r, K0, K1);
build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
......@@ -1649,7 +1735,7 @@ static void __cpuinit build_r3000_tlb_store_handler(void)
memset(relocs, 0, sizeof(relocs));
build_r3000_tlbchange_handler_head(&p, K0, K1);
build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
uasm_i_nop(&p); /* load delay */
build_make_write(&p, &r, K0, K1);
build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
......@@ -1673,13 +1759,14 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
u32 *p = handle_tlbm;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
struct work_registers wr;
memset(handle_tlbm, 0, sizeof(handle_tlbm));
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
build_r3000_tlbchange_handler_head(&p, K0, K1);
build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
uasm_i_nop(&p); /* load delay */
build_make_write(&p, &r, K0, K1);
build_r3000_pte_reload_tlbwi(&p, K0, K1);
......@@ -1702,15 +1789,16 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
/*
* R4000 style TLB load/store/modify handlers.
*/
static void __cpuinit
static struct work_registers __cpuinit
build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
struct uasm_reloc **r, unsigned int pte,
unsigned int ptr)
struct uasm_reloc **r)
{
struct work_registers wr = build_get_work_registers(p);
#ifdef CONFIG_64BIT
build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
#else
build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
#endif
#ifdef CONFIG_HUGETLB_PAGE
......@@ -1719,21 +1807,22 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
* instead contains the tlb pte. Check the PAGE_HUGE bit and
* see if we need to jump to huge tlb processing.
*/
build_is_huge_pte(p, r, pte, ptr, label_tlb_huge_update);
build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update);
#endif
UASM_i_MFC0(p, pte, C0_BADVADDR);
UASM_i_LW(p, ptr, 0, ptr);
UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
uasm_i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
UASM_i_ADDU(p, ptr, ptr, pte);
UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
UASM_i_LW(p, wr.r2, 0, wr.r2);
UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);
#ifdef CONFIG_SMP
uasm_l_smp_pgtable_change(l, *p);
#endif
iPTE_LW(p, pte, ptr); /* get even pte */
iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
if (!m4kc_tlbp_war())
build_tlb_probe_entry(p);
return wr;
}
static void __cpuinit
......@@ -1746,6 +1835,7 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
build_update_entries(p, tmp, ptr);
build_tlb_write_entry(p, l, r, tlb_indexed);
uasm_l_leave(l, *p);
build_restore_work_registers(p);
uasm_i_eret(p); /* return from trap */
#ifdef CONFIG_64BIT
......@@ -1758,6 +1848,7 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
u32 *p = handle_tlbl;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
struct work_registers wr;
memset(handle_tlbl, 0, sizeof(handle_tlbl));
memset(labels, 0, sizeof(labels));
......@@ -1777,8 +1868,8 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
/* No need for uasm_i_nop */
}
build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
if (m4kc_tlbp_war())
build_tlb_probe_entry(&p);
......@@ -1788,44 +1879,43 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
* have triggered it. Skip the expensive test..
*/
if (use_bbit_insns()) {
uasm_il_bbit0(&p, &r, K0, ilog2(_PAGE_VALID),
uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
label_tlbl_goaround1);
} else {
uasm_i_andi(&p, K0, K0, _PAGE_VALID);
uasm_il_beqz(&p, &r, K0, label_tlbl_goaround1);
uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1);
}
uasm_i_nop(&p);
uasm_i_tlbr(&p);
/* Examine entrylo 0 or 1 based on ptr. */
if (use_bbit_insns()) {
uasm_i_bbit0(&p, K1, ilog2(sizeof(pte_t)), 8);
uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
} else {
uasm_i_andi(&p, K0, K1, sizeof(pte_t));
uasm_i_beqz(&p, K0, 8);
uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
uasm_i_beqz(&p, wr.r3, 8);
}
UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/
UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
/* load it in the delay slot*/
UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
/* load it if ptr is odd */
UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
/*
* If the entryLo (now in K0) is valid (bit 1), RI or
* If the entryLo (now in wr.r3) is valid (bit 1), RI or
* XI must have triggered it.
*/
if (use_bbit_insns()) {
uasm_il_bbit1(&p, &r, K0, 1, label_nopage_tlbl);
/* Reload the PTE value */
iPTE_LW(&p, K0, K1);
uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl);
uasm_i_nop(&p);
uasm_l_tlbl_goaround1(&l, p);
} else {
uasm_i_andi(&p, K0, K0, 2);
uasm_il_bnez(&p, &r, K0, label_nopage_tlbl);
uasm_l_tlbl_goaround1(&l, p);
/* Reload the PTE value */
iPTE_LW(&p, K0, K1);
uasm_i_andi(&p, wr.r3, wr.r3, 2);
uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl);
uasm_i_nop(&p);
}
uasm_l_tlbl_goaround1(&l, p);
}
build_make_valid(&p, &r, K0, K1);
build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
build_make_valid(&p, &r, wr.r1, wr.r2);
build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
#ifdef CONFIG_HUGETLB_PAGE
/*
......@@ -1833,8 +1923,8 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
* spots a huge page.
*/
uasm_l_tlb_huge_update(&l, p);
iPTE_LW(&p, K0, K1);
build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
iPTE_LW(&p, wr.r1, wr.r2);
build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
build_tlb_probe_entry(&p);
if (kernel_uses_smartmips_rixi) {
......@@ -1843,50 +1933,51 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
* have triggered it. Skip the expensive test..
*/
if (use_bbit_insns()) {
uasm_il_bbit0(&p, &r, K0, ilog2(_PAGE_VALID),
uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
label_tlbl_goaround2);
} else {
uasm_i_andi(&p, K0, K0, _PAGE_VALID);
uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
}
uasm_i_nop(&p);
uasm_i_tlbr(&p);
/* Examine entrylo 0 or 1 based on ptr. */
if (use_bbit_insns()) {
uasm_i_bbit0(&p, K1, ilog2(sizeof(pte_t)), 8);
uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
} else {
uasm_i_andi(&p, K0, K1, sizeof(pte_t));
uasm_i_beqz(&p, K0, 8);
uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
uasm_i_beqz(&p, wr.r3, 8);
}
UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/
UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
/* load it in the delay slot*/
UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
/* load it if ptr is odd */
UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
/*
* If the entryLo (now in K0) is valid (bit 1), RI or
* If the entryLo (now in wr.r3) is valid (bit 1), RI or
* XI must have triggered it.
*/
if (use_bbit_insns()) {
uasm_il_bbit0(&p, &r, K0, 1, label_tlbl_goaround2);
uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2);
} else {
uasm_i_andi(&p, K0, K0, 2);
uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
uasm_i_andi(&p, wr.r3, wr.r3, 2);
uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
}
/* Reload the PTE value */
iPTE_LW(&p, K0, K1);
/*
* We clobbered C0_PAGEMASK, restore it. On the other branch
* it is restored in build_huge_tlb_write_entry.
*/
build_restore_pagemask(&p, &r, K0, label_nopage_tlbl, 0);
build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0);
uasm_l_tlbl_goaround2(&l, p);
}
uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID));
build_huge_handler_tail(&p, &r, &l, K0, K1);
uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
#endif
uasm_l_nopage_tlbl(&l, p);
build_restore_work_registers(&p);
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
uasm_i_nop(&p);
......@@ -1905,17 +1996,18 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
u32 *p = handle_tlbs;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
struct work_registers wr;
memset(handle_tlbs, 0, sizeof(handle_tlbs));
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
if (m4kc_tlbp_war())
build_tlb_probe_entry(&p);
build_make_write(&p, &r, K0, K1);
build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
build_make_write(&p, &r, wr.r1, wr.r2);
build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
#ifdef CONFIG_HUGETLB_PAGE
/*
......@@ -1923,15 +2015,16 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
* build_r4000_tlbchange_handler_head spots a huge page.
*/
uasm_l_tlb_huge_update(&l, p);
iPTE_LW(&p, K0, K1);
build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
iPTE_LW(&p, wr.r1, wr.r2);
build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
build_tlb_probe_entry(&p);
uasm_i_ori(&p, K0, K0,
uasm_i_ori(&p, wr.r1, wr.r1,
_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
build_huge_handler_tail(&p, &r, &l, K0, K1);
build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
#endif
uasm_l_nopage_tlbs(&l, p);
build_restore_work_registers(&p);
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
uasm_i_nop(&p);
......@@ -1950,18 +2043,19 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
u32 *p = handle_tlbm;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
struct work_registers wr;
memset(handle_tlbm, 0, sizeof(handle_tlbm));
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
if (m4kc_tlbp_war())
build_tlb_probe_entry(&p);
/* Present and writable bits set, set accessed and dirty bits. */
build_make_write(&p, &r, K0, K1);
build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
build_make_write(&p, &r, wr.r1, wr.r2);
build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
#ifdef CONFIG_HUGETLB_PAGE
/*
......@@ -1969,15 +2063,16 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
* build_r4000_tlbchange_handler_head spots a huge page.
*/
uasm_l_tlb_huge_update(&l, p);
iPTE_LW(&p, K0, K1);
build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
iPTE_LW(&p, wr.r1, wr.r2);
build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
build_tlb_probe_entry(&p);
uasm_i_ori(&p, K0, K0,
uasm_i_ori(&p, wr.r1, wr.r1,
_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
build_huge_handler_tail(&p, &r, &l, K0, K1);
build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
#endif
uasm_l_nopage_tlbm(&l, p);
build_restore_work_registers(&p);
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
uasm_i_nop(&p);
......@@ -2036,6 +2131,7 @@ void __cpuinit build_tlb_refill_handler(void)
default:
if (!run_once) {
scratch_reg = allocate_kscratch();
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
build_r4000_setup_pgd();
#endif
......
......@@ -28,6 +28,7 @@
#include <asm/io.h>
#include <asm/system.h>
#include <asm/cacheflush.h>
#include <asm/smp-ops.h>
#include <asm/traps.h>
#include <asm/gcmpregs.h>
......@@ -358,15 +359,14 @@ void __init prom_init(void)
#ifdef CONFIG_SERIAL_8250_CONSOLE
console_config();
#endif
#ifdef CONFIG_MIPS_CMP
/* Early detection of CMP support */
if (gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ))
register_smp_ops(&cmp_smp_ops);
else
#endif
#ifdef CONFIG_MIPS_MT_SMP
register_smp_ops(&vsmp_smp_ops);
#endif
if (!register_cmp_smp_ops())
return;
if (!register_vsmp_smp_ops())
return;
#ifdef CONFIG_MIPS_MT_SMTC
register_smp_ops(&msmtc_smp_ops);
#endif
......
......@@ -152,7 +152,7 @@ int plat_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
* runtime code can anyway deal with the null set
*/
printk(KERN_WARNING
"IRQ affinity leaves no legal CPU for IRQ %d\n", irq);
"IRQ affinity leaves no legal CPU for IRQ %d\n", d->irq);
/* Do any generic SMTC IRQ affinity setup */
smtc_set_irq_affinity(d->irq, tmask);
......
#
# NETLOGIC includes
#
cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/mach-netlogic
cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/netlogic
#
# NETLOGIC XLR/XLS SoC, Simulator and boards
#
core-$(CONFIG_NLM_XLR) += arch/mips/netlogic/xlr/
load-$(CONFIG_NLM_XLR_BOARD) += 0xffffffff84000000
......@@ -209,7 +209,7 @@ void __init init_xlr_irqs(void)
irq_set_chip_and_handler(i, &xlr_pic, handle_level_irq);
else
irq_set_chip_and_handler(i, &nlm_cpu_intr,
handle_level_irq);
handle_percpu_irq);
}
#ifdef CONFIG_SMP
irq_set_chip_and_handler(IRQ_IPI_SMP_FUNCTION, &nlm_cpu_intr,
......
......@@ -87,17 +87,7 @@ void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc)
/* IRQ_IPI_SMP_RESCHEDULE handler */
void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc)
{
set_need_resched();
}
void nlm_common_ipi_handler(int irq, struct pt_regs *regs)
{
if (irq == IRQ_IPI_SMP_FUNCTION) {
smp_call_function_interrupt();
} else {
/* Announce that we are for reschduling */
set_need_resched();
}
scheduler_ipi();
}
/*
......@@ -122,6 +112,7 @@ void nlm_smp_finish(void)
#ifdef notyet
nlm_common_msgring_cpu_init();
#endif
local_irq_enable();
}
void nlm_cpus_done(void)
......
......@@ -140,6 +140,4 @@ void __init plat_mem_setup(void)
PNX8XXX_UART_LCR_8BIT;
ip3106_baud(UART_BASE, pnx8550_console_port) = 5;
}
return;
}
......@@ -4,7 +4,6 @@
#include <asm/bootinfo.h>
#include <asm/lasat/lasat.h>
#include <asm/gt64120.h>
#include <asm/nile4.h>
#define PCI_ACCESS_READ 0
......
......@@ -228,13 +228,11 @@ void __init prom_init(void)
*/
msp_serial_setup();
#ifdef CONFIG_MIPS_MT_SMP
register_smp_ops(&vsmp_smp_ops);
#endif
if (register_vsmp_smp_ops()) {
#ifdef CONFIG_MIPS_MT_SMTC
register_smp_ops(&msp_smtc_smp_ops);
#endif
}
#ifdef CONFIG_PMCTWILED
/*
......
......@@ -139,6 +139,4 @@ void __init plat_mem_setup(void)
PNX8XXX_UART_LCR_8BIT;
ip3106_baud(UART_BASE, pnx8550_console_port) = 5;
}
return;
}
......@@ -251,28 +251,22 @@ static struct platform_device *rb532_devs[] = {
static void __init parse_mac_addr(char *macstr)
{
int i, j;
unsigned char result, value;
int i, h, l;
for (i = 0; i < 6; i++) {
result = 0;
if (i != 5 && *(macstr + 2) != ':')
return;
for (j = 0; j < 2; j++) {
if (isxdigit(*macstr)
&& (value =
isdigit(*macstr) ? *macstr -
'0' : toupper(*macstr) - 'A' + 10) < 16) {
result = result * 16 + value;
macstr++;
} else
h = hex_to_bin(*macstr++);
if (h == -1)
return;
l = hex_to_bin(*macstr++);
if (l == -1)
return;
}
macstr++;
korina_dev0_data.mac[i] = result;
korina_dev0_data.mac[i] = (h << 4) + l;
}
}
......
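The rewrite above drops the open-coded hex digit arithmetic in favour of hex_to_bin() from <linux/kernel.h>, which returns the nibble value or -1 for a non-hex character. A minimal sketch of the resulting pattern (function name illustrative):

static int parse_mac_sketch(const char *s, unsigned char mac[6])
{
	int i, h, l;

	for (i = 0; i < 6; i++) {
		if (i != 5 && s[2] != ':')
			return -1;	/* bad separator */
		h = hex_to_bin(*s++);
		if (h == -1)
			return -1;	/* not a hex digit */
		l = hex_to_bin(*s++);
		if (l == -1)
			return -1;
		s++;			/* step over the ':' */
		mac[i] = (h << 4) + l;
	}
	return 0;
}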
......@@ -123,6 +123,13 @@ static int sb1250_set_affinity(struct irq_data *d, const struct cpumask *mask,
}
#endif
static void disable_sb1250_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
sb1250_mask_irq(sb1250_irq_owner[irq], irq);
}
static void enable_sb1250_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
......@@ -180,6 +187,7 @@ static struct irq_chip sb1250_irq_type = {
.name = "SB1250-IMR",
.irq_mask_ack = ack_sb1250_irq,
.irq_unmask = enable_sb1250_irq,
.irq_mask = disable_sb1250_irq,
#ifdef CONFIG_SMP
.irq_set_affinity = sb1250_set_affinity
#endif
......