Commit 3e20a26b authored by Ralf Baechle

Merge branch '4.0-fixes' into mips-for-linux-next

parents 98b0429b 5306a545
......@@ -415,6 +415,7 @@ config MIPS_MALTA
select SYS_HAS_CPU_MIPS32_R1
select SYS_HAS_CPU_MIPS32_R2
select SYS_HAS_CPU_MIPS32_R3_5
select SYS_HAS_CPU_MIPS32_R5
select SYS_HAS_CPU_MIPS32_R6
select SYS_HAS_CPU_MIPS64_R1
select SYS_HAS_CPU_MIPS64_R2
......@@ -424,6 +425,7 @@ config MIPS_MALTA
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_MICROMIPS
select SYS_SUPPORTS_MIPS_CMP
......@@ -1638,6 +1640,33 @@ config CPU_MIPS32_3_5_EVA
One of its primary benefits is an increase in the maximum size
of lowmem (up to 3GB). If unsure, say 'N' here.
config CPU_MIPS32_R5_FEATURES
bool "MIPS32 Release 5 Features"
depends on SYS_HAS_CPU_MIPS32_R5
depends on CPU_MIPS32_R2
help
Choose this option to build a kernel for release 2 or later of the
MIPS32 architecture including features from release 5 such as
support for Extended Physical Addressing (XPA).
config CPU_MIPS32_R5_XPA
bool "Extended Physical Addressing (XPA)"
depends on CPU_MIPS32_R5_FEATURES
depends on !EVA
depends on !PAGE_SIZE_4KB
depends on SYS_SUPPORTS_HIGHMEM
select XPA
select HIGHMEM
select ARCH_PHYS_ADDR_T_64BIT
default n
help
Choose this option if you want to enable Extended Physical
Addressing (XPA) on your MIPS32 core (such as the P5600 series). The
benefit is physical addressing extended to 40 bits or more. Note
that this has the side effect of turning on 64-bit addressing, which
in turn makes the PTEs 64-bit in size.
If unsure, say 'N' here.
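As a quick illustration of the help text above (a hedged sketch, not part of this patch): selecting CPU_MIPS32_R5_XPA pulls in ARCH_PHYS_ADDR_T_64BIT, so phys_addr_t widens to 64 bits and physical addresses beyond 4GB become representable even though virtual pointers stay 32-bit.

```c
/*
 * Hedged sketch, not from this patch: with ARCH_PHYS_ADDR_T_64BIT
 * selected, phys_addr_t is 64 bits wide even on a 32-bit kernel.
 */
#include <linux/types.h>
#include <linux/printk.h>

static void xpa_addressing_demo(void)
{
	phys_addr_t pa = 1ULL << 36;	/* 64GB: out of reach of 32 bits */

	/* %pa prints a phys_addr_t at its native width */
	pr_info("phys_addr_t spans %zu bytes; pa = %pa\n", sizeof(pa), &pa);
}
```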
if CPU_LOONGSON2F
config CPU_NOP_WORKAROUNDS
bool
......@@ -1741,6 +1770,9 @@ config SYS_HAS_CPU_MIPS32_R2
config SYS_HAS_CPU_MIPS32_R3_5
bool
config SYS_HAS_CPU_MIPS32_R5
bool
config SYS_HAS_CPU_MIPS32_R6
bool
......@@ -1878,6 +1910,9 @@ config CPU_MIPSR6
config EVA
bool
config XPA
bool
config SYS_SUPPORTS_32BIT_KERNEL
bool
config SYS_SUPPORTS_64BIT_KERNEL
......@@ -2114,7 +2149,7 @@ config MIPSR2_TO_R6_EMULATOR
help
Choose this option if you want to run non-R6 MIPS userland code.
Even if you say 'Y' here, the emulator will still be disabled by
default. You can enable it using the 'mipsr2emul' kernel option.
default. You can enable it using the 'mipsr2emu' kernel option.
The only reason this is a build-time option is to save ~14K from the
final kernel image.
comment "MIPS R2-to-R6 emulator is only available for UP kernels"
......@@ -2184,7 +2219,7 @@ config MIPS_CMP
config MIPS_CPS
bool "MIPS Coherent Processing System support"
depends on SYS_SUPPORTS_MIPS_CPS
depends on SYS_SUPPORTS_MIPS_CPS && !64BIT
select MIPS_CM
select MIPS_CPC
select MIPS_CPS_PM if HOTPLUG_CPU
......
......@@ -197,11 +197,17 @@ endif
# Warning: the 64-bit MIPS architecture does not support the `smartmips' extension
# Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has
# been fixed properly.
mips-cflags := "$(cflags-y)"
cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,$(mips-cflags),-msmartmips) -Wa,--no-warn
cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,$(mips-cflags),-mmicromips)
mips-cflags := $(cflags-y)
ifeq ($(CONFIG_CPU_HAS_SMARTMIPS),y)
smartmips-ase := $(call cc-option-yn,$(mips-cflags) -msmartmips)
cflags-$(smartmips-ase) += -msmartmips -Wa,--no-warn
endif
ifeq ($(CONFIG_CPU_MICROMIPS),y)
micromips-ase := $(call cc-option-yn,$(mips-cflags) -mmicromips)
cflags-$(micromips-ase) += -mmicromips
endif
ifeq ($(CONFIG_CPU_HAS_MSA),y)
toolchain-msa := $(call cc-option-yn,-$(mips-cflags),mhard-float -mfp64 -Wa$(comma)-mmsa)
toolchain-msa := $(call cc-option-yn,$(mips-cflags) -mhard-float -mfp64 -Wa$(comma)-mmsa)
cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA
endif
......
......@@ -235,8 +235,8 @@ static __init const struct bcm47xx_board_type *bcm47xx_board_get_nvram(void)
}
if (bcm47xx_nvram_getenv("hardware_version", buf1, sizeof(buf1)) >= 0 &&
bcm47xx_nvram_getenv("boardtype", buf2, sizeof(buf2)) >= 0) {
for (e2 = bcm47xx_board_list_boot_hw; e2->value1; e2++) {
bcm47xx_nvram_getenv("boardnum", buf2, sizeof(buf2)) >= 0) {
for (e2 = bcm47xx_board_list_hw_version_num; e2->value1; e2++) {
if (!strstarts(buf1, e2->value1) &&
!strcmp(buf2, e2->value2))
return &e2->board;
......
......@@ -17,7 +17,6 @@
#include <bcm63xx_cpu.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_gpio.h>
void __init prom_init(void)
{
......@@ -53,9 +52,6 @@ void __init prom_init(void)
reg &= ~mask;
bcm_perf_writel(reg, PERF_CKCTL_REG);
/* register gpiochip */
bcm63xx_gpio_init();
/* do low level board init */
board_prom_init();
......
......@@ -20,6 +20,7 @@
#include <bcm63xx_cpu.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
#include <bcm63xx_gpio.h>
void bcm63xx_machine_halt(void)
{
......@@ -160,6 +161,9 @@ void __init plat_mem_setup(void)
int __init bcm63xx_register_devices(void)
{
/* register gpiochip */
bcm63xx_gpio_init();
return board_register_devices();
}
......
......@@ -306,7 +306,7 @@ void __init plat_swiotlb_setup(void)
swiotlbsize = 64 * (1<<20);
}
#endif
#ifdef CONFIG_USB_OCTEON_OHCI
#ifdef CONFIG_USB_OHCI_HCD_PLATFORM
/* OCTEON II ohci is only 32-bit. */
if (OCTEON_IS_OCTEON2() && max_addr >= 0x100000000ul)
swiotlbsize = 64 * (1<<20);
......
......@@ -11,6 +11,36 @@
#define __ASM_ASM_EVA_H
#ifndef __ASSEMBLY__
/* Kernel variants */
#define kernel_cache(op, base) "cache " op ", " base "\n"
#define kernel_ll(reg, addr) "ll " reg ", " addr "\n"
#define kernel_sc(reg, addr) "sc " reg ", " addr "\n"
#define kernel_lw(reg, addr) "lw " reg ", " addr "\n"
#define kernel_lwl(reg, addr) "lwl " reg ", " addr "\n"
#define kernel_lwr(reg, addr) "lwr " reg ", " addr "\n"
#define kernel_lh(reg, addr) "lh " reg ", " addr "\n"
#define kernel_lb(reg, addr) "lb " reg ", " addr "\n"
#define kernel_lbu(reg, addr) "lbu " reg ", " addr "\n"
#define kernel_sw(reg, addr) "sw " reg ", " addr "\n"
#define kernel_swl(reg, addr) "swl " reg ", " addr "\n"
#define kernel_swr(reg, addr) "swr " reg ", " addr "\n"
#define kernel_sh(reg, addr) "sh " reg ", " addr "\n"
#define kernel_sb(reg, addr) "sb " reg ", " addr "\n"
#ifdef CONFIG_32BIT
/*
* No 'sd' or 'ld' instructions in 32-bit but the code will
* do the correct thing
*/
#define kernel_sd(reg, addr) user_sw(reg, addr)
#define kernel_ld(reg, addr) user_lw(reg, addr)
#else
#define kernel_sd(reg, addr) "sd " reg", " addr "\n"
#define kernel_ld(reg, addr) "ld " reg", " addr "\n"
#endif /* CONFIG_32BIT */
#ifdef CONFIG_EVA
#define __BUILD_EVA_INSN(insn, reg, addr) \
......@@ -41,37 +71,60 @@
#else
#define user_cache(op, base) "cache " op ", " base "\n"
#define user_ll(reg, addr) "ll " reg ", " addr "\n"
#define user_sc(reg, addr) "sc " reg ", " addr "\n"
#define user_lw(reg, addr) "lw " reg ", " addr "\n"
#define user_lwl(reg, addr) "lwl " reg ", " addr "\n"
#define user_lwr(reg, addr) "lwr " reg ", " addr "\n"
#define user_lh(reg, addr) "lh " reg ", " addr "\n"
#define user_lb(reg, addr) "lb " reg ", " addr "\n"
#define user_lbu(reg, addr) "lbu " reg ", " addr "\n"
#define user_sw(reg, addr) "sw " reg ", " addr "\n"
#define user_swl(reg, addr) "swl " reg ", " addr "\n"
#define user_swr(reg, addr) "swr " reg ", " addr "\n"
#define user_sh(reg, addr) "sh " reg ", " addr "\n"
#define user_sb(reg, addr) "sb " reg ", " addr "\n"
#define user_cache(op, base) kernel_cache(op, base)
#define user_ll(reg, addr) kernel_ll(reg, addr)
#define user_sc(reg, addr) kernel_sc(reg, addr)
#define user_lw(reg, addr) kernel_lw(reg, addr)
#define user_lwl(reg, addr) kernel_lwl(reg, addr)
#define user_lwr(reg, addr) kernel_lwr(reg, addr)
#define user_lh(reg, addr) kernel_lh(reg, addr)
#define user_lb(reg, addr) kernel_lb(reg, addr)
#define user_lbu(reg, addr) kernel_lbu(reg, addr)
#define user_sw(reg, addr) kernel_sw(reg, addr)
#define user_swl(reg, addr) kernel_swl(reg, addr)
#define user_swr(reg, addr) kernel_swr(reg, addr)
#define user_sh(reg, addr) kernel_sh(reg, addr)
#define user_sb(reg, addr) kernel_sb(reg, addr)
#ifdef CONFIG_32BIT
/*
* No 'sd' or 'ld' instructions in 32-bit but the code will
* do the correct thing
*/
#define user_sd(reg, addr) user_sw(reg, addr)
#define user_ld(reg, addr) user_lw(reg, addr)
#define user_sd(reg, addr) kernel_sw(reg, addr)
#define user_ld(reg, addr) kernel_lw(reg, addr)
#else
#define user_sd(reg, addr) "sd " reg", " addr "\n"
#define user_ld(reg, addr) "ld " reg", " addr "\n"
#define user_sd(reg, addr) kernel_sd(reg, addr)
#define user_ld(reg, addr) kernel_ld(reg, addr)
#endif /* CONFIG_32BIT */
#endif /* CONFIG_EVA */
#else /* __ASSEMBLY__ */
#define kernel_cache(op, base) cache op, base
#define kernel_ll(reg, addr) ll reg, addr
#define kernel_sc(reg, addr) sc reg, addr
#define kernel_lw(reg, addr) lw reg, addr
#define kernel_lwl(reg, addr) lwl reg, addr
#define kernel_lwr(reg, addr) lwr reg, addr
#define kernel_lh(reg, addr) lh reg, addr
#define kernel_lb(reg, addr) lb reg, addr
#define kernel_lbu(reg, addr) lbu reg, addr
#define kernel_sw(reg, addr) sw reg, addr
#define kernel_swl(reg, addr) swl reg, addr
#define kernel_swr(reg, addr) swr reg, addr
#define kernel_sh(reg, addr) sh reg, addr
#define kernel_sb(reg, addr) sb reg, addr
#ifdef CONFIG_32BIT
/*
* No 'sd' or 'ld' instructions in 32-bit but the code will
* do the correct thing
*/
#define kernel_sd(reg, addr) user_sw(reg, addr)
#define kernel_ld(reg, addr) user_lw(reg, addr)
#else
#define kernel_sd(reg, addr) sd reg, addr
#define kernel_ld(reg, addr) ld reg, addr
#endif /* CONFIG_32BIT */
#ifdef CONFIG_EVA
#define __BUILD_EVA_INSN(insn, reg, addr) \
......@@ -101,31 +154,27 @@
#define user_sd(reg, addr) user_sw(reg, addr)
#else
#define user_cache(op, base) cache op, base
#define user_ll(reg, addr) ll reg, addr
#define user_sc(reg, addr) sc reg, addr
#define user_lw(reg, addr) lw reg, addr
#define user_lwl(reg, addr) lwl reg, addr
#define user_lwr(reg, addr) lwr reg, addr
#define user_lh(reg, addr) lh reg, addr
#define user_lb(reg, addr) lb reg, addr
#define user_lbu(reg, addr) lbu reg, addr
#define user_sw(reg, addr) sw reg, addr
#define user_swl(reg, addr) swl reg, addr
#define user_swr(reg, addr) swr reg, addr
#define user_sh(reg, addr) sh reg, addr
#define user_sb(reg, addr) sb reg, addr
#define user_cache(op, base) kernel_cache(op, base)
#define user_ll(reg, addr) kernel_ll(reg, addr)
#define user_sc(reg, addr) kernel_sc(reg, addr)
#define user_lw(reg, addr) kernel_lw(reg, addr)
#define user_lwl(reg, addr) kernel_lwl(reg, addr)
#define user_lwr(reg, addr) kernel_lwr(reg, addr)
#define user_lh(reg, addr) kernel_lh(reg, addr)
#define user_lb(reg, addr) kernel_lb(reg, addr)
#define user_lbu(reg, addr) kernel_lbu(reg, addr)
#define user_sw(reg, addr) kernel_sw(reg, addr)
#define user_swl(reg, addr) kernel_swl(reg, addr)
#define user_swr(reg, addr) kernel_swr(reg, addr)
#define user_sh(reg, addr) kernel_sh(reg, addr)
#define user_sb(reg, addr) kernel_sb(reg, addr)
#ifdef CONFIG_32BIT
/*
* No 'sd' or 'ld' instructions in 32-bit but the code will
* do the correct thing
*/
#define user_sd(reg, addr) user_sw(reg, addr)
#define user_ld(reg, addr) user_lw(reg, addr)
#define user_sd(reg, addr) kernel_sw(reg, addr)
#define user_ld(reg, addr) kernel_lw(reg, addr)
#else
#define user_sd(reg, addr) sd reg, addr
#define user_ld(reg, addr) ld reg, addr
#define user_sd(reg, addr) kernel_sd(reg, addr)
#define user_ld(reg, addr) kernel_ld(reg, addr)
#endif /* CONFIG_32BIT */
#endif /* CONFIG_EVA */
......
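For context, a hedged usage sketch (illustrative, not from this patch) of how the C-side string macros above are consumed: they are pasted into inline asm, so the same source emits either an EVA access or a plain kernel access depending on CONFIG_EVA.

```c
/*
 * Hedged sketch: user_lw() expands to an instruction string that is
 * concatenated into the asm template. Under CONFIG_EVA it emits the
 * EVA load; otherwise it falls back to the kernel_lw() string above.
 */
#include <linux/compiler.h>
#include <asm/asm-eva.h>

static inline int demo_user_load(const int __user *uaddr)
{
	int val;

	__asm__ __volatile__(
		user_lw("%0", "0(%1)")
		: "=r" (val)
		: "r" (uaddr));

	return val;
}
```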
......@@ -29,6 +29,20 @@
* - flush_icache_all() flush the entire instruction cache
* - flush_data_cache_page() flushes a page from the data cache
*/
/*
* This flag is used to indicate that the page pointed to by a pte
* is dirty and requires cleaning before returning it to the user.
*/
#define PG_dcache_dirty PG_arch_1
#define Page_dcache_dirty(page) \
test_bit(PG_dcache_dirty, &(page)->flags)
#define SetPageDcacheDirty(page) \
set_bit(PG_dcache_dirty, &(page)->flags)
#define ClearPageDcacheDirty(page) \
clear_bit(PG_dcache_dirty, &(page)->flags)
extern void (*flush_cache_all)(void);
extern void (*__flush_cache_all)(void);
extern void (*flush_cache_mm)(struct mm_struct *mm);
......@@ -37,13 +51,15 @@ extern void (*flush_cache_range)(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
extern void __flush_dcache_page(struct page *page);
extern void __flush_icache_page(struct vm_area_struct *vma, struct page *page);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
{
if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc)
if (cpu_has_dc_aliases)
__flush_dcache_page(page);
else if (!cpu_has_ic_fills_f_dc)
SetPageDcacheDirty(page);
}
#define flush_dcache_mmap_lock(mapping) do { } while (0)
......@@ -61,6 +77,11 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
static inline void flush_icache_page(struct vm_area_struct *vma,
struct page *page)
{
if (!cpu_has_ic_fills_f_dc && (vma->vm_flags & VM_EXEC) &&
Page_dcache_dirty(page)) {
__flush_icache_page(vma, page);
ClearPageDcacheDirty(page);
}
}
extern void (*flush_icache_range)(unsigned long start, unsigned long end);
......@@ -95,19 +116,6 @@ extern void (*flush_icache_all)(void);
extern void (*local_flush_data_cache_page)(void * addr);
extern void (*flush_data_cache_page)(unsigned long addr);
/*
* This flag is used to indicate that the page pointed to by a pte
* is dirty and requires cleaning before returning it to the user.
*/
#define PG_dcache_dirty PG_arch_1
#define Page_dcache_dirty(page) \
test_bit(PG_dcache_dirty, &(page)->flags)
#define SetPageDcacheDirty(page) \
set_bit(PG_dcache_dirty, &(page)->flags)
#define ClearPageDcacheDirty(page) \
clear_bit(PG_dcache_dirty, &(page)->flags)
/* Run kernel code uncached, useful for cache probing functions. */
unsigned long run_uncached(void *func);
......
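To make the lazy-flush protocol above concrete, here is a hedged sketch paraphrasing the flow (all names are from the hunk above; the wrapper function itself is illustrative): writers merely mark the page, and the flush is deferred until an executable mapping actually needs a coherent icache.

```c
/*
 * Hedged paraphrase of the lazy protocol implemented above:
 * flush_dcache_page() defers work by marking the page, and
 * flush_icache_page() pays for the flush only when an executable
 * mapping observes the dirty mark.
 */
#include <linux/mm.h>
#include <asm/cacheflush.h>

static void demo_lazy_dcache(struct page *page, struct vm_area_struct *vma)
{
	/* Producer side: cheap bookkeeping instead of an immediate flush */
	if (!cpu_has_ic_fills_f_dc)
		SetPageDcacheDirty(page);

	/* Consumer side: flush only for executable mappings that care */
	if ((vma->vm_flags & VM_EXEC) && Page_dcache_dirty(page)) {
		__flush_icache_page(vma, page);
		ClearPageDcacheDirty(page);
	}
}
```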
......@@ -140,6 +140,9 @@
# endif
#endif
#ifndef cpu_has_xpa
#define cpu_has_xpa (cpu_data[0].options & MIPS_CPU_XPA)
#endif
#ifndef cpu_has_vtag_icache
#define cpu_has_vtag_icache (cpu_data[0].icache.flags & MIPS_CACHE_VTAG)
#endif
......@@ -239,8 +242,39 @@
/* MIPSR2 and MIPSR6 have a lot of similarities */
#define cpu_has_mips_r2_r6 (cpu_has_mips_r2 | cpu_has_mips_r6)
/*
* cpu_has_mips_r2_exec_hazard - return if IHB is required on current processor
*
* Returns non-zero value if the current processor implementation requires
* an IHB instruction to deal with an instruction hazard as per MIPS R2
* architecture specification, zero otherwise.
*/
#ifndef cpu_has_mips_r2_exec_hazard
#define cpu_has_mips_r2_exec_hazard (cpu_has_mips_r2 | cpu_has_mips_r6)
#define cpu_has_mips_r2_exec_hazard \
({ \
int __res; \
\
switch (current_cpu_type()) { \
case CPU_M14KC: \
case CPU_74K: \
case CPU_1074K: \
case CPU_PROAPTIV: \
case CPU_P5600: \
case CPU_M5150: \
case CPU_QEMU_GENERIC: \
case CPU_CAVIUM_OCTEON: \
case CPU_CAVIUM_OCTEON_PLUS: \
case CPU_CAVIUM_OCTEON2: \
case CPU_CAVIUM_OCTEON3: \
__res = 0; \
break; \
\
default: \
__res = 1; \
} \
\
__res; \
})
#endif
/*
......
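A hedged sketch of how this predicate is consumed (mirroring the tlbex.c hunk later in this merge; the wrapper name is illustrative): code generators emit the hazard barrier only when the switch above says the core needs it.

```c
/*
 * Hedged sketch mirroring build_tlb_write_entry() later in this
 * merge: emit an ehb into generated code only on cores that
 * actually require one.
 */
#include <asm/cpu-features.h>
#include <asm/uasm.h>

static void demo_clear_exec_hazard(u32 **p)
{
	if (cpu_has_mips_r2_exec_hazard)
		uasm_i_ehb(p);	/* instruction hazard barrier */
}
```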
......@@ -377,7 +377,8 @@ enum cpu_type_enum {
#define MIPS_CPU_MAAR 0x400000000ull /* MAAR(I) registers are present */
#define MIPS_CPU_FRE 0x800000000ull /* FRE & UFE bits implemented */
#define MIPS_CPU_RW_LLB 0x1000000000ull /* LLADDR/LLB writes are allowed */
#define MIPS_CPU_CDMM 0x2000000000ull /* CPU has Common Device Memory Map */
#define MIPS_CPU_XPA 0x2000000000ull /* CPU supports Extended Physical Addressing */
#define MIPS_CPU_CDMM 0x4000000000ull /* CPU has Common Device Memory Map */
/*
* CPU ASE encodings
......
......@@ -297,6 +297,9 @@ do { \
if (personality(current->personality) != PER_LINUX) \
set_personality(PER_LINUX); \
\
clear_thread_flag(TIF_HYBRID_FPREGS); \
set_thread_flag(TIF_32BIT_FPREGS); \
\
mips_set_personality_fp(state); \
\
current->thread.abi = &mips_abi; \
......@@ -324,6 +327,8 @@ do { \
do { \
set_thread_flag(TIF_32BIT_REGS); \
set_thread_flag(TIF_32BIT_ADDR); \
clear_thread_flag(TIF_HYBRID_FPREGS); \
set_thread_flag(TIF_32BIT_FPREGS); \
\
mips_set_personality_fp(state); \
\
......
......@@ -50,7 +50,6 @@
#define cpu_has_mips32r2 0
#define cpu_has_mips64r1 0
#define cpu_has_mips64r2 1
#define cpu_has_mips_r2_exec_hazard 0
#define cpu_has_dsp 0
#define cpu_has_dsp2 0
#define cpu_has_mipsmt 0
......
......@@ -11,9 +11,6 @@
#include <linux/pci.h>
/* Some PCI cards require delays when accessing config space. */
#define PCI_CONFIG_SPACE_DELAY 10000
/*
* The physical memory base mapped by BAR1. 256MB at the end of the
* first 4GB.
......
......@@ -105,13 +105,16 @@ static inline void pmd_clear(pmd_t *pmdp)
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_pfn(x) ((unsigned long)((x).pte_high >> 6))
#define pte_pfn(x) (((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
static inline pte_t
pfn_pte(unsigned long pfn, pgprot_t prot)
{
pte_t pte;
pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f);
pte.pte_low = pgprot_val(prot);
pte.pte_low = (pfn >> _PAGE_PRESENT_SHIFT) |
(pgprot_val(prot) & ~_PFNX_MASK);
pte.pte_high = (pfn << _PFN_SHIFT) |
(pgprot_val(prot) & ~_PFN_MASK);
return pte;
}
......@@ -166,9 +169,9 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
/* Swap entries must have VALID and GLOBAL bits cleared. */
#define __swp_type(x) (((x).val >> 2) & 0x1f)
#define __swp_offset(x) ((x).val >> 7)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
#define __swp_type(x) (((x).val >> 4) & 0x1f)
#define __swp_offset(x) ((x).val >> 9)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 4) | ((offset) << 9) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val })
......
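A worked example of the new encoding (hedged, derived from the shifts above): with the 5-bit swap type at bits 8:4 and the offset from bit 9 up, bits 3:0 of pte_high stay clear, matching where the XPA pgtable-bits layout later in this merge places NO_EXEC, NO_READ, GLOBAL and VALID.

```c
/*
 * Hedged worked example: type 3, offset 0x1234.
 * (3 << 4) | (0x1234 << 9) == 0x30 | 0x246800 == 0x246830,
 * and the low four bits stay clear so VALID and GLOBAL remain unset.
 */
static swp_entry_t demo_swap_encoding(void)
{
	return __swp_entry(3, 0x1234);	/* .val == 0x246830 */
}
```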
......@@ -37,7 +37,11 @@
/*
* The following bits are implemented by the TLB hardware
*/
#define _PAGE_GLOBAL_SHIFT 0
#define _PAGE_NO_EXEC_SHIFT 0
#define _PAGE_NO_EXEC (1 << _PAGE_NO_EXEC_SHIFT)
#define _PAGE_NO_READ_SHIFT (_PAGE_NO_EXEC_SHIFT + 1)
#define _PAGE_NO_READ (1 << _PAGE_NO_READ_SHIFT)
#define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1)
#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
......@@ -49,7 +53,7 @@
/*
* The following bits are implemented in software
*/
#define _PAGE_PRESENT_SHIFT (_CACHE_SHIFT + 3)
#define _PAGE_PRESENT_SHIFT (24)
#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
#define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1)
#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
......@@ -62,6 +66,11 @@
#define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
/*
* Bits for extended EntryLo0/EntryLo1 registers
*/
#define _PFNX_MASK 0xffffff
#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
/*
......@@ -95,11 +104,7 @@
#else
/*
* When using the RI/XI bit support, we have 13 bits of flags below
* the physical address. The RI/XI bits are placed such that a SRL 5
* can strip off the software bits, then a ROTR 2 can move the RI/XI
* into bits [63:62]. This also limits physical address to 56 bits,
* which is more than we need right now.
* Below are the "Normal" R4K cases
*/
/*
......@@ -107,38 +112,59 @@
*/
#define _PAGE_PRESENT_SHIFT 0
#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
#define _PAGE_READ_SHIFT (cpu_has_rixi ? _PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1)
#define _PAGE_READ ({BUG_ON(cpu_has_rixi); 1 << _PAGE_READ_SHIFT; })
/* R2 or later cores check for RI/XI support to determine _PAGE_READ */
#ifdef CONFIG_CPU_MIPSR2
#define _PAGE_WRITE_SHIFT (_PAGE_PRESENT_SHIFT + 1)
#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
#else
#define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1)
#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1)
#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
#endif
#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1)
#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/* huge tlb page */
#if defined(CONFIG_64BIT) && defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
/* Huge TLB page */
#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
#define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT)
#define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT + 1)
#define _PAGE_SPLITTING (1 << _PAGE_SPLITTING_SHIFT)
/* Only R2 or newer cores have the XI bit */
#ifdef CONFIG_CPU_MIPSR2
#define _PAGE_NO_EXEC_SHIFT (_PAGE_SPLITTING_SHIFT + 1)
#else
#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT)
#define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */
#define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT)
#define _PAGE_SPLITTING ({BUG(); 1; }) /* Dummy value */
#endif
#define _PAGE_GLOBAL_SHIFT (_PAGE_SPLITTING_SHIFT + 1)
#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
#endif /* CONFIG_CPU_MIPSR2 */
/* Page cannot be executed */
#define _PAGE_NO_EXEC_SHIFT (cpu_has_rixi ? _PAGE_SPLITTING_SHIFT + 1 : _PAGE_SPLITTING_SHIFT)
#define _PAGE_NO_EXEC ({BUG_ON(!cpu_has_rixi); 1 << _PAGE_NO_EXEC_SHIFT; })
#endif /* CONFIG_64BIT && CONFIG_MIPS_HUGE_TLB_SUPPORT */
/* Page cannot be read */
#define _PAGE_NO_READ_SHIFT (cpu_has_rixi ? _PAGE_NO_EXEC_SHIFT + 1 : _PAGE_NO_EXEC_SHIFT)
#define _PAGE_NO_READ ({BUG_ON(!cpu_has_rixi); 1 << _PAGE_NO_READ_SHIFT; })
#ifdef CONFIG_CPU_MIPSR2
/* XI - page cannot be executed */
#ifndef _PAGE_NO_EXEC_SHIFT
#define _PAGE_NO_EXEC_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
#endif
#define _PAGE_NO_EXEC (cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0)
/* RI - page cannot be read */
#define _PAGE_READ_SHIFT (_PAGE_NO_EXEC_SHIFT + 1)
#define _PAGE_READ (cpu_has_rixi ? 0 : (1 << _PAGE_READ_SHIFT))
#define _PAGE_NO_READ_SHIFT _PAGE_READ_SHIFT
#define _PAGE_NO_READ (cpu_has_rixi ? (1 << _PAGE_READ_SHIFT) : 0)
#define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1)
#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
#else /* !CONFIG_CPU_MIPSR2 */
#define _PAGE_GLOBAL_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
#endif /* CONFIG_CPU_MIPSR2 */
#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1)
......@@ -150,18 +176,26 @@
#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */
#ifndef _PAGE_NO_EXEC
#define _PAGE_NO_EXEC 0
#endif
#ifndef _PAGE_NO_READ
#define _PAGE_NO_READ 0
#endif
#define _PAGE_SILENT_READ _PAGE_VALID
#define _PAGE_SILENT_WRITE _PAGE_DIRTY
#define _PFN_MASK (~((1 << (_PFN_SHIFT)) - 1))
#ifndef _PAGE_NO_READ
#define _PAGE_NO_READ ({BUG(); 0; })
#define _PAGE_NO_READ_SHIFT ({BUG(); 0; })
#endif
#ifndef _PAGE_NO_EXEC
#define _PAGE_NO_EXEC ({BUG(); 0; })
#endif
/*
* The final layouts of the PTE bits are:
*
* 64-bit, R1 or earlier: CCC D V G [S H] M A W R P
* 32-bit, R1 or earlier: CCC D V G M A W R P
* 64-bit, R2 or later: CCC D V G RI/R XI [S H] M A W P
* 32-bit, R2 or later: CCC D V G RI/R XI M A W P
*/
#ifndef __ASSEMBLY__
......@@ -171,6 +205,7 @@
*/
static inline uint64_t pte_to_entrylo(unsigned long pte_val)
{
#ifdef CONFIG_CPU_MIPSR2
if (cpu_has_rixi) {
int sa;
#ifdef CONFIG_32BIT
......@@ -186,6 +221,7 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)
return (pte_val >> _PAGE_GLOBAL_SHIFT) |
((pte_val & (_PAGE_NO_EXEC | _PAGE_NO_READ)) << sa);
}
#endif
return pte_val >> _PAGE_GLOBAL_SHIFT;
}
......@@ -245,7 +281,7 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)
#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT)
#endif
#define __READABLE (_PAGE_SILENT_READ | _PAGE_ACCESSED | (cpu_has_rixi ? 0 : _PAGE_READ))
#define __READABLE (_PAGE_SILENT_READ | _PAGE_READ | _PAGE_ACCESSED)
#define __WRITEABLE (_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED)
#define _PAGE_CHG_MASK (_PAGE_ACCESSED | _PAGE_MODIFIED | \
......
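The pte_to_entrylo() helper above relies on a rotate trick; a hedged worked example, assuming the 32-bit R2 layout without huge pages described earlier (NO_EXEC at bit 4, NO_READ at bit 5, GLOBAL at bit 6, so sa = 31 - _PAGE_NO_READ_SHIFT = 26):

```c
/*
 * Hedged worked example of pte_to_entrylo() under the assumed layout:
 * the shift by _PAGE_GLOBAL_SHIFT (6) lands G in EntryLo bit 0, and
 * the rotate-by-26 lands the RI/XI pair in EntryLo bits 31:30 where
 * the TLB hardware expects them.
 */
#include <linux/types.h>

static uint64_t demo_entrylo(void)
{
	unsigned long pte = (1 << 6) | (1 << 5);	/* GLOBAL | NO_READ */

	return (pte >> 6) | ((pte & ((1 << 4) | (1 << 5))) << 26);
	/* == 0x80000001: G in bit 0, RI in bit 31 */
}
```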
......@@ -24,17 +24,17 @@ struct mm_struct;
struct vm_area_struct;
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | (cpu_has_rixi ? 0 : _PAGE_READ) | \
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | \
_page_cachable_default)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
(cpu_has_rixi ? _PAGE_NO_EXEC : 0) | _page_cachable_default)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_NO_EXEC | \
_page_cachable_default)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_READ | \
_page_cachable_default)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
_PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
_PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_USERIO __pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \
#define PAGE_USERIO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
_page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
......@@ -127,13 +127,9 @@ do { \
} \
} while(0)
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
pte_t pteval);
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#define pte_none(pte) (!(((pte).pte_high) & ~_PAGE_GLOBAL))
#define pte_present(pte) ((pte).pte_low & _PAGE_PRESENT)
static inline void set_pte(pte_t *ptep, pte_t pte)
......@@ -142,18 +138,17 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
smp_wmb();
ptep->pte_low = pte.pte_low;
if (pte.pte_low & _PAGE_GLOBAL) {
if (pte.pte_high & _PAGE_GLOBAL) {
pte_t *buddy = ptep_buddy(ptep);
/*
* Make sure the buddy is global too (if it's !none,
* it better already be global)
*/
if (pte_none(*buddy)) {
buddy->pte_low |= _PAGE_GLOBAL;
if (pte_none(*buddy))
buddy->pte_high |= _PAGE_GLOBAL;
}
}
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
......@@ -161,8 +156,8 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
htw_stop();
/* Preserve global status for the pair */
if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
null.pte_low = null.pte_high = _PAGE_GLOBAL;
if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
null.pte_high = _PAGE_GLOBAL;
set_pte_at(mm, addr, ptep, null);
htw_start();
......@@ -192,6 +187,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
}
#endif
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
......@@ -242,21 +238,21 @@ static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; }
static inline pte_t pte_wrprotect(pte_t pte)
{
pte.pte_low &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
pte.pte_low &= ~_PAGE_WRITE;
pte.pte_high &= ~_PAGE_SILENT_WRITE;
return pte;
}
static inline pte_t pte_mkclean(pte_t pte)
{
pte.pte_low &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
pte.pte_low &= ~_PAGE_MODIFIED;
pte.pte_high &= ~_PAGE_SILENT_WRITE;
return pte;
}
static inline pte_t pte_mkold(pte_t pte)
{
pte.pte_low &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
pte.pte_low &= ~_PAGE_ACCESSED;
pte.pte_high &= ~_PAGE_SILENT_READ;
return pte;
}
......@@ -264,30 +260,24 @@ static inline pte_t pte_mkold(pte_t pte)
static inline pte_t pte_mkwrite(pte_t pte)
{
pte.pte_low |= _PAGE_WRITE;
if (pte.pte_low & _PAGE_MODIFIED) {
pte.pte_low |= _PAGE_SILENT_WRITE;
if (pte.pte_low & _PAGE_MODIFIED)
pte.pte_high |= _PAGE_SILENT_WRITE;
}
return pte;
}
static inline pte_t pte_mkdirty(pte_t pte)
{
pte.pte_low |= _PAGE_MODIFIED;
if (pte.pte_low & _PAGE_WRITE) {
pte.pte_low |= _PAGE_SILENT_WRITE;
if (pte.pte_low & _PAGE_WRITE)
pte.pte_high |= _PAGE_SILENT_WRITE;
}
return pte;
}
static inline pte_t pte_mkyoung(pte_t pte)
{
pte.pte_low |= _PAGE_ACCESSED;
if (pte.pte_low & _PAGE_READ) {
pte.pte_low |= _PAGE_SILENT_READ;
if (pte.pte_low & _PAGE_READ)
pte.pte_high |= _PAGE_SILENT_READ;
}
return pte;
}
#else
......@@ -332,13 +322,13 @@ static inline pte_t pte_mkdirty(pte_t pte)
static inline pte_t pte_mkyoung(pte_t pte)
{
pte_val(pte) |= _PAGE_ACCESSED;
if (cpu_has_rixi) {
#ifdef CONFIG_CPU_MIPSR2
if (!(pte_val(pte) & _PAGE_NO_READ))
pte_val(pte) |= _PAGE_SILENT_READ;
} else {
else
#endif
if (pte_val(pte) & _PAGE_READ)
pte_val(pte) |= _PAGE_SILENT_READ;
}
return pte;
}
......@@ -391,10 +381,10 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte.pte_low &= _PAGE_CHG_MASK;
pte.pte_low &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
pte.pte_low |= pgprot_val(newprot);
pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
pte.pte_low |= pgprot_val(newprot) & ~_PFNX_MASK;
pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
return pte;
}
#else
......@@ -407,12 +397,15 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
pte_t pte);
extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
pte_t pte);
static inline void update_mmu_cache(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
pte_t pte = *ptep;
__update_tlb(vma, address, pte);
__update_cache(vma, address, pte);
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
......@@ -534,13 +527,13 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
pmd_val(pmd) |= _PAGE_ACCESSED;
if (cpu_has_rixi) {
#ifdef CONFIG_CPU_MIPSR2
if (!(pmd_val(pmd) & _PAGE_NO_READ))
pmd_val(pmd) |= _PAGE_SILENT_READ;
} else {
else
#endif
if (pmd_val(pmd) & _PAGE_READ)
pmd_val(pmd) |= _PAGE_SILENT_READ;
}
return pmd;
}
......
......@@ -12,6 +12,8 @@
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H
#include <linux/stringify.h>
#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/compiler.h>
......@@ -344,7 +346,7 @@ static inline void invalidate_tcache_page(unsigned long addr)
" cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n" \
" cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n" \
" cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n" \
" addiu $1, $0, 0x100 \n" \
" "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x010($1)\n" \
" cache %1, 0x020($1); cache %1, 0x030($1)\n" \
" cache %1, 0x040($1); cache %1, 0x050($1)\n" \
......@@ -368,17 +370,17 @@ static inline void invalidate_tcache_page(unsigned long addr)
" cache %1, 0x040(%0); cache %1, 0x060(%0)\n" \
" cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n" \
" cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n" \
" addiu $1, %0, 0x100\n" \
" "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x020($1)\n" \
" cache %1, 0x040($1); cache %1, 0x060($1)\n" \
" cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
" cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
" addiu $1, $1, 0x100\n" \
" "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x020($1)\n" \
" cache %1, 0x040($1); cache %1, 0x060($1)\n" \
" cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
" cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
" addiu $1, $1, 0x100\n" \
" "__stringify(LONG_ADDIU)" $1, $1, 0x100\n" \
" cache %1, 0x000($1); cache %1, 0x020($1)\n" \
" cache %1, 0x040($1); cache %1, 0x060($1)\n" \
" cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
......@@ -396,25 +398,25 @@ static inline void invalidate_tcache_page(unsigned long addr)
" .set noat\n" \
" cache %1, 0x000(%0); cache %1, 0x040(%0)\n" \
" cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n" \
" addiu $1, %0, 0x100\n" \
" "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x040($1)\n" \
" cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
" addiu $1, %0, 0x100\n" \
" "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x040($1)\n" \
" cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
" addiu $1, %0, 0x100\n" \
" "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x040($1)\n" \
" cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
" addiu $1, %0, 0x100\n" \
" "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x040($1)\n" \
" cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
" addiu $1, %0, 0x100\n" \
" "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x040($1)\n" \
" cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
" addiu $1, %0, 0x100\n" \
" "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x040($1)\n" \
" cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
" addiu $1, %0, 0x100\n" \
" "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x040($1)\n" \
" cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
" .set pop\n" \
......@@ -429,39 +431,38 @@ static inline void invalidate_tcache_page(unsigned long addr)
" .set mips64r6\n" \
" .set noat\n" \
" cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
" addiu $1, %0, 0x100\n" \
" cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
" addiu $1, %0, 0x100\n" \
" cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
" addiu $1, %0, 0x100\n" \
" cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
" addiu $1, %0, 0x100\n" \
" cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
" addiu $1, %0, 0x100\n" \
" cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
" addiu $1, %0, 0x100\n" \
" cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
" addiu $1, %0, 0x100\n" \
" cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
" addiu $1, %0, 0x100\n" \
" cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
" addiu $1, %0, 0x100\n" \
" cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
" addiu $1, %0, 0x100\n" \
" cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
" addiu $1, %0, 0x100\n" \
" cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
" addiu $1, %0, 0x100\n" \
" cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
" addiu $1, %0, 0x100\n" \
" cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
" addiu $1, %0, 0x100\n" \
" cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
" addiu $1, %0, 0x100\n" \
" cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
" addiu $1, %0, 0x100\n" \
" cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
" addiu $1, %0, 0x100\n" \
" "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x080($1)\n" \
" "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x080($1)\n" \
" "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x080($1)\n" \
" "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x080($1)\n" \
" "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x080($1)\n" \
" "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x080($1)\n" \
" "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x080($1)\n" \
" "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x080($1)\n" \
" "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x080($1)\n" \
" "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x080($1)\n" \
" "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x080($1)\n" \
" "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x080($1)\n" \
" "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x080($1)\n" \
" "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x080($1)\n" \
" "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x080($1)\n" \
" "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x080($1)\n" \
" .set pop\n" \
: \
: "r" (base), \
......
......@@ -263,7 +263,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
"1: ll %1, %2 # arch_read_unlock \n"
" addiu %1, 1 \n"
" addiu %1, -1 \n"
" sc %1, %0 \n"
" beqzl %1, 1b \n"
: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
......
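The fix above makes arch_read_unlock() decrement rather than increment the reader count; a hedged plain-C paraphrase of what the LL/SC loop implements (cmpxchg stands in for the store-conditional here):

```c
/*
 * Hedged paraphrase of the LL/SC loop above: an atomic decrement of
 * the reader count, retried until the store-conditional (modelled by
 * cmpxchg) succeeds.
 */
#include <linux/compiler.h>
#include <asm/spinlock_types.h>

static inline void demo_read_unlock(arch_rwlock_t *rw)
{
	unsigned int old;

	do {
		old = READ_ONCE(rw->lock);			/* ll */
	} while (cmpxchg(&rw->lock, old, old - 1) != old);	/* sc */
}
```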
......@@ -600,6 +600,10 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
c->options |= MIPS_CPU_MAAR;
if (config5 & MIPS_CONF5_LLB)
c->options |= MIPS_CPU_RW_LLB;
#ifdef CONFIG_XPA
if (config5 & MIPS_CONF5_MVH)
c->options |= MIPS_CPU_XPA;
#endif
return config5 & MIPS_CONF_M;
}
......
......@@ -10,6 +10,7 @@
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/compiler.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
......@@ -185,7 +186,7 @@ syscall_exit_work:
* For C code use the inline version named instruction_hazard().
*/
LEAF(mips_ihb)
.set mips32r2
.set MIPS_ISA_LEVEL_RAW
jr.hb ra
nop
END(mips_ihb)
......
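As the comment above notes, C code should reach mips_ihb through the inline helper rather than calling it directly; a hedged usage sketch:

```c
/*
 * Hedged usage sketch: C callers use the inline helper named in the
 * comment above instead of calling mips_ihb directly.
 */
#include <asm/hazards.h>

static void demo_after_cp0_update(void)
{
	/* e.g. after writing a CP0 register that affects execution */
	instruction_hazard();
}
```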
......@@ -120,6 +120,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (cpu_has_msa) seq_printf(m, "%s", " msa");
if (cpu_has_eva) seq_printf(m, "%s", " eva");
if (cpu_has_htw) seq_printf(m, "%s", " htw");
if (cpu_has_xpa) seq_printf(m, "%s", " xpa");
seq_printf(m, "\n");
if (cpu_has_mmips) {
......
......@@ -88,6 +88,12 @@ static void __init cps_smp_setup(void)
/* Make core 0 coherent with everything */
write_gcr_cl_coherence(0xff);
#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
if (cpu_has_fpu)
cpu_set(0, mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}
static void __init cps_prepare_cpus(unsigned int max_cpus)
......
......@@ -44,6 +44,7 @@ void mach_irq_dispatch(unsigned int pending)
static struct irqaction cascade_irqaction = {
.handler = no_action,
.flags = IRQF_NO_SUSPEND,
.name = "cascade",
};
......
......@@ -119,36 +119,37 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
EXPORT_SYMBOL(__flush_anon_page);
static void mips_flush_dcache_from_pte(pte_t pteval, unsigned long address)
void __flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
unsigned long addr;
if (PageHighMem(page))
return;
addr = (unsigned long) page_address(page);
flush_data_cache_page(addr);
}
EXPORT_SYMBOL_GPL(__flush_icache_page);
void __update_cache(struct vm_area_struct *vma, unsigned long address,
pte_t pte)
{
struct page *page;
unsigned long pfn = pte_pfn(pteval);
unsigned long pfn, addr;
int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;
pfn = pte_pfn(pte);
if (unlikely(!pfn_valid(pfn)))
return;
page = pfn_to_page(pfn);
if (page_mapping(page) && Page_dcache_dirty(page)) {
unsigned long page_addr = (unsigned long) page_address(page);
if (!cpu_has_ic_fills_f_dc ||
pages_do_alias(page_addr, address & PAGE_MASK))
flush_data_cache_page(page_addr);
addr = (unsigned long) page_address(page);
if (exec || pages_do_alias(addr, address & PAGE_MASK))
flush_data_cache_page(addr);
ClearPageDcacheDirty(page);
}
}
void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval)
{
if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc) {
if (pte_present(pteval))
mips_flush_dcache_from_pte(pteval, addr);
}
set_pte(ptep, pteval);
}
unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);
......
......@@ -96,7 +96,7 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
vaddr = __fix_to_virt(FIX_CMAP_END - idx);
pte = mk_pte(page, prot);
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
entrylo = pte.pte_high;
entrylo = pte_to_entrylo(pte.pte_high);
#else
entrylo = pte_to_entrylo(pte_val(pte));
#endif
......@@ -106,6 +106,11 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
write_c0_entryhi(vaddr & (PAGE_MASK << 1));
write_c0_entrylo0(entrylo);
write_c0_entrylo1(entrylo);
#ifdef CONFIG_XPA
entrylo = (pte.pte_low & _PFNX_MASK);
writex_c0_entrylo0(entrylo);
writex_c0_entrylo1(entrylo);
#endif
tlbidx = read_c0_wired();
write_c0_wired(tlbidx + 1);
write_c0_index(tlbidx);
......
......@@ -333,9 +333,17 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
ptep = pte_offset_map(pmdp, address);
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#ifdef CONFIG_XPA
write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
ptep++;
write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
#else
write_c0_entrylo0(ptep->pte_high);
ptep++;
write_c0_entrylo1(ptep->pte_high);
#endif
#else
write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
......@@ -355,6 +363,9 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
unsigned long entryhi, unsigned long pagemask)
{
#ifdef CONFIG_XPA
panic("Broken for XPA kernels");
#else
unsigned long flags;
unsigned long wired;
unsigned long old_pagemask;
......@@ -383,6 +394,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
write_c0_pagemask(old_pagemask);
local_flush_tlb_all();
local_irq_restore(flags);
#endif
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
......
......@@ -35,6 +35,17 @@
#include <asm/uasm.h>
#include <asm/setup.h>
static int mips_xpa_disabled;
static int __init xpa_disable(char *s)
{
mips_xpa_disabled = 1;
return 1;
}
__setup("noxpa", xpa_disable);
/*
* TLB load/store/modify handlers.
*
......@@ -231,14 +242,14 @@ static void output_pgtable_bits_defines(void)
pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT);
#endif
#ifdef CONFIG_CPU_MIPSR2
if (cpu_has_rixi) {
#ifdef _PAGE_NO_EXEC_SHIFT
pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
#endif
#ifdef _PAGE_NO_READ_SHIFT
pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
#endif
}
#endif
pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
......@@ -501,26 +512,9 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
case tlb_indexed: tlbw = uasm_i_tlbwi; break;
}
if (cpu_has_mips_r2_exec_hazard) {
/*
* The architecture spec says an ehb is required here,
* but a number of cores do not have the hazard and
* using an ehb causes an expensive pipeline stall.
*/
switch (current_cpu_type()) {
case CPU_M14KC:
case CPU_74K:
case CPU_1074K:
case CPU_PROAPTIV:
case CPU_P5600:
case CPU_M5150:
case CPU_QEMU_GENERIC:
break;
default:
if (cpu_has_mips_r2_r6) {
if (cpu_has_mips_r2_exec_hazard)
uasm_i_ehb(p);
break;
}
tlbw(p);
return;
}
......@@ -1028,12 +1022,27 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
} else {
int pte_off_even = sizeof(pte_t) / 2;
int pte_off_odd = pte_off_even + sizeof(pte_t);
#ifdef CONFIG_XPA
const int scratch = 1; /* Our extra working register */
/* The pte entries are pre-shifted */
uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
uasm_i_addu(p, scratch, 0, ptep);
#endif
uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */
UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
UASM_i_MTC0(p, ptep, C0_ENTRYLO1);
#ifdef CONFIG_XPA
uasm_i_lw(p, tmp, 0, scratch);
uasm_i_lw(p, ptep, sizeof(pte_t), scratch);
uasm_i_lui(p, scratch, 0xff);
uasm_i_ori(p, scratch, scratch, 0xffff);
uasm_i_and(p, tmp, scratch, tmp);
uasm_i_and(p, ptep, scratch, ptep);
uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
uasm_i_mthc0(p, ptep, C0_ENTRYLO1);
#endif
}
#else
UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
......@@ -1534,8 +1543,14 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif
if (!cpu_has_64bits) {
const int scratch = 1; /* Our extra working register */
uasm_i_lui(p, scratch, (mode >> 16));
uasm_i_or(p, pte, pte, scratch);
} else
#endif
uasm_i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
# ifdef CONFIG_PHYS_ADDR_T_64BIT
......@@ -1599,15 +1614,17 @@ build_pte_present(u32 **p, struct uasm_reloc **r,
uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
uasm_i_nop(p);
} else {
uasm_i_andi(p, t, pte, _PAGE_PRESENT);
uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT);
uasm_i_andi(p, t, t, 1);
uasm_il_beqz(p, r, t, lid);
if (pte == t)
/* You lose the SMP race :-(*/
iPTE_LW(p, pte, ptr);
}
} else {
uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ);
uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ);
uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT);
uasm_i_andi(p, t, t, 3);
uasm_i_xori(p, t, t, 3);
uasm_il_bnez(p, r, t, lid);
if (pte == t)
/* You lose the SMP race :-(*/
......@@ -1636,8 +1653,9 @@ build_pte_writable(u32 **p, struct uasm_reloc **r,
{
int t = scratch >= 0 ? scratch : pte;
uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE);
uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE);
uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT);
uasm_i_andi(p, t, t, 5);
uasm_i_xori(p, t, t, 5);
uasm_il_bnez(p, r, t, lid);
if (pte == t)
/* You lose the SMP race :-(*/
......@@ -1673,7 +1691,8 @@ build_pte_modifiable(u32 **p, struct uasm_reloc **r,
uasm_i_nop(p);
} else {
int t = scratch >= 0 ? scratch : pte;
uasm_i_andi(p, t, pte, _PAGE_WRITE);
uasm_i_srl(p, t, pte, _PAGE_WRITE_SHIFT);
uasm_i_andi(p, t, t, 1);
uasm_il_beqz(p, r, t, lid);
if (pte == t)
/* You lose the SMP race :-(*/
......@@ -2286,6 +2305,11 @@ static void config_htw_params(void)
pwsize = ilog2(PTRS_PER_PGD) << MIPS_PWSIZE_GDW_SHIFT;
pwsize |= ilog2(PTRS_PER_PTE) << MIPS_PWSIZE_PTW_SHIFT;
/* If XPA has been enabled, PTEs are 64-bit in size. */
if (read_c0_pagegrain() & PG_ELPA)
pwsize |= 1;
write_c0_pwsize(pwsize);
/* Make sure everything is set before we enable the HTW */
......@@ -2299,6 +2323,28 @@ static void config_htw_params(void)
print_htw_config();
}
static void config_xpa_params(void)
{
#ifdef CONFIG_XPA
unsigned int pagegrain;
if (mips_xpa_disabled) {
pr_info("Extended Physical Addressing (XPA) disabled\n");
return;
}
pagegrain = read_c0_pagegrain();
write_c0_pagegrain(pagegrain | PG_ELPA);
back_to_back_c0_hazard();
pagegrain = read_c0_pagegrain();
if (pagegrain & PG_ELPA)
pr_info("Extended Physical Addressing (XPA) enabled\n");
else
panic("Extended Physical Addressing (XPA) disabled");
#endif
}
void build_tlb_refill_handler(void)
{
/*
......@@ -2363,8 +2409,9 @@ void build_tlb_refill_handler(void)
}
if (cpu_has_local_ebase)
build_r4000_tlb_refill_handler();
if (cpu_has_xpa)
config_xpa_params();
if (cpu_has_htw)
config_htw_params();
}
}
......@@ -54,6 +54,12 @@ fw_memblock_t * __init fw_getmdesc(int eva)
pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
physical_memsize = 0x02000000;
} else {
if (memsize > (256 << 20)) { /* memsize should be capped to 256M */
pr_warn("Unsupported memsize value (0x%lx) detected! "
"Using 0x10000000 (256M) instead\n",
memsize);
memsize = 256 << 20;
}
/* If ememsize is set, then set physical_memsize to that */
physical_memsize = ememsize ? : memsize;
}
......
......@@ -203,6 +203,7 @@ static u8 read_phy_reg(u64 regbase, u32 addr, u32 physel)
static void config_sata_phy(u64 regbase)
{
u32 port, i, reg;
u8 val;
for (port = 0; port < 2; port++) {
for (i = 0, reg = RXCDRCALFOSC0; reg <= CALDUTY; reg++, i++)
......@@ -210,6 +211,18 @@ static void config_sata_phy(u64 regbase)
for (i = 0, reg = RXDPIF; reg <= PPMDRIFTMAX_HI; reg++, i++)
write_phy_reg(regbase, reg, port, sata_phy_config2[i]);
/* Fix for PHY link up failures at lower temperatures */
write_phy_reg(regbase, 0x800F, port, 0x1f);
val = read_phy_reg(regbase, 0x0029, port);
write_phy_reg(regbase, 0x0029, port, val | (0x7 << 1));
val = read_phy_reg(regbase, 0x0056, port);
write_phy_reg(regbase, 0x0056, port, val & ~(1 << 3));
val = read_phy_reg(regbase, 0x0018, port);
write_phy_reg(regbase, 0x0018, port, val & ~(0x7 << 0));
}
}
......
......@@ -43,7 +43,7 @@ obj-$(CONFIG_SIBYTE_BCM1x80) += pci-bcm1480.o pci-bcm1480ht.o
obj-$(CONFIG_SNI_RM) += fixup-sni.o ops-sni.o
obj-$(CONFIG_LANTIQ) += fixup-lantiq.o
obj-$(CONFIG_PCI_LANTIQ) += pci-lantiq.o ops-lantiq.o
obj-$(CONFIG_SOC_RT2880) += pci-rt2880.o
obj-$(CONFIG_SOC_RT288X) += pci-rt2880.o
obj-$(CONFIG_SOC_RT3883) += pci-rt3883.o
obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o
obj-$(CONFIG_TANBAC_TB0226) += fixup-tb0226.o
......
......@@ -214,6 +214,8 @@ const char *octeon_get_pci_interrupts(void)
return "AAABAAAAAAAAAAAAAAAAAAAAAAAAAAAA";
case CVMX_BOARD_TYPE_BBGW_REF:
return "AABCD";
case CVMX_BOARD_TYPE_CUST_DSR1000N:
return "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC";
case CVMX_BOARD_TYPE_THUNDER:
case CVMX_BOARD_TYPE_EBH3000:
default:
......@@ -271,9 +273,6 @@ static int octeon_read_config(struct pci_bus *bus, unsigned int devfn,
pci_addr.s.func = devfn & 0x7;
pci_addr.s.reg = reg;
#if PCI_CONFIG_SPACE_DELAY
udelay(PCI_CONFIG_SPACE_DELAY);
#endif
switch (size) {
case 4:
*val = le32_to_cpu(cvmx_read64_uint32(pci_addr.u64));
......@@ -308,9 +307,6 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
pci_addr.s.func = devfn & 0x7;
pci_addr.s.reg = reg;
#if PCI_CONFIG_SPACE_DELAY
udelay(PCI_CONFIG_SPACE_DELAY);
#endif
switch (size) {
case 4:
cvmx_write64_uint32(pci_addr.u64, cpu_to_le32(val));
......
......@@ -1762,14 +1762,6 @@ static int octeon_pcie_write_config(unsigned int pcie_port, struct pci_bus *bus,
default:
return PCIBIOS_FUNC_NOT_SUPPORTED;
}
#if PCI_CONFIG_SPACE_DELAY
/*
* Delay on writes so that devices have time to come up. Some
* bridges need this to allow time for the secondary busses to
* work
*/
udelay(PCI_CONFIG_SPACE_DELAY);
#endif
return PCIBIOS_SUCCESSFUL;
}
......
......@@ -7,6 +7,11 @@ config CLKEVT_RT3352
select CLKSRC_OF
select CLKSRC_MMIO
config RALINK_ILL_ACC
bool
depends on SOC_RT305X
default y
choice
prompt "Ralink SoC selection"
default SOC_RT305X
......
......@@ -130,6 +130,7 @@ config SSB_DRIVER_MIPS
bool "SSB Broadcom MIPS core driver"
depends on SSB && MIPS
select SSB_SERIAL
select SSB_SFLASH
help
Driver for the Sonics Silicon Backplane attached
Broadcom MIPS core.
......