Commit adb0b25f authored by James Hogan

Merge MIPS prerequisites

Merge in MIPS prerequisites from the GVA page tables and GPA page tables
series. The same branch can also be merged into the MIPS tree.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
parents 5a6da5f7 7170bdc7
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -43,21 +43,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
  * Initialize a new pgd / pmd table with invalid pointers.
  */
 extern void pgd_init(unsigned long page);
-
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-	pgd_t *ret, *init;
-
-	ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
-	if (ret) {
-		init = pgd_offset(&init_mm, 0UL);
-		pgd_init((unsigned long)ret);
-		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
-		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-	}
-
-	return ret;
-}
+extern pgd_t *pgd_alloc(struct mm_struct *mm);
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
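Note: moving pgd_alloc() out of line changes nothing for its callers; the
generic fork path remains the main user. For reference (unchanged by this
commit, from kernel/fork.c):

	static inline int mm_alloc_pgd(struct mm_struct *mm)
	{
		mm->pgd = pgd_alloc(mm);
		if (unlikely(!mm->pgd))
			return -ENOMEM;
		return 0;
	}

The out-of-line body now lives in the new arch/mips/mm/pgtable.c further
down, where it can carry an export for modules.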
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -147,49 +147,64 @@ static inline void flush_scache_line(unsigned long addr)
 }
 
 #define protected_cache_op(op,addr)				\
+({								\
+	int __err = 0;						\
 	__asm__ __volatile__(					\
 	"	.set	push			\n"		\
 	"	.set	noreorder		\n"		\
 	"	.set	"MIPS_ISA_ARCH_LEVEL"	\n"		\
-	"1:	cache	%0, (%1)		\n"		\
+	"1:	cache	%1, (%2)		\n"		\
 	"2:	.set	pop			\n"		\
+	"	.section .fixup,\"ax\"		\n"		\
+	"3:	li	%0, %3			\n"		\
+	"	j	2b			\n"		\
+	"	.previous			\n"		\
 	"	.section __ex_table,\"a\"	\n"		\
-	"	"STR(PTR)" 1b, 2b		\n"		\
+	"	"STR(PTR)" 1b, 3b		\n"		\
 	"	.previous"					\
-	:							\
-	: "i" (op), "r" (addr))
+	: "+r" (__err)						\
+	: "i" (op), "r" (addr), "i" (-EFAULT));			\
+	__err;							\
+})
 
 #define protected_cachee_op(op,addr)				\
+({								\
+	int __err = 0;						\
 	__asm__ __volatile__(					\
 	"	.set	push			\n"		\
 	"	.set	noreorder		\n"		\
 	"	.set	mips0			\n"		\
 	"	.set	eva			\n"		\
-	"1:	cachee	%0, (%1)		\n"		\
+	"1:	cachee	%1, (%2)		\n"		\
 	"2:	.set	pop			\n"		\
+	"	.section .fixup,\"ax\"		\n"		\
+	"3:	li	%0, %3			\n"		\
+	"	j	2b			\n"		\
+	"	.previous			\n"		\
 	"	.section __ex_table,\"a\"	\n"		\
-	"	"STR(PTR)" 1b, 2b		\n"		\
+	"	"STR(PTR)" 1b, 3b		\n"		\
 	"	.previous"					\
-	:							\
-	: "i" (op), "r" (addr))
+	: "+r" (__err)						\
+	: "i" (op), "r" (addr), "i" (-EFAULT));			\
+	__err;							\
+})
 
 /*
  * The next two are for badland addresses like signal trampolines.
  */
-static inline void protected_flush_icache_line(unsigned long addr)
+static inline int protected_flush_icache_line(unsigned long addr)
 {
 	switch (boot_cpu_type()) {
 	case CPU_LOONGSON2:
-		protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
-		break;
+		return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
 
 	default:
 #ifdef CONFIG_EVA
-		protected_cachee_op(Hit_Invalidate_I, addr);
+		return protected_cachee_op(Hit_Invalidate_I, addr);
 #else
-		protected_cache_op(Hit_Invalidate_I, addr);
+		return protected_cache_op(Hit_Invalidate_I, addr);
 #endif
-		break;
 	}
 }
 
@@ -199,21 +214,21 @@ static inline void protected_flush_icache_line(unsigned long addr)
  * caches. We're talking about one cacheline unnecessarily getting invalidated
  * here so the penalty isn't overly hard.
  */
-static inline void protected_writeback_dcache_line(unsigned long addr)
+static inline int protected_writeback_dcache_line(unsigned long addr)
 {
 #ifdef CONFIG_EVA
-	protected_cachee_op(Hit_Writeback_Inv_D, addr);
+	return protected_cachee_op(Hit_Writeback_Inv_D, addr);
 #else
-	protected_cache_op(Hit_Writeback_Inv_D, addr);
+	return protected_cache_op(Hit_Writeback_Inv_D, addr);
 #endif
 }
 
-static inline void protected_writeback_scache_line(unsigned long addr)
+static inline int protected_writeback_scache_line(unsigned long addr)
 {
 #ifdef CONFIG_EVA
-	protected_cachee_op(Hit_Writeback_Inv_SD, addr);
+	return protected_cachee_op(Hit_Writeback_Inv_SD, addr);
 #else
-	protected_cache_op(Hit_Writeback_Inv_SD, addr);
+	return protected_cache_op(Hit_Writeback_Inv_SD, addr);
 #endif
 }
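Note: with the .fixup/__ex_table pairing above, a cache op on a faulting
address now stores -EFAULT into __err instead of taking an unhandled
exception, and the macros evaluate to that error code. A hypothetical
caller (illustrative only, not part of this diff) can therefore propagate
the failure:

	/* Sync one user I-cache line; returns 0 or -EFAULT. */
	static int sync_icache_user_line(unsigned long addr)
	{
		int err = protected_writeback_dcache_line(addr);

		if (!err)
			err = protected_flush_icache_line(addr);
		return err;
	}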
--- /dev/null
+++ b/arch/mips/include/asm/tlbex.h
@@ -0,0 +1,26 @@
+#ifndef __ASM_TLBEX_H
+#define __ASM_TLBEX_H
+
+#include <asm/uasm.h>
+
+/*
+ * Write random or indexed TLB entry, and care about the hazards from
+ * the preceding mtc0 and for the following eret.
+ */
+enum tlb_write_entry {
+	tlb_random,
+	tlb_indexed
+};
+
+extern int pgd_reg;
+
+void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+		      unsigned int tmp, unsigned int ptr);
+void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr);
+void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr);
+void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep);
+void build_tlb_write_entry(u32 **p, struct uasm_label **l,
+			   struct uasm_reloc **r,
+			   enum tlb_write_entry wmode);
+
+#endif /* __ASM_TLBEX_H */
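Note: this new header exposes the uasm-based builders to code outside
mm/tlbex.c. A minimal consumer sketch, assuming the caller supplies its own
instruction buffer and scratch-register numbers (K0/K1 are the usual
kernel-scratch register indices, defined locally in tlbex.c; the label and
reloc arrays would still need uasm_resolve_relocs() before the buffer runs):

	#include <asm/tlbex.h>

	enum { K0 = 26, K1 = 27 };	/* scratch registers, as in tlbex.c */

	static u32 buf[64];
	static struct uasm_label labels[8];
	static struct uasm_reloc relocs[8];

	static void sketch_emit_refill_body(void)
	{
		u32 *p = buf;
		struct uasm_label *l = labels;
		struct uasm_reloc *r = relocs;

	#ifdef CONFIG_64BIT
		build_get_pmde64(&p, &l, &r, K0, K1);	/* K1 = pmd entry */
	#else
		build_get_pgde32(&p, K0, K1);		/* K1 = pgd entry */
	#endif
		build_get_ptep(&p, K0, K1);		/* K1 = pte pointer */
		build_update_entries(&p, K0, K1);	/* fill EntryLo0/1 */
		build_tlb_write_entry(&p, &l, &r, tlb_random);
	}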
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -9,6 +9,9 @@
  * Copyright (C) 2012, 2013 MIPS Technologies, Inc.  All rights reserved.
  */
 
+#ifndef __ASM_UASM_H
+#define __ASM_UASM_H
+
 #include <linux/types.h>
 
 #ifdef CONFIG_EXPORT_UASM
@@ -309,3 +312,5 @@ void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
 void uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
 		 unsigned int reg2, int lid);
 void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+
+#endif /* __ASM_UASM_H */
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -4,7 +4,7 @@
 
 obj-y				+= cache.o dma-default.o extable.o fault.o \
 				   gup.o init.o mmap.o page.o page-funcs.o \
-				   tlbex.o tlbex-fault.o tlb-funcs.o
+				   pgtable.o tlbex.o tlbex-fault.o tlb-funcs.o
 
 ifdef CONFIG_CPU_MICROMIPS
 obj-y				+= uasm-micromips.o
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -538,5 +538,6 @@ unsigned long pgd_current[NR_CPUS];
 pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
 #ifndef __PAGETABLE_PMD_FOLDED
 pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
+EXPORT_SYMBOL_GPL(invalid_pmd_table);
 #endif
 pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -6,6 +6,7 @@
  * Copyright (C) 1999, 2000 by Silicon Graphics
  * Copyright (C) 2003 by Ralf Baechle
  */
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <asm/fixmap.h>
@@ -60,6 +61,7 @@ void pmd_init(unsigned long addr, unsigned long pagetable)
 		p[-1] = pagetable;
 	} while (p != end);
 }
+EXPORT_SYMBOL_GPL(pmd_init);
 #endif
 
 pmd_t mk_pmd(struct page *page, pgprot_t prot)
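Note: exporting pmd_init() (together with invalid_pmd_table above) lets a
module set up page-table pages the way the core MM code does. An
illustrative sketch, assuming a 64-bit configuration where PMD_ORDER is
defined (it mirrors pmd_alloc_one() in asm/pgalloc.h; the helper name is
hypothetical):

	static pmd_t *sketch_pmd_page(void)
	{
		pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);

		/* point every slot at the canonical empty pte table */
		if (pmd)
			pmd_init((unsigned long)pmd,
				 (unsigned long)invalid_pte_table);
		return pmd;
	}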
--- /dev/null
+++ b/arch/mips/mm/pgtable.c
@@ -0,0 +1,25 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <asm/pgalloc.h>
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	pgd_t *ret, *init;
+
+	ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+	if (ret) {
+		init = pgd_offset(&init_mm, 0UL);
+		pgd_init((unsigned long)ret);
+		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pgd_alloc);
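Note: this is the former inline pgd_alloc() moved verbatim out of
asm/pgalloc.h so that it can carry an EXPORT_SYMBOL_GPL. A module can now
obtain a kernel-format page table directly; illustrative only (the mm
argument is unused by the implementation above, and pgd_free() remains the
inline free_pages() wrapper in asm/pgalloc.h):

	pgd_t *pgd = pgd_alloc(NULL);	/* kernel mappings pre-copied */
	if (pgd)
		free_pages((unsigned long)pgd, PGD_ORDER);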
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -22,6 +22,7 @@
  */
 
 #include <linux/bug.h>
+#include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/smp.h>
@@ -34,6 +35,7 @@
 #include <asm/war.h>
 #include <asm/uasm.h>
 #include <asm/setup.h>
+#include <asm/tlbex.h>
 
 static int mips_xpa_disabled;
@@ -344,7 +346,8 @@ static int allocate_kscratch(void)
 }
 
 static int scratch_reg;
-static int pgd_reg;
+int pgd_reg;
+EXPORT_SYMBOL_GPL(pgd_reg);
 enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
 
 static struct work_registers build_get_work_registers(u32 **p)
@@ -496,13 +499,7 @@ static void __maybe_unused build_tlb_probe_entry(u32 **p)
 	}
 }
 
-/*
- * Write random or indexed TLB entry, and care about the hazards from
- * the preceding mtc0 and for the following eret.
- */
-enum tlb_write_entry { tlb_random, tlb_indexed };
-
-static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
+void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 				  struct uasm_reloc **r,
 				  enum tlb_write_entry wmode)
 {
@@ -627,6 +624,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 		break;
 	}
 }
+EXPORT_SYMBOL_GPL(build_tlb_write_entry);
 
 static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 							unsigned int reg)
@@ -781,8 +779,7 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pmd entry.
  */
-static void
-build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 		 unsigned int tmp, unsigned int ptr)
 {
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
@@ -859,6 +856,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
 #endif
 }
+EXPORT_SYMBOL_GPL(build_get_pmde64);
 
 /*
  * BVADDR is the faulting address, PTR is scratch.
@@ -934,8 +932,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pgd entry.
  */
-static void __maybe_unused
-build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
+void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 {
 	if (pgd_reg != -1) {
 		/* pgd is in pgd_reg */
@@ -960,6 +957,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
 	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
 }
+EXPORT_SYMBOL_GPL(build_get_pgde32);
 #endif /* !CONFIG_64BIT */
 
@@ -989,7 +987,7 @@ static void build_adjust_context(u32 **p, unsigned int ctx)
 	uasm_i_andi(p, ctx, ctx, mask);
 }
 
-static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
+void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 {
 	/*
 	 * Bug workaround for the Nevada. It seems as if under certain
@@ -1013,8 +1011,9 @@ static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 	build_adjust_context(p, tmp);
 	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
 }
+EXPORT_SYMBOL_GPL(build_get_ptep);
 
-static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
+void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 {
 	int pte_off_even = 0;
 	int pte_off_odd = sizeof(pte_t);
@@ -1063,6 +1062,7 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 	UASM_i_MTC0(p, 0, C0_ENTRYLO1);
 	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 }
+EXPORT_SYMBOL_GPL(build_update_entries);
 
 struct mips_huge_tlb_info {
 	int huge_pte;
@@ -1536,7 +1536,9 @@ static void build_loongson3_tlb_refill_handler(void)
 extern u32 handle_tlbl[], handle_tlbl_end[];
 extern u32 handle_tlbs[], handle_tlbs_end[];
 extern u32 handle_tlbm[], handle_tlbm_end[];
-extern u32 tlbmiss_handler_setup_pgd_start[], tlbmiss_handler_setup_pgd[];
+extern u32 tlbmiss_handler_setup_pgd_start[];
+extern u32 tlbmiss_handler_setup_pgd[];
+EXPORT_SYMBOL_GPL(tlbmiss_handler_setup_pgd);
 extern u32 tlbmiss_handler_setup_pgd_end[];
 
 static void build_setup_pgd(void)
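Note: tlbmiss_handler_setup_pgd labels the generated stub that installs a
new pgd (into the pgd_reg kscratch register when one was allocated,
otherwise into pgd_current[]); the kernel reaches it through
TLBMISS_HANDLER_SETUP_PGD() in asm/mmu_context.h. With the symbol exported,
a module generating its own entry code could branch to the same stub. A
hypothetical uasm fragment (illustrative only; the stub takes the pgd in a0
as a normal C call, and the register indices are plain MIPS ABI numbers):

	enum { T9 = 25, RA = 31 };

	static void sketch_emit_call_setup_pgd(u32 **p)
	{
		/* load the stub address and call it */
		UASM_i_LA(p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
		uasm_i_jalr(p, RA, T9);
		uasm_i_nop(p);	/* branch delay slot */
	}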