Commit 2440e271 authored by Russell King

[ARM] Clean up vector support code.

Rather than working out where the vector page is mapped, always
map the vector page at the high vectors location, and conditionally
handle the coherency issues with the low vector mapping if present.
parent c5c0f921
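
The idea, in short: the vector page is now always mapped at 0xffff0000, so code that patches a vector entry always writes through the high mapping; when the CPU is configured for low vectors (CR_V clear), the same physical page is also visible at virtual address 0, and the instruction cache for that alias must be flushed as well. A minimal sketch of the pattern, using vectors_high() and flush_icache_range() from the diff below (patch_vector() itself is a hypothetical helper for illustration, not part of the commit):

static void patch_vector(unsigned long offset, const void *insn, size_t len)
{
        /* Always write through the permanent high-vectors mapping. */
        memcpy((void *)(0xffff0000 + offset), insn, len);
        flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + len);

        /* Low-vectors CPUs also see this page at virtual address 0;
         * flush that alias too so stale instructions aren't executed. */
        if (!vectors_high())
                flush_icache_range(offset, offset + len);
}

This is exactly the shape set_fiq_handler() takes in the hunk below, with offset 0x1c (the FIQ vector).
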
@@ -1657,6 +1657,8 @@ __stubs_end:

 ENTRY(__trap_init)
        stmfd   sp!, {r4 - r6, lr}
+       mov     r0, #0xff000000
+       orr     r0, r0, #0x00ff0000             @ high vectors position
        adr     r1, .LCvectors                  @ set up the vectors
        ldmia   r1, {r1, r2, r3, r4, r5, r6, ip, lr}
        stmia   r0, {r1, r2, r3, r4, r5, r6, ip, lr}
@@ -46,32 +46,16 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>

-#define FIQ_VECTOR (vectors_base() + 0x1c)
-
 static unsigned long no_fiq_insn;

-static inline void unprotect_page_0(void)
-{
-       modify_domain(DOMAIN_USER, DOMAIN_MANAGER);
-}
-
-static inline void protect_page_0(void)
-{
-       modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
-}
-
 /* Default reacquire function
  * - we always relinquish FIQ control
  * - we always reacquire FIQ control
  */
 static int fiq_def_op(void *ref, int relinquish)
 {
-       if (!relinquish) {
-               unprotect_page_0();
-               *(unsigned long *)FIQ_VECTOR = no_fiq_insn;
-               protect_page_0();
-               flush_icache_range(FIQ_VECTOR, FIQ_VECTOR + 4);
-       }
+       if (!relinquish)
+               set_fiq_handler(&no_fiq_insn, sizeof(no_fiq_insn));

        return 0;
 }
@@ -93,12 +77,10 @@ int show_fiq_list(struct seq_file *p, void *v)

 void set_fiq_handler(void *start, unsigned int length)
 {
-       unprotect_page_0();
-       memcpy((void *)FIQ_VECTOR, start, length);
-       protect_page_0();
-       flush_icache_range(FIQ_VECTOR, FIQ_VECTOR + length);
+       memcpy((void *)0xffff001c, start, length);
+       flush_icache_range(0xffff001c, 0xffff001c + length);
+       if (!vectors_high())
+               flush_icache_range(0x1c, 0x1c + length);
 }

 /*
@@ -198,6 +180,5 @@ EXPORT_SYMBOL(disable_fiq);

 void __init init_FIQ(void)
 {
-       no_fiq_insn = *(unsigned long *)FIQ_VECTOR;
-       set_fs(get_fs());
+       no_fiq_insn = *(unsigned long *)0xffff001c;
 }
@@ -51,6 +51,13 @@ asmlinkage int sys_pipe(unsigned long __user *fildes)
        return error;
 }

+/*
+ * This is the lowest virtual address we can permit any user space
+ * mapping to be mapped at.  This is particularly important for
+ * non-high vector CPUs.
+ */
+#define MIN_MAP_ADDR   (vectors_high() ? 0 : PAGE_SIZE)
+
 /* common code for old and new mmaps */
 inline long do_mmap2(
        unsigned long addr, unsigned long len,
@@ -62,11 +69,7 @@ inline long do_mmap2(
        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

-       /*
-        * If we are doing a fixed mapping, and address < PAGE_SIZE,
-        * then deny it.
-        */
-       if (flags & MAP_FIXED && addr < PAGE_SIZE && vectors_base() == 0)
+       if (flags & MAP_FIXED && addr < MIN_MAP_ADDR)
                goto out;

        error = -EBADF;
@@ -119,12 +122,7 @@ sys_arm_mremap(unsigned long addr, unsigned long old_len,
 {
        unsigned long ret = -EINVAL;

-       /*
-        * If we are doing a fixed mapping, and address < PAGE_SIZE,
-        * then deny it.
-        */
-       if (flags & MREMAP_FIXED && new_addr < PAGE_SIZE &&
-           vectors_base() == 0)
+       if (flags & MREMAP_FIXED && new_addr < MIN_MAP_ADDR)
                goto out;

        down_write(&current->mm->mmap_sem);
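
The MIN_MAP_ADDR change above replaces the two open-coded checks: with high vectors the vector page no longer lives at virtual address 0, so user space may map page zero; with low vectors it still must not. A hypothetical user-space probe (not from the commit) showing the visible effect, assuming 4 KiB pages:

#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
        /* On a low-vectors kernel this MAP_FIXED request at address 0 is
         * rejected with EINVAL (addr < MIN_MAP_ADDR == PAGE_SIZE); on a
         * high-vectors kernel MIN_MAP_ADDR is 0 and it can succeed. */
        void *p = mmap((void *)0, 4096, PROT_READ | PROT_WRITE,
                       MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                perror("mmap at 0");
        return 0;
}
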
@@ -328,20 +328,11 @@ asmlinkage void do_unexp_fiq (struct pt_regs *regs)
  */
 asmlinkage void bad_mode(struct pt_regs *regs, int reason, int proc_mode)
 {
-       unsigned int vectors = vectors_base();
-
        console_verbose();

        printk(KERN_CRIT "Bad mode in %s handler detected: mode %s\n",
                handler[reason], processor_modes[proc_mode]);

-       /*
-        * Dump out the vectors and stub routines.  Maybe a better solution
-        * would be to dump them out only if we detect that they are corrupted.
-        */
-       dump_mem(KERN_CRIT "Vectors: ", vectors, vectors + 0x40);
-       dump_mem(KERN_CRIT "Stubs: ", vectors + 0x200, vectors + 0x4b8);
-
        die("Oops - bad mode", regs, 0);
        local_irq_disable();
        panic("bad mode");
@@ -575,13 +566,9 @@ EXPORT_SYMBOL(abort);

 void __init trap_init(void)
 {
-       extern void __trap_init(unsigned long);
-       unsigned long base = vectors_base();
+       extern void __trap_init(void);

-       __trap_init(base);
-       flush_icache_range(base, base + PAGE_SIZE);
-       if (base != 0)
-               printk(KERN_DEBUG "Relocating machine vectors to 0x%08lx\n",
-                       base);
+       __trap_init();
+       flush_icache_range(0xffff0000, 0xffff0000 + PAGE_SIZE);
        modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
 }
@@ -158,7 +158,7 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)

        init_pgd = pgd_offset_k(0);

-       if (vectors_base() == 0) {
+       if (!vectors_high()) {
                /*
                 * This lock is here just to satisfy pmd_alloc and pte_lock
                 */
@@ -317,12 +317,18 @@ static struct mem_types mem_types[] __initdata = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE,
                .domain    = DOMAIN_KERNEL,
        },
-       [MT_VECTORS] = {
+       [MT_LOW_VECTORS] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_EXEC,
                .prot_l1   = PMD_TYPE_TABLE,
                .domain    = DOMAIN_USER,
        },
+       [MT_HIGH_VECTORS] = {
+               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+                               L_PTE_USER | L_PTE_EXEC,
+               .prot_l1   = PMD_TYPE_TABLE,
+               .domain    = DOMAIN_USER,
+       },
        [MT_MEMORY] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
@@ -357,13 +363,12 @@ static void __init build_mem_type_table(void)
        }

        if (cpu_arch <= CPU_ARCH_ARMv5) {
-               mem_types[MT_DEVICE].prot_l1 |= PMD_BIT4;
-               mem_types[MT_DEVICE].prot_sect |= PMD_BIT4;
-               mem_types[MT_CACHECLEAN].prot_sect |= PMD_BIT4;
-               mem_types[MT_MINICLEAN].prot_sect |= PMD_BIT4;
-               mem_types[MT_VECTORS].prot_l1 |= PMD_BIT4;
-               mem_types[MT_MEMORY].prot_sect |= PMD_BIT4;
-               mem_types[MT_ROM].prot_sect |= PMD_BIT4;
+               for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
+                       if (mem_types[i].prot_l1)
+                               mem_types[i].prot_l1 |= PMD_BIT4;
+                       if (mem_types[i].prot_sect)
+                               mem_types[i].prot_sect |= PMD_BIT4;
+               }
        }

        /*
@@ -387,13 +392,16 @@ static void __init build_mem_type_table(void)
        cp = &cache_policies[cachepolicy];

        if (cpu_arch >= CPU_ARCH_ARMv5) {
-               mem_types[MT_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
+               mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
+               mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
        } else {
-               mem_types[MT_VECTORS].prot_pte |= cp->pte;
+               mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte;
+               mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte;
                mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
        }

-       mem_types[MT_VECTORS].prot_l1 |= ecc_mask;
+       mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
+       mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
        mem_types[MT_ROM].prot_sect |= cp->pmd;
@@ -420,6 +428,8 @@ static void __init build_mem_type_table(void)
                        ecc_mask ? "en" : "dis", cp->policy);
 }

+#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
+
 /*
  * Create the page directory entries and any necessary
  * page tables for the mapping specified by `md'.  We
@@ -587,16 +597,22 @@ void __init memtable_init(struct meminfo *mi)
        } while (address != 0);

        /*
-        * Create a mapping for the machine vectors at virtual address 0
-        * or 0xffff0000.  We should always try the high mapping.
+        * Create a mapping for the machine vectors at the high-vectors
+        * location (0xffff0000).  If we aren't using high-vectors, also
+        * create a mapping at the low-vectors virtual address.
         */
        init_maps->physical   = virt_to_phys(init_maps);
-       init_maps->virtual    = vectors_base();
+       init_maps->virtual    = 0xffff0000;
        init_maps->length     = PAGE_SIZE;
-       init_maps->type       = MT_VECTORS;
+       init_maps->type       = MT_HIGH_VECTORS;
        create_mapping(init_maps);

+       if (!vectors_high()) {
+               init_maps->virtual = 0;
+               init_maps->type = MT_LOW_VECTORS;
+               create_mapping(init_maps);
+       }
+
        flush_cache_all();
        flush_tlb_all();
 }
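
Taken together, the mm hunks leave the vector page mapped like this (summary for reference, not part of the commit):

        virtual      physical               type              when
        0xffff0000   virt_to_phys(page)     MT_HIGH_VECTORS   always
        0x00000000   same page              MT_LOW_VECTORS    only if !vectors_high()

The high-vectors type additionally sets L_PTE_USER, so user mode can read the vector page at 0xffff0000 through ordinary page permissions; the low-vectors type omits it, and access to the page is further governed by the DOMAIN_USER setting, which trap_init() switches to client mode once the vectors are installed.
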
@@ -21,9 +21,10 @@ struct meminfo;
 #define MT_DEVICE              0
 #define MT_CACHECLEAN          1
 #define MT_MINICLEAN           2
-#define MT_VECTORS             3
-#define MT_MEMORY              4
-#define MT_ROM                 5
+#define MT_LOW_VECTORS         3
+#define MT_HIGH_VECTORS        4
+#define MT_MEMORY              5
+#define MT_ROM                 6

 extern void create_memmap_holes(struct meminfo *);
 extern void memtable_init(struct meminfo *);
@@ -128,9 +128,9 @@ extern unsigned long cr_alignment;    /* defined in entry-armv.S */
 extern unsigned int user_debug;

 #if __LINUX_ARM_ARCH__ >= 4
-#define vectors_base() ((cr_alignment & CR_V) ? 0xffff0000 : 0)
+#define vectors_high() (cr_alignment & CR_V)
 #else
-#define vectors_base() (0)
+#define vectors_high() (0)
 #endif

 #define mb() __asm__ __volatile__ ("" : : : "memory")