Commit f657af0f authored by Russell King

[ARM] Provide more early command line parsing.

We need to parse the command line arguments not only for the memory
parameters, but also for CPU cache policies.  Rather than extending the
early parsing in arch/arm/kernel/setup.c, we make this a generic
feature.  Each parameter and its parsing function can now be placed
alongside the code which makes use of the parsed information.
parent 308e8f7a
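The patch converts initrd= and mem= in setup.c, plus the cache policy and ECC options later in this diff, over to the new mechanism. As a rough sketch of how another subsystem would use it (the foo= option, foo_size variable and early_foo handler are purely illustrative and not part of the patch):

#include <linux/kernel.h>	/* memparse() */
#include <asm/setup.h>		/* struct early_params, __early_param(), added by this patch */

static unsigned long foo_size __initdata;

/*
 * Hypothetical handler: *p points just past "foo=" when called from
 * parse_cmdline(), and the handler advances it past whatever it consumes.
 */
static void __init early_foo(char **p)
{
	foo_size = memparse(*p, p);
}
__early_param("foo=", early_foo);

The descriptor created by __early_param() lands in the new __early_param linker section, so no central list in setup.c needs to be touched.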
......@@ -13,7 +13,7 @@
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/blk.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
......@@ -323,58 +323,77 @@ static struct machine_desc * __init setup_machine(unsigned int nr)
return list;
}
static void __init early_initrd(char **p)
{
unsigned long start, size;
start = memparse(*p, p);
if (**p == ',') {
size = memparse((*p) + 1, p);
phys_initrd_start = start;
phys_initrd_size = size;
}
}
__early_param("initrd=", early_initrd);
/*
* Initial parsing of the command line. We need to pick out the
* memory size. We look for mem=size@start, where start and size
* are "size[KkMm]"
* Pick out the memory size. We look for mem=size@start,
* where start and size are "size[KkMm]"
*/
static void __init
parse_cmdline(struct meminfo *mi, char **cmdline_p, char *from)
static void __init early_mem(char **p)
{
static int usermem __initdata = 0;
unsigned long size, start;
/*
* If the user specifies memory size, we
* blow away any automatically generated
* size.
*/
if (usermem == 0) {
usermem = 1;
meminfo.nr_banks = 0;
}
start = PHYS_OFFSET;
size = memparse(*p, p);
if (**p == '@')
start = memparse(*p + 1, p);
meminfo.bank[meminfo.nr_banks].start = start;
meminfo.bank[meminfo.nr_banks].size = size;
meminfo.bank[meminfo.nr_banks].node = PHYS_TO_NID(start);
meminfo.nr_banks += 1;
}
__early_param("mem=", early_mem);
/*
* Initial parsing of the command line.
*/
static void __init parse_cmdline(char **cmdline_p, char *from)
{
char c = ' ', *to = command_line;
int usermem = 0, len = 0;
int len = 0;
for (;;) {
if (c == ' ' && !memcmp(from, "mem=", 4)) {
unsigned long size, start;
if (to != command_line)
to -= 1;
/*
* If the user specifies memory size, we
* blow away any automatically generated
* size.
*/
if (usermem == 0) {
usermem = 1;
mi->nr_banks = 0;
}
start = PHYS_OFFSET;
size = memparse(from + 4, &from);
if (*from == '@')
start = memparse(from + 1, &from);
mi->bank[mi->nr_banks].start = start;
mi->bank[mi->nr_banks].size = size;
mi->bank[mi->nr_banks].node = PHYS_TO_NID(start);
mi->nr_banks += 1;
} else if (c == ' ' && !memcmp(from, "initrd=", 7)) {
unsigned long start, size;
/*
* Remove space character
*/
if (to != command_line)
to -= 1;
start = memparse(from + 7, &from);
if (*from == ',') {
size = memparse(from + 1, &from);
phys_initrd_start = start;
phys_initrd_size = size;
if (c == ' ') {
extern struct early_params __early_begin, __early_end;
struct early_params *p;
for (p = &__early_begin; p < &__early_end; p++) {
int len = strlen(p->arg);
if (memcmp(from, p->arg, len) == 0) {
if (to != command_line)
to -= 1;
from += len;
p->fn(&from);
while (*from != ' ' && *from != '\0')
from++;
break;
}
}
}
c = *from++;
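With this change parse_cmdline() no longer special-cases mem= and initrd=: at each word boundary it scans the table of early_params that the linker collects between __early_begin and __early_end, strips the matching "name=" prefix, hands the rest of the word to the handler, and then skips whatever the handler did not consume. The same matching logic, reduced to a user-space sketch with a hand-rolled static table in place of the linker-built one (names here are illustrative):

#include <stdio.h>
#include <string.h>

struct early_params {
	const char *arg;
	void (*fn)(char **p);
};

static void early_demo(char **p)
{
	printf("demo handler got \"%s\"\n", *p);
}

/* In the kernel this table is assembled by the linker from the
 * __early_param section; here it is simply a static array. */
static struct early_params early_table[] = {
	{ "demo=", early_demo },
};

static void dispatch(char *from)
{
	unsigned int i;

	for (i = 0; i < sizeof(early_table) / sizeof(early_table[0]); i++) {
		int len = strlen(early_table[i].arg);

		if (memcmp(from, early_table[i].arg, len) == 0) {
			from += len;              /* strip the "name=" prefix */
			early_table[i].fn(&from); /* handler advances from */
			break;
		}
	}
}

int main(void)
{
	char arg[] = "demo=hello";

	dispatch(arg);		/* prints: demo handler got "hello" */
	return 0;
}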
......@@ -536,6 +555,8 @@ __tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
static int __init parse_tag_initrd(const struct tag *tag)
{
printk(KERN_WARNING "ATAG_INITRD is deprecated; "
"please update your bootloader.\n");
phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
phys_initrd_size = tag->u.initrd.size;
return 0;
......@@ -668,7 +689,7 @@ void __init setup_arch(char **cmdline_p)
memcpy(saved_command_line, from, COMMAND_LINE_SIZE);
saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
parse_cmdline(&meminfo, cmdline_p, from);
parse_cmdline(cmdline_p, from);
bootmem_init(&meminfo);
paging_init(&meminfo, mdesc);
request_standard_resources(&meminfo, mdesc);
......
......@@ -24,30 +24,82 @@
#include <asm/mach/map.h>
static unsigned int cachepolicy __initdata = PMD_SECT_WB;
static unsigned int ecc_mask __initdata = 0;
struct cachepolicy {
char *policy;
unsigned int cr_mask;
unsigned int pmd;
};
static struct cachepolicy cache_policies[] __initdata = {
{ "uncached", CR1_W|CR1_C, PMD_SECT_UNCACHED },
{ "buffered", CR1_C, PMD_SECT_BUFFERED },
{ "writethrough", 0, PMD_SECT_WT },
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
{ "writeback", 0, PMD_SECT_WB },
{ "writealloc", 0, PMD_SECT_WBWA }
#endif
};
/*
* These are useful for identifying cache coherency
* problems by allowing the cache or the cache and
* writebuffer to be turned off. (Note: the write
* buffer should not be on and the cache off).
*/
static int __init nocache_setup(char *__unused)
static void __init early_cachepolicy(char **p)
{
cr_alignment &= ~CR1_C;
cr_no_alignment &= ~CR1_C;
int i;
for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
int len = strlen(cache_policies[i].policy);
if (memcmp(*p, cache_policies[i].policy, len) == 0) {
cachepolicy = cache_policies[i].pmd;
cr_alignment &= ~cache_policies[i].cr_mask;
cr_no_alignment &= ~cache_policies[i].cr_mask;
*p += len;
break;
}
}
if (i == ARRAY_SIZE(cache_policies))
printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
flush_cache_all();
set_cr(cr_alignment);
return 1;
}
static int __init nowrite_setup(char *__unused)
static void __init early_nocache(char **__unused)
{
cr_alignment &= ~(CR1_W|CR1_C);
cr_no_alignment &= ~(CR1_W|CR1_C);
flush_cache_all();
set_cr(cr_alignment);
return 1;
char *p = "buffered";
printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
early_cachepolicy(&p);
}
static void __init early_nowrite(char **__unused)
{
char *p = "uncached";
printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
early_cachepolicy(&p);
}
static void __init early_ecc(char **p)
{
if (memcmp(*p, "on", 2) == 0) {
ecc_mask = PMD_PROTECTION;
*p += 2;
} else if (memcmp(*p, "off", 3) == 0) {
ecc_mask = 0;
*p += 3;
}
}
__early_param("nocache", early_nocache);
__early_param("nowb", early_nowrite);
__early_param("cachepolicy=", early_cachepolicy);
__early_param("ecc=", early_ecc);
static int __init noalign_setup(char *__unused)
{
cr_alignment &= ~CR1_A;
......@@ -57,8 +109,6 @@ static int __init noalign_setup(char *__unused)
}
__setup("noalign", noalign_setup);
__setup("nocache", nocache_setup);
__setup("nowb", nowrite_setup);
#define FIRST_KERNEL_PGD_NR (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
......@@ -231,32 +281,20 @@ static struct mem_types mem_types[] __initdata = {
.domain = DOMAIN_IO,
},
[MT_CACHECLEAN] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_CACHEABLE | L_PTE_BUFFERABLE,
.prot_l1 = PMD_TYPE_TABLE | PMD_BIT4,
.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
.domain = DOMAIN_KERNEL,
},
[MT_MINICLEAN] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_CACHEABLE,
.prot_l1 = PMD_TYPE_TABLE | PMD_BIT4,
.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE,
.domain = DOMAIN_KERNEL,
},
[MT_VECTORS] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_CACHEABLE | L_PTE_BUFFERABLE |
L_PTE_EXEC,
.prot_l1 = PMD_TYPE_TABLE | PMD_BIT4,
.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
.domain = DOMAIN_USER,
},
[MT_MEMORY] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_CACHEABLE | L_PTE_BUFFERABLE |
L_PTE_EXEC | L_PTE_WRITE,
.prot_l1 = PMD_TYPE_TABLE | PMD_BIT4,
.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
}
......@@ -268,37 +306,50 @@ static struct mem_types mem_types[] __initdata = {
static void __init build_mem_type_table(void)
{
int cpu_arch = cpu_architecture();
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
int writethrough = 1;
#else
int writethrough = 0;
#endif
int writealloc = 0, ecc = 0;
const char *policy;
if (cpu_arch < CPU_ARCH_ARMv5) {
writealloc = 0;
ecc = 0;
/*
* ARMv5 can use ECC memory.
*/
if (cpu_arch == CPU_ARCH_ARMv5) {
mem_types[MT_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_MEMORY].prot_sect |= ecc_mask;
} else {
mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
if (cachepolicy == PMD_SECT_WBWA)
cachepolicy = PMD_SECT_WB;
ecc_mask = 0;
}
if (writethrough) {
mem_types[MT_MEMORY].prot_sect |= cachepolicy;
switch (cachepolicy) {
default:
case PMD_SECT_UNCACHED:
policy = "uncached";
break;
case PMD_SECT_BUFFERED:
mem_types[MT_VECTORS].prot_pte |= PTE_BUFFERABLE;
policy = "buffered";
break;
case PMD_SECT_WT:
mem_types[MT_VECTORS].prot_pte |= PTE_BUFFERABLE|PTE_CACHEABLE;
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
mem_types[MT_VECTORS].prot_sect |= PMD_SECT_WT;
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_WT;
} else {
policy = "write through";
break;
case PMD_SECT_WB:
mem_types[MT_VECTORS].prot_pte |= PTE_BUFFERABLE|PTE_CACHEABLE;
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
mem_types[MT_VECTORS].prot_sect |= PMD_SECT_WB;
if (writealloc)
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_WBWA;
else
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_WB;
}
if (ecc) {
mem_types[MT_VECTORS].prot_sect |= PMD_PROTECTION;
mem_types[MT_MEMORY].prot_sect |= PMD_PROTECTION;
policy = "write back";
break;
case PMD_SECT_WBWA:
mem_types[MT_VECTORS].prot_pte |= PTE_BUFFERABLE|PTE_CACHEABLE;
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
policy = "write back, write allocate";
break;
}
printk("Memory policy: ECC %sabled, Data cache %s\n",
ecc_mask ? "en" : "dis", policy);
}
/*
......@@ -330,6 +381,14 @@ static void __init create_mapping(struct map_desc *md)
off = md->physical - virt;
length = md->length;
if (mem_types[md->type].prot_l1 == 0 &&
(virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
printk(KERN_WARNING "MM: map for 0x%08lx at 0x%08lx can not "
"be mapped using pages, ignoring.\n",
md->physical, md->virtual);
return;
}
while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
alloc_init_page(virt, virt + off, prot_l1, prot_pte);
......
......@@ -35,6 +35,9 @@ SECTIONS
__setup_start = .;
*(.init.setup)
__setup_end = .;
__early_begin = .;
*(__early_param)
__early_end = .;
__start___param = .;
*(__param)
__stop___param = .;
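The two symbols added above are what turn the scattered __early_param declarations into a single array: every descriptor emitted into the __early_param section is packed between __early_begin and __early_end, which parse_cmdline() then walks like any other array. Outside the kernel the same trick can be sketched with a GCC section attribute and the __start_/__stop_ symbols that GNU ld generates for identifier-named sections (illustrative only; the kernel defines its own begin/end symbols in the linker script instead):

#include <stdio.h>

struct early_params {
	const char *arg;
	void (*fn)(char **p);
};

static void early_hello(char **p) { (void)p; printf("hello handler\n"); }
static void early_bye(char **p)   { (void)p; printf("bye handler\n"); }

/* Drop each descriptor into a named section; GNU ld surrounds such a
 * section with __start_<name> and __stop_<name> symbols. */
#define REGISTER(name, func)						\
	static struct early_params __desc_##func			\
	__attribute__((section("early_tab"), used)) = { name, func }

REGISTER("hello=", early_hello);
REGISTER("bye=", early_bye);

extern struct early_params __start_early_tab[], __stop_early_tab[];

int main(void)
{
	struct early_params *p;

	for (p = __start_early_tab; p < __stop_early_tab; p++)
		printf("registered: %s\n", p->arg);
	return 0;
}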
......
......@@ -202,4 +202,17 @@ struct meminfo {
extern struct meminfo meminfo;
/*
* Early command line parameters.
*/
struct early_params {
const char *arg;
void (*fn)(char **p);
};
#define __early_param(name,fn) \
static struct early_params __early_##fn \
__attribute__((section("__early_param"), unused)) = \
{ name, fn }
#endif
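For reference, a registration such as the __early_param("initrd=", early_initrd) line in setup.c expands to roughly:

static struct early_params __early_early_initrd
	__attribute__((section("__early_param"), unused)) =
	{ "initrd=", early_initrd };

i.e. one descriptor per parameter, emitted into the __early_param section collected by the linker script change above.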