Commit a70c6913 authored by Tejun Heo's avatar Tejun Heo

sparc64: implement page mapping percpu first chunk allocator

Implement page mapping percpu first chunk allocator as a fallback to
the embedding allocator.  The next patch will make the embedding
allocator check distances between units to determine whether it fits
within the vmalloc area so that this fallback can be used on such
cases.

sparc64 currently has relatively small vmalloc area which makes it
impossible to create any dynamic chunks on certain configurations
leading to percpu allocation failures.  This and the next patch should
allow those configurations to keep working until proper solution is
found.

While at it, mark pcpu_cpu_distance() with __init.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: David S. Miller <davem@davemloft.net>
parent fb59e72e
...@@ -102,6 +102,9 @@ config HAVE_SETUP_PER_CPU_AREA ...@@ -102,6 +102,9 @@ config HAVE_SETUP_PER_CPU_AREA
config NEED_PER_CPU_EMBED_FIRST_CHUNK config NEED_PER_CPU_EMBED_FIRST_CHUNK
def_bool y if SPARC64 def_bool y if SPARC64
config NEED_PER_CPU_PAGE_FIRST_CHUNK
def_bool y if SPARC64
config GENERIC_HARDIRQS_NO__DO_IRQ config GENERIC_HARDIRQS_NO__DO_IRQ
bool bool
def_bool y if SPARC64 def_bool y if SPARC64
......
...@@ -1420,7 +1420,7 @@ static void __init pcpu_free_bootmem(void *ptr, size_t size) ...@@ -1420,7 +1420,7 @@ static void __init pcpu_free_bootmem(void *ptr, size_t size)
free_bootmem(__pa(ptr), size); free_bootmem(__pa(ptr), size);
} }
static int pcpu_cpu_distance(unsigned int from, unsigned int to) static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{ {
if (cpu_to_node(from) == cpu_to_node(to)) if (cpu_to_node(from) == cpu_to_node(to))
return LOCAL_DISTANCE; return LOCAL_DISTANCE;
...@@ -1428,18 +1428,53 @@ static int pcpu_cpu_distance(unsigned int from, unsigned int to) ...@@ -1428,18 +1428,53 @@ static int pcpu_cpu_distance(unsigned int from, unsigned int to)
return REMOTE_DISTANCE; return REMOTE_DISTANCE;
} }
/*
 * Ensure the kernel page tables have PUD/PMD/PTE levels populated for
 * @addr, so the percpu page-first-chunk allocator can map pages there.
 * Intermediate tables are allocated from bootmem (early boot only, hence
 * __init; __alloc_bootmem here is assumed to panic rather than return
 * NULL on failure — TODO confirm, no NULL check is done below).
 */
static void __init pcpu_populate_pte(unsigned long addr)
{
/* Walk from the kernel PGD entry covering addr. */
pgd_t *pgd = pgd_offset_k(addr);
pud_t *pud;
pmd_t *pmd;
pud = pud_offset(pgd, addr);
if (pud_none(*pud)) {
/* No PMD table yet: allocate one page-sized, page-aligned table. */
pmd_t *new;
new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
pud_populate(&init_mm, pud, new);
}
pmd = pmd_offset(pud, addr);
if (!pmd_present(*pmd)) {
/* No PTE table yet: allocate and install one the same way. */
pte_t *new;
new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
pmd_populate_kernel(&init_mm, pmd, new);
}
}
void __init setup_per_cpu_areas(void) void __init setup_per_cpu_areas(void)
{ {
unsigned long delta; unsigned long delta;
unsigned int cpu; unsigned int cpu;
int rc; int rc = -EINVAL;
rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, if (pcpu_chosen_fc != PCPU_FC_PAGE) {
PERCPU_DYNAMIC_RESERVE, 4 << 20, rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
pcpu_cpu_distance, pcpu_alloc_bootmem, PERCPU_DYNAMIC_RESERVE, 4 << 20,
pcpu_free_bootmem); pcpu_cpu_distance,
if (rc) pcpu_alloc_bootmem,
panic("failed to initialize first chunk (%d)", rc); pcpu_free_bootmem);
if (rc)
pr_warning("PERCPU: %s allocator failed (%d), "
"falling back to page size\n",
pcpu_fc_names[pcpu_chosen_fc], rc);
}
if (rc < 0)
rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
pcpu_alloc_bootmem,
pcpu_free_bootmem,
pcpu_populate_pte);
if (rc < 0)
panic("cannot initialize percpu area (err=%d)", rc);
delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment