Commit 67060d9c authored by Heiko Carstens's avatar Heiko Carstens Committed by Martin Schwidefsky

[S390] Fix section mismatch warnings.

This fixes the last remaining section mismatch warnings in s390
architecture code. It reveals also a real bug introduced by... me
with git commit 2069e978
("[S390] sparsemem vmemmap: initialize memmap.")

Calling the generic vmemmap_alloc_block() function to get initialized
memory is a nice idea, however that function is __meminit annotated
and therefore the function might be gone if we try to call it later.
This can happen if a DCSS segment gets added.

So basically revert the patch and clear the memmap explicitly to fix
the original bug.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent bebd9a45
...@@ -1089,7 +1089,7 @@ static int __devinit smp_add_present_cpu(int cpu) ...@@ -1089,7 +1089,7 @@ static int __devinit smp_add_present_cpu(int cpu)
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
int smp_rescan_cpus(void) int __ref smp_rescan_cpus(void)
{ {
cpumask_t newcpus; cpumask_t newcpus;
int cpu; int cpu;
......
...@@ -27,12 +27,19 @@ struct memory_segment { ...@@ -27,12 +27,19 @@ struct memory_segment {
static LIST_HEAD(mem_segs); static LIST_HEAD(mem_segs);
/*
 * Allocate @order pages for vmem page tables, working both before and
 * after the page allocator is initialized: once slab is up, use the
 * regular page allocator; during early boot, fall back to bootmem.
 *
 * __ref suppresses the section mismatch warning for calling the
 * __init-annotated bootmem allocator from non-init code; this is safe
 * because the slab_is_available() check guarantees the bootmem path is
 * only taken during early boot, while bootmem still exists.
 *
 * Returns a pointer to the allocated pages, or NULL on failure
 * (NOTE(review): alloc_bootmem_pages() panics rather than returning
 * NULL — confirm against the bootmem API of this kernel version).
 */
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}
static inline pud_t *vmem_pud_alloc(void)
{ {
pud_t *pud = NULL; pud_t *pud = NULL;
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
pud = vmemmap_alloc_block(PAGE_SIZE * 4, 0); pud = vmem_alloc_pages(2);
if (!pud) if (!pud)
return NULL; return NULL;
clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4); clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
...@@ -40,12 +47,12 @@ static pud_t *vmem_pud_alloc(void) ...@@ -40,12 +47,12 @@ static pud_t *vmem_pud_alloc(void)
return pud; return pud;
} }
static pmd_t *vmem_pmd_alloc(void) static inline pmd_t *vmem_pmd_alloc(void)
{ {
pmd_t *pmd = NULL; pmd_t *pmd = NULL;
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
pmd = vmemmap_alloc_block(PAGE_SIZE * 4, 0); pmd = vmem_alloc_pages(2);
if (!pmd) if (!pmd)
return NULL; return NULL;
clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4); clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
...@@ -207,13 +214,14 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) ...@@ -207,13 +214,14 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
if (pte_none(*pt_dir)) { if (pte_none(*pt_dir)) {
unsigned long new_page; unsigned long new_page;
new_page =__pa(vmemmap_alloc_block(PAGE_SIZE, 0)); new_page =__pa(vmem_alloc_pages(0));
if (!new_page) if (!new_page)
goto out; goto out;
pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL); pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
*pt_dir = pte; *pt_dir = pte;
} }
} }
memset(start, 0, nr * sizeof(struct page));
ret = 0; ret = 0;
out: out:
flush_tlb_kernel_range(start_addr, end_addr); flush_tlb_kernel_range(start_addr, end_addr);
......
...@@ -40,7 +40,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work) ...@@ -40,7 +40,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
put_online_cpus(); put_online_cpus();
} }
/*
 * Workqueue callback run after an SCLP CPU configuration change:
 * rescan for CPUs that became (un)available.
 *
 * __ref: smp_rescan_cpus() is itself __ref-annotated (it may reference
 * init/meminit code); calling it from this non-init worker is
 * intentional, and the annotation silences the section mismatch
 * warning.
 */
static void __ref sclp_cpu_change_notify(struct work_struct *work)
{
	smp_rescan_cpus();
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment