Commit 511157ab authored by Christophe Leroy, committed by Michael Ellerman

powerpc/vdso: Move vdso datapage up front

Move the vdso datapage in front of the VDSO area,
before the vdso text.

This will allow removing the __kernel_datapage_offset symbol
and simplifying __get_datapage() in following patches.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/b68c99b6e8ee0b1d99bfa4c7e34c359fc1bc1000.1601197618.git.christophe.leroy@csgroup.eu
parent c102f076
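
For orientation, here is a minimal user-space sketch (not part of the patch; the addresses, the page count and all names are made up) of the layout change described above: the data page, which used to follow the vDSO text pages, becomes page 0 of the special mapping, and the text now starts one page in.

/*
 * Illustrative sketch only: models the before/after page layout of the
 * powerpc vDSO mapping. All values below are hypothetical.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long vma_start = 0x7fff9000UL;	/* hypothetical mmap base */
	unsigned long text_pages = 2;		/* hypothetical vDSO text size */

	/* Before: [text][text][data] -- the data page sits after the text. */
	unsigned long old_text = vma_start;
	unsigned long old_data = vma_start + text_pages * PAGE_SIZE;

	/* After: [data][text][text] -- the data page moves up front. */
	unsigned long new_data = vma_start;
	unsigned long new_text = vma_start + PAGE_SIZE;

	printf("before: text=%#lx data=%#lx\n", old_text, old_data);
	printf("after:  data=%#lx text=%#lx\n", new_data, new_text);
	return 0;
}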
@@ -262,7 +262,7 @@ extern void arch_exit_mmap(struct mm_struct *mm);
 static inline void arch_unmap(struct mm_struct *mm,
			       unsigned long start, unsigned long end)
 {
-	unsigned long vdso_base = (unsigned long)mm->context.vdso;
+	unsigned long vdso_base = (unsigned long)mm->context.vdso - PAGE_SIZE;

 	if (start <= vdso_base && vdso_base < end)
 		mm->context.vdso = NULL;
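
A hedged sketch of what the arch_unmap() check above amounts to after this change (standalone C with made-up addresses; context_vdso stands in for mm->context.vdso): since context.vdso now points at the vDSO text, one page past the start of the mapping, the VMA base is recovered by subtracting PAGE_SIZE before testing whether the unmapped range covers it.

/*
 * Sketch of the arch_unmap() logic after the change; not kernel code.
 * context_vdso is assumed to point at the vDSO text, i.e. one page past
 * the start of the special mapping, which now begins with the data page.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

static bool vdso_unmapped(unsigned long context_vdso,
			  unsigned long start, unsigned long end)
{
	/* The VMA starts at the data page, one page below the text. */
	unsigned long vdso_base = context_vdso - PAGE_SIZE;

	return start <= vdso_base && vdso_base < end;
}

int main(void)
{
	unsigned long context_vdso = 0x7fffa000UL;	/* hypothetical */

	/* Unmapping the data page (0x7fff9000) invalidates the vDSO. */
	printf("%d\n", vdso_unmapped(context_vdso, 0x7fff9000UL, 0x7fffb000UL));
	/* Unmapping an unrelated range does not. */
	printf("%d\n", vdso_unmapped(context_vdso, 0x10000000UL, 0x10002000UL));
	return 0;
}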
@@ -123,7 +123,7 @@ static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struc
 	if (new_size != text_size + PAGE_SIZE)
 		return -EINVAL;

-	current->mm->context.vdso = (void __user *)new_vma->vm_start;
+	current->mm->context.vdso = (void __user *)new_vma->vm_start + PAGE_SIZE;

 	return 0;
 }
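
Both vdso_mremap() above and __arch_setup_additional_pages() in the next hunk now cache the address of the vDSO text, one page past the start of the special mapping, in mm->context.vdso. A tiny sketch of that convention (illustrative names, not kernel code):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Page 0 of the mapping is now the data page; the text follows it. */
static void *vdso_text_from_vma(unsigned long vma_start)
{
	return (void *)(vma_start + PAGE_SIZE);
}

int main(void)
{
	printf("%p\n", vdso_text_from_vma(0x7fff9000UL));	/* prints 0x7fffa000 */
	return 0;
}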
@@ -198,7 +198,7 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
 	 * install_special_mapping or the perf counter mmap tracking code
 	 * will fail to recognise it as a vDSO.
 	 */
-	mm->context.vdso = (void __user *)vdso_base;
+	mm->context.vdso = (void __user *)vdso_base + PAGE_SIZE;

 	/*
 	 * our vma flags don't have VM_WRITE so by default, the process isn't
@@ -507,7 +507,7 @@ static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
 		return -1;
 	}
 	*((int *)(vdso64_kbase + sym64->st_value - VDSO64_LBASE)) =
-		(vdso64_pages << PAGE_SHIFT) -
+		-PAGE_SIZE -
 		(sym64->st_value - VDSO64_LBASE);
 #endif /* CONFIG_PPC64 */
@@ -519,7 +519,7 @@ static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
 		return -1;
 	}
 	*((int *)(vdso32_kbase + (sym32->st_value - VDSO32_LBASE))) =
-		(vdso32_pages << PAGE_SHIFT) -
+		-PAGE_SIZE -
 		(sym32->st_value - VDSO32_LBASE);
 #endif
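
For clarity, a worked sketch of the offset arithmetic changed in the two hunks above (all numbers are made up: a 2-page vDSO text and a symbol 0x80 bytes into it). The word patched at __kernel_datapage_offset holds the displacement from that word to the data page, which __get_datapage() adds to the word's own runtime address; with the data page now one page before the text, the stored value becomes -PAGE_SIZE - symbol_offset instead of (pages << PAGE_SHIFT) - symbol_offset.

/*
 * Sketch of the __kernel_datapage_offset arithmetic; not kernel code.
 * Hypothetical numbers: a 2-page vDSO text, symbol 0x80 bytes into it.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1L << PAGE_SHIFT)

int main(void)
{
	long vdso_pages = 2;
	long sym_off = 0x80;			/* st_value - LBASE, hypothetical */

	/* Old layout: the data page follows the text pages. */
	long old_offset = (vdso_pages << PAGE_SHIFT) - sym_off;

	/* New layout: the data page precedes the text by exactly one page. */
	long new_offset = -PAGE_SIZE - sym_off;

	/* The data page is found by adding the stored offset to the symbol's
	 * runtime address (text base + sym_off in this sketch). */
	unsigned long text = 0x7fffa000UL;	/* hypothetical text base */

	printf("old datapage at %#lx\n", text + sym_off + old_offset);	/* 0x7fffc000 */
	printf("new datapage at %#lx\n", text + sym_off + new_offset);	/* 0x7fff9000 */
	return 0;
}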
@@ -693,10 +693,10 @@ static struct page ** __init vdso_setup_pages(void *start, void *end)
 	if (!pagelist)
 		panic("%s: Cannot allocate page list for VDSO", __func__);

-	for (i = 0; i < pages; i++)
-		pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
+	pagelist[0] = virt_to_page(vdso_data);

-	pagelist[i] = virt_to_page(vdso_data);
+	for (i = 0; i < pages; i++)
+		pagelist[i + 1] = virt_to_page(start + i * PAGE_SIZE);

 	return pagelist;
 }
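
As a hedged illustration of the reordering in this last hunk (plain C; stand-in buffers replace struct page and virt_to_page(), and the sizes are made up): slot 0 of the page list now holds the data page, and the text pages follow at slots 1..npages, mirroring the new in-memory layout of the mapping.

/*
 * Sketch of the new page-list ordering in vdso_setup_pages(); plain
 * pointers stand in for the kernel's struct page entries.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	static char vdso_data[PAGE_SIZE];	/* stand-in for the data page */
	static char text[2 * PAGE_SIZE];	/* stand-in for two text pages */
	unsigned long npages = 2;
	void **pagelist;
	unsigned long i;

	/* One extra slot so the data page can go up front. */
	pagelist = calloc(npages + 1, sizeof(void *));
	if (!pagelist)
		return 1;

	pagelist[0] = vdso_data;		/* data page first */
	for (i = 0; i < npages; i++)
		pagelist[i + 1] = text + i * PAGE_SIZE;

	for (i = 0; i < npages + 1; i++)
		printf("pagelist[%lu] = %p\n", i, pagelist[i]);

	free(pagelist);
	return 0;
}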