Commit 5056c2c5 authored by Heiko Carstens, committed by Vasily Gorbik

s390/vdso: put vdso datapage in a separate vma

Add a separate "[vvar]" mapping for the vdso datapage, since it
doesn't need to be executable or COW-able.

This is actually the s390 implementation of commit 87154938
("arm64: vdso: put vdso datapage in a separate vma")
Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
parent dfc11c98
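For context, here is a minimal userspace sketch (not part of the commit) that lists the resulting "[vdso]" and "[vvar]" entries in /proc/self/maps of a 64-bit process. The expectation that "[vvar]" shows up read-only and non-executable is an assumption derived from the VM_READ|VM_MAYREAD|VM_PFNMAP flags used in the diff below; the code itself only prints whatever the kernel reports.

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[512];
	FILE *maps = fopen("/proc/self/maps", "r");

	if (!maps)
		return 1;
	while (fgets(line, sizeof(line), maps)) {
		/* print only the vdso text mapping and the vvar data mapping */
		if (strstr(line, "[vdso]") || strstr(line, "[vvar]"))
			fputs(line, stdout);
	}
	fclose(maps);
	return 0;
}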
@@ -40,6 +40,14 @@ static int __init vdso_setup(char *str)
 }
 __setup("vdso=", vdso_setup);
 
+static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
+			     struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	if (vmf->pgoff == 0)
+		return vmf_insert_pfn(vma, vmf->address, virt_to_pfn(vdso_data));
+	return VM_FAULT_SIGBUS;
+}
+
 static int vdso_mremap(const struct vm_special_mapping *sm,
 		       struct vm_area_struct *vma)
 {
@@ -47,6 +55,11 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
 	return 0;
 }
 
+static struct vm_special_mapping vvar_mapping = {
+	.name = "[vvar]",
+	.fault = vvar_fault,
+};
+
 static struct vm_special_mapping vdso_mapping = {
 	.name = "[vdso]",
 	.mremap = vdso_mremap,
@@ -61,38 +74,41 @@ early_initcall(vdso_getcpu_init); /* Must be called before SMP init */
 
 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
+	unsigned long addr, vdso_text_start, vdso_text_len, vdso_mapping_len;
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	unsigned long vdso_base;
 	int rc;
 
 	if (!vdso_enabled || is_compat_task())
 		return 0;
 	if (mmap_write_lock_killable(mm))
 		return -EINTR;
-	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
-	rc = vdso_base;
-	if (IS_ERR_VALUE(vdso_base))
+	vdso_text_len = vdso_pages << PAGE_SHIFT;
+	vdso_mapping_len = vdso_text_len + PAGE_SIZE;
+	addr = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
+	rc = addr;
+	if (IS_ERR_VALUE(addr))
 		goto out;
-	/*
-	 * our vma flags don't have VM_WRITE so by default, the process
-	 * isn't allowed to write those pages.
-	 * gdb can break that with ptrace interface, and thus trigger COW
-	 * on those pages but it's then your responsibility to never do that
-	 * on the "data" page of the vDSO or you'll stop getting kernel
-	 * updates and your nice userland gettimeofday will be totally dead.
-	 * It's fine to use that for setting breakpoints in the vDSO code
-	 * pages though.
-	 */
-	vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
+	/* VM_MAYWRITE for COW so gdb can set breakpoints */
+	vdso_text_start = addr;
+	vma = _install_special_mapping(mm, addr, vdso_text_len,
 				       VM_READ|VM_EXEC|
 				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
 				       &vdso_mapping);
 	rc = PTR_ERR(vma);
 	if (IS_ERR(vma))
 		goto out;
-	current->mm->context.vdso_base = vdso_base;
-	rc = 0;
+	addr += vdso_text_len;
+	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
+				       VM_READ|VM_MAYREAD|VM_PFNMAP,
+				       &vvar_mapping);
+	if (IS_ERR(vma)) {
+		do_munmap(mm, vdso_text_start, vdso_text_len, NULL);
+		rc = PTR_ERR(vma);
+	} else {
+		current->mm->context.vdso_base = vdso_text_start;
+		rc = 0;
+	}
 out:
 	mmap_write_unlock(mm);
 	return rc;
@@ -103,15 +119,14 @@ static int __init vdso_init(void)
 	struct page **pages;
 	int i;
 
-	vdso_pages = ((vdso64_end - vdso64_start) >> PAGE_SHIFT) + 1;
+	vdso_pages = (vdso64_end - vdso64_start) >> PAGE_SHIFT;
 	pages = kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
 	if (!pages) {
 		vdso_enabled = 0;
 		return -ENOMEM;
 	}
-	for (i = 0; i < vdso_pages - 1; i++)
+	for (i = 0; i < vdso_pages; i++)
 		pages[i] = virt_to_page(vdso64_start + i * PAGE_SIZE);
-	pages[vdso_pages - 1] = virt_to_page(vdso_data);
 	pages[vdso_pages] = NULL;
 	vdso_mapping.pages = pages;
 	return 0;
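A worked example of the vdso_init() change (the two-page text size is hypothetical): with a 2-page vdso text, the old code set vdso_pages = 3 and built pages[] = { text page 0, text page 1, vdso_data, NULL }, so the data page was part of the COW-able "[vdso]" mapping; the new code sets vdso_pages = 2 and builds pages[] = { text page 0, text page 1, NULL }, with the data page instead inserted on demand by vvar_fault() via vmf_insert_pfn() into the separate "[vvar]" mapping.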