Commit af0d95af authored by Oleg Nesterov

uprobes: Teach __create_xol_area() to accept the predefined vaddr

Currently xol_add_vma() uses get_unmapped_area() for area->vaddr,
but the next patches need to use the fixed address. So this patch
adds the new "vaddr" argument to __create_xol_area() which should
be used as area->vaddr if it is nonzero.

xol_add_vma() doesn't bother to verify that the predefined addr is
not used, insert_vm_struct() should fail if find_vma_links() detects
the overlap with the existing vma.

Also, __create_xol_area() doesn't need __GFP_ZERO to allocate area.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
parent 6441ec8b
@@ -1104,12 +1104,15 @@ static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
 	if (mm->uprobes_state.xol_area)
 		goto fail;

+	if (!area->vaddr) {
 	/* Try to map as high as possible, this is only a hint. */
-	area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
+	area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
+					PAGE_SIZE, 0, 0);
 	if (area->vaddr & ~PAGE_MASK) {
 		ret = area->vaddr;
 		goto fail;
 	}
+	}

 	ret = install_special_mapping(mm, area->vaddr, PAGE_SIZE,
 				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, &area->page);
@@ -1124,13 +1127,13 @@ static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
 	return ret;
 }

-static struct xol_area *__create_xol_area(void)
+static struct xol_area *__create_xol_area(unsigned long vaddr)
 {
 	struct mm_struct *mm = current->mm;
 	uprobe_opcode_t insn = UPROBE_SWBP_INSN;
 	struct xol_area *area;

-	area = kzalloc(sizeof(*area), GFP_KERNEL);
+	area = kmalloc(sizeof(*area), GFP_KERNEL);
 	if (unlikely(!area))
 		goto out;
@@ -1142,6 +1145,7 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
 	if (!area->page)
 		goto free_bitmap;

+	area->vaddr = vaddr;
 	init_waitqueue_head(&area->wq);
 	/* Reserve the 1st slot for get_trampoline_vaddr() */
 	set_bit(0, area->bitmap);
@@ -1172,7 +1176,7 @@ static struct xol_area *get_xol_area(void)
 	struct xol_area *area;

 	if (!mm->uprobes_state.xol_area)
-		__create_xol_area();
+		__create_xol_area(0);

 	area = mm->uprobes_state.xol_area;
 	smp_read_barrier_depends();	/* pairs with wmb in xol_add_vma() */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment