Commit 490fc053 authored by Linus Torvalds

mm: make vm_area_alloc() initialize core fields

Like vm_area_dup(), it initializes the anon_vma_chain head, and the
basic mm pointer.

The rest of the fields end up being different for different users,
although the plan is to also initialize the 'vm_ops' field to a dummy
entry.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 95faf699
...@@ -2278,17 +2278,15 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t ...@@ -2278,17 +2278,15 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
DPRINT(("smpl_buf @%p\n", smpl_buf)); DPRINT(("smpl_buf @%p\n", smpl_buf));
/* allocate vma */ /* allocate vma */
vma = vm_area_alloc(); vma = vm_area_alloc(mm);
if (!vma) { if (!vma) {
DPRINT(("Cannot allocate vma\n")); DPRINT(("Cannot allocate vma\n"));
goto error_kmem; goto error_kmem;
} }
INIT_LIST_HEAD(&vma->anon_vma_chain);
/* /*
* partially initialize the vma for the sampling buffer * partially initialize the vma for the sampling buffer
*/ */
vma->vm_mm = mm;
vma->vm_file = get_file(filp); vma->vm_file = get_file(filp);
vma->vm_flags = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP; vma->vm_flags = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP;
vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */ vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */
......
...@@ -114,10 +114,8 @@ ia64_init_addr_space (void) ...@@ -114,10 +114,8 @@ ia64_init_addr_space (void)
* the problem. When the process attempts to write to the register backing store * the problem. When the process attempts to write to the register backing store
* for the first time, it will get a SEGFAULT in this case. * for the first time, it will get a SEGFAULT in this case.
*/ */
vma = vm_area_alloc(); vma = vm_area_alloc(current->mm);
if (vma) { if (vma) {
INIT_LIST_HEAD(&vma->anon_vma_chain);
vma->vm_mm = current->mm;
vma->vm_start = current->thread.rbs_bot & PAGE_MASK; vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
vma->vm_end = vma->vm_start + PAGE_SIZE; vma->vm_end = vma->vm_start + PAGE_SIZE;
vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT; vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
...@@ -133,10 +131,8 @@ ia64_init_addr_space (void) ...@@ -133,10 +131,8 @@ ia64_init_addr_space (void)
/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */ /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
if (!(current->personality & MMAP_PAGE_ZERO)) { if (!(current->personality & MMAP_PAGE_ZERO)) {
vma = vm_area_alloc(); vma = vm_area_alloc(current->mm);
if (vma) { if (vma) {
INIT_LIST_HEAD(&vma->anon_vma_chain);
vma->vm_mm = current->mm;
vma->vm_end = PAGE_SIZE; vma->vm_end = PAGE_SIZE;
vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT); vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
......
...@@ -290,7 +290,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm) ...@@ -290,7 +290,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
struct vm_area_struct *vma = NULL; struct vm_area_struct *vma = NULL;
struct mm_struct *mm = bprm->mm; struct mm_struct *mm = bprm->mm;
bprm->vma = vma = vm_area_alloc(); bprm->vma = vma = vm_area_alloc(mm);
if (!vma) if (!vma)
return -ENOMEM; return -ENOMEM;
...@@ -298,7 +298,6 @@ static int __bprm_mm_init(struct linux_binprm *bprm) ...@@ -298,7 +298,6 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
err = -EINTR; err = -EINTR;
goto err_free; goto err_free;
} }
vma->vm_mm = mm;
/* /*
* Place the stack at the largest stack address the architecture * Place the stack at the largest stack address the architecture
...@@ -311,7 +310,6 @@ static int __bprm_mm_init(struct linux_binprm *bprm) ...@@ -311,7 +310,6 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
vma->vm_start = vma->vm_end - PAGE_SIZE; vma->vm_start = vma->vm_end - PAGE_SIZE;
vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP; vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
INIT_LIST_HEAD(&vma->anon_vma_chain);
err = insert_vm_struct(mm, vma); err = insert_vm_struct(mm, vma);
if (err) if (err)
......
...@@ -155,7 +155,7 @@ extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *, ...@@ -155,7 +155,7 @@ extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
* mmap() functions). * mmap() functions).
*/ */
struct vm_area_struct *vm_area_alloc(void); struct vm_area_struct *vm_area_alloc(struct mm_struct *);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *); struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
void vm_area_free(struct vm_area_struct *); void vm_area_free(struct vm_area_struct *);
......
...@@ -308,9 +308,15 @@ static struct kmem_cache *vm_area_cachep; ...@@ -308,9 +308,15 @@ static struct kmem_cache *vm_area_cachep;
/* SLAB cache for mm_struct structures (tsk->mm) */ /* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep; static struct kmem_cache *mm_cachep;
struct vm_area_struct *vm_area_alloc(void) struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
{ {
return kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); struct vm_area_struct *vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (vma) {
vma->vm_mm = mm;
INIT_LIST_HEAD(&vma->anon_vma_chain);
}
return vma;
} }
struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
......
...@@ -1729,19 +1729,17 @@ unsigned long mmap_region(struct file *file, unsigned long addr, ...@@ -1729,19 +1729,17 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
* specific mapper. the address has already been validated, but * specific mapper. the address has already been validated, but
* not unmapped, but the maps are removed from the list. * not unmapped, but the maps are removed from the list.
*/ */
vma = vm_area_alloc(); vma = vm_area_alloc(mm);
if (!vma) { if (!vma) {
error = -ENOMEM; error = -ENOMEM;
goto unacct_error; goto unacct_error;
} }
vma->vm_mm = mm;
vma->vm_start = addr; vma->vm_start = addr;
vma->vm_end = addr + len; vma->vm_end = addr + len;
vma->vm_flags = vm_flags; vma->vm_flags = vm_flags;
vma->vm_page_prot = vm_get_page_prot(vm_flags); vma->vm_page_prot = vm_get_page_prot(vm_flags);
vma->vm_pgoff = pgoff; vma->vm_pgoff = pgoff;
INIT_LIST_HEAD(&vma->anon_vma_chain);
if (file) { if (file) {
if (vm_flags & VM_DENYWRITE) { if (vm_flags & VM_DENYWRITE) {
...@@ -2979,14 +2977,12 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla ...@@ -2979,14 +2977,12 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla
/* /*
* create a vma struct for an anonymous mapping * create a vma struct for an anonymous mapping
*/ */
vma = vm_area_alloc(); vma = vm_area_alloc(mm);
if (!vma) { if (!vma) {
vm_unacct_memory(len >> PAGE_SHIFT); vm_unacct_memory(len >> PAGE_SHIFT);
return -ENOMEM; return -ENOMEM;
} }
INIT_LIST_HEAD(&vma->anon_vma_chain);
vma->vm_mm = mm;
vma->vm_start = addr; vma->vm_start = addr;
vma->vm_end = addr + len; vma->vm_end = addr + len;
vma->vm_pgoff = pgoff; vma->vm_pgoff = pgoff;
...@@ -3343,12 +3339,10 @@ static struct vm_area_struct *__install_special_mapping( ...@@ -3343,12 +3339,10 @@ static struct vm_area_struct *__install_special_mapping(
int ret; int ret;
struct vm_area_struct *vma; struct vm_area_struct *vma;
vma = vm_area_alloc(); vma = vm_area_alloc(mm);
if (unlikely(vma == NULL)) if (unlikely(vma == NULL))
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&vma->anon_vma_chain);
vma->vm_mm = mm;
vma->vm_start = addr; vma->vm_start = addr;
vma->vm_end = addr + len; vma->vm_end = addr + len;
......
...@@ -1204,7 +1204,7 @@ unsigned long do_mmap(struct file *file, ...@@ -1204,7 +1204,7 @@ unsigned long do_mmap(struct file *file,
if (!region) if (!region)
goto error_getting_region; goto error_getting_region;
vma = vm_area_alloc(); vma = vm_area_alloc(current->mm);
if (!vma) if (!vma)
goto error_getting_vma; goto error_getting_vma;
...@@ -1212,7 +1212,6 @@ unsigned long do_mmap(struct file *file, ...@@ -1212,7 +1212,6 @@ unsigned long do_mmap(struct file *file,
region->vm_flags = vm_flags; region->vm_flags = vm_flags;
region->vm_pgoff = pgoff; region->vm_pgoff = pgoff;
INIT_LIST_HEAD(&vma->anon_vma_chain);
vma->vm_flags = vm_flags; vma->vm_flags = vm_flags;
vma->vm_pgoff = pgoff; vma->vm_pgoff = pgoff;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment