Commit 5846fc6c authored by Andrew Morton, committed by Linus Torvalds

[PATCH] consolidate the VMA splitting code

Patch from Christoph Hellwig.  250 lines of code go away.

The three syscalls madvise/mlock/mprotect all change attributes of
memory regions.  These attributes are stored in vm_area_structs (VMAs).

The current code implements each of those syscalls with four
subroutines for changing a VMA: one that just sets the new flags when
the full VMA is affected, one that creates a new VMA for the right part
of the old one and sets the flags there, one that does the same for the
left part, and one that splits off both the left and the right part and
sets the new flags in the middle.

This patch makes those routines create the new VMAs on the left and on
the right hand side first and then always set the flags in the old VMA.
Together with using a library function (split_vma) to do the split,
this simplifies the code and makes it more readable.  As a side effect
it fixes some VM accounting bugs observed by Hugh Dickins.
parent c89a8bad
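For illustration only, not part of the patch: a minimal sketch of the consolidated pattern, using the split_vma() interface declared in the first hunk below. The wrapper name apply_to_region() and its single vm_flags update are hypothetical stand-ins for what madvise_behavior(), mlock_fixup() and mprotect_fixup() actually do in the diff.

#include <linux/mm.h>

/*
 * Sketch of the new pattern: trim the VMA to [start, end) by splitting
 * off the pieces outside that range, then update the remaining middle
 * VMA in place.  Error handling is simplified.
 */
static int apply_to_region(struct vm_area_struct *vma,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	int error;

	if (start != vma->vm_start) {
		/* new_below=1: the new VMA covers [vm_start, start) */
		error = split_vma(mm, vma, start, 1);
		if (error)
			return error;
	}
	if (end != vma->vm_end) {
		/* new_below=0: the new VMA covers [end, vm_end) */
		error = split_vma(mm, vma, end, 0);
		if (error)
			return error;
	}

	/* vma now covers exactly [start, end); change it under the lock */
	spin_lock(&mm->page_table_lock);
	vma->vm_flags = newflags;
	spin_unlock(&mm->page_table_lock);
	return 0;
}
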
@@ -504,6 +504,8 @@ extern int expand_stack(struct vm_area_struct * vma, unsigned long address);
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
struct vm_area_struct **pprev);
extern int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
unsigned long addr, int new_below);
/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
NULL if none. Assume start_addr < end_addr. */
......
@@ -2,17 +2,37 @@
* linux/mm/madvise.c
*
* Copyright (C) 1999 Linus Torvalds
* Copyright (C) 2002 Christoph Hellwig
*/
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
static inline void setup_read_behavior(struct vm_area_struct * vma,
int behavior)
/*
* We can potentially split a vm area into separate
* areas, each area with its own behavior.
*/
static long madvise_behavior(struct vm_area_struct * vma, unsigned long start,
unsigned long end, int behavior)
{
struct mm_struct * mm = vma->vm_mm;
int error;
if (start != vma->vm_start) {
error = split_vma(mm, vma, start, 1);
if (error)
return -EAGAIN;
}
if (end != vma->vm_end) {
error = split_vma(mm, vma, end, 0);
if (error)
return -EAGAIN;
}
spin_lock(&mm->page_table_lock);
vma->vm_raend = 0;
VM_ClearReadHint(vma);
switch (behavior) {
@@ -25,133 +45,11 @@ static inline void setup_read_behavior(struct vm_area_struct * vma,
default:
break;
}
}
static long madvise_fixup_start(struct vm_area_struct * vma,
unsigned long end, int behavior)
{
struct vm_area_struct * n;
struct mm_struct * mm = vma->vm_mm;
n = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
if (!n)
return -EAGAIN;
*n = *vma;
n->vm_end = end;
setup_read_behavior(n, behavior);
n->vm_raend = 0;
if (n->vm_file)
get_file(n->vm_file);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
vma->vm_pgoff += (end - vma->vm_start) >> PAGE_SHIFT;
lock_vma_mappings(vma);
spin_lock(&mm->page_table_lock);
vma->vm_start = end;
__insert_vm_struct(mm, n);
spin_unlock(&mm->page_table_lock);
unlock_vma_mappings(vma);
return 0;
}
static long madvise_fixup_end(struct vm_area_struct * vma,
unsigned long start, int behavior)
{
struct vm_area_struct * n;
struct mm_struct * mm = vma->vm_mm;
n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (!n)
return -EAGAIN;
*n = *vma;
n->vm_start = start;
n->vm_pgoff += (n->vm_start - vma->vm_start) >> PAGE_SHIFT;
setup_read_behavior(n, behavior);
n->vm_raend = 0;
if (n->vm_file)
get_file(n->vm_file);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
lock_vma_mappings(vma);
spin_lock(&mm->page_table_lock);
vma->vm_end = start;
__insert_vm_struct(mm, n);
spin_unlock(&mm->page_table_lock);
unlock_vma_mappings(vma);
return 0;
}
static long madvise_fixup_middle(struct vm_area_struct * vma, unsigned long start,
unsigned long end, int behavior)
{
struct vm_area_struct * left, * right;
struct mm_struct * mm = vma->vm_mm;
left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (!left)
return -EAGAIN;
right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (!right) {
kmem_cache_free(vm_area_cachep, left);
return -EAGAIN;
}
*left = *vma;
*right = *vma;
left->vm_end = start;
right->vm_start = end;
right->vm_pgoff += (right->vm_start - left->vm_start) >> PAGE_SHIFT;
left->vm_raend = 0;
right->vm_raend = 0;
if (vma->vm_file)
atomic_add(2, &vma->vm_file->f_count);
if (vma->vm_ops && vma->vm_ops->open) {
vma->vm_ops->open(left);
vma->vm_ops->open(right);
}
vma->vm_pgoff += (start - vma->vm_start) >> PAGE_SHIFT;
vma->vm_raend = 0;
lock_vma_mappings(vma);
spin_lock(&mm->page_table_lock);
vma->vm_start = start;
vma->vm_end = end;
setup_read_behavior(vma, behavior);
__insert_vm_struct(mm, left);
__insert_vm_struct(mm, right);
spin_unlock(&mm->page_table_lock);
unlock_vma_mappings(vma);
return 0;
}
/*
* We can potentially split a vm area into separate
* areas, each area with its own behavior.
*/
static long madvise_behavior(struct vm_area_struct * vma, unsigned long start,
unsigned long end, int behavior)
{
int error = 0;
/* This caps the number of vma's this process can own */
if (vma->vm_mm->map_count > MAX_MAP_COUNT)
return -ENOMEM;
if (start == vma->vm_start) {
if (end == vma->vm_end) {
setup_read_behavior(vma, behavior);
vma->vm_raend = 0;
} else
error = madvise_fixup_start(vma, end, behavior);
} else {
if (end == vma->vm_end)
error = madvise_fixup_end(vma, start, behavior);
else
error = madvise_fixup_middle(vma, start, end, behavior);
}
return error;
}
/*
* Schedule all required I/O operations, then run the disk queue
* to make sure they are started. Do not wait for completion.
......
@@ -2,147 +2,49 @@
* linux/mm/mlock.c
*
* (C) Copyright 1995 Linus Torvalds
* (C) Copyright 2002 Christoph Hellwig
*/
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/pagemap.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <linux/mman.h>
#include <linux/mm.h>
static inline int mlock_fixup_all(struct vm_area_struct * vma, int newflags)
{
spin_lock(&vma->vm_mm->page_table_lock);
vma->vm_flags = newflags;
spin_unlock(&vma->vm_mm->page_table_lock);
return 0;
}
static inline int mlock_fixup_start(struct vm_area_struct * vma,
unsigned long end, int newflags)
static int mlock_fixup(struct vm_area_struct * vma,
unsigned long start, unsigned long end, unsigned int newflags)
{
struct vm_area_struct * n;
struct mm_struct * mm = vma->vm_mm;
int pages, error;
n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (!n)
return -EAGAIN;
*n = *vma;
n->vm_end = end;
n->vm_flags = newflags;
n->vm_raend = 0;
if (n->vm_file)
get_file(n->vm_file);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
vma->vm_pgoff += (end - vma->vm_start) >> PAGE_SHIFT;
lock_vma_mappings(vma);
spin_lock(&vma->vm_mm->page_table_lock);
vma->vm_start = end;
__insert_vm_struct(current->mm, n);
spin_unlock(&vma->vm_mm->page_table_lock);
unlock_vma_mappings(vma);
if (newflags == vma->vm_flags)
return 0;
}
static inline int mlock_fixup_end(struct vm_area_struct * vma,
unsigned long start, int newflags)
{
struct vm_area_struct * n;
n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (!n)
if (start != vma->vm_start) {
error = split_vma(mm, vma, start, 1);
if (error)
return -EAGAIN;
*n = *vma;
n->vm_start = start;
n->vm_pgoff += (n->vm_start - vma->vm_start) >> PAGE_SHIFT;
n->vm_flags = newflags;
n->vm_raend = 0;
if (n->vm_file)
get_file(n->vm_file);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
lock_vma_mappings(vma);
spin_lock(&vma->vm_mm->page_table_lock);
vma->vm_end = start;
__insert_vm_struct(current->mm, n);
spin_unlock(&vma->vm_mm->page_table_lock);
unlock_vma_mappings(vma);
return 0;
}
static inline int mlock_fixup_middle(struct vm_area_struct * vma,
unsigned long start, unsigned long end, int newflags)
{
struct vm_area_struct * left, * right;
}
left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (!left)
return -EAGAIN;
right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (!right) {
kmem_cache_free(vm_area_cachep, left);
if (end != vma->vm_end) {
error = split_vma(mm, vma, end, 0);
if (error)
return -EAGAIN;
}
*left = *vma;
*right = *vma;
left->vm_end = start;
right->vm_start = end;
right->vm_pgoff += (right->vm_start - left->vm_start) >> PAGE_SHIFT;
vma->vm_flags = newflags;
left->vm_raend = 0;
right->vm_raend = 0;
if (vma->vm_file)
atomic_add(2, &vma->vm_file->f_count);
if (vma->vm_ops && vma->vm_ops->open) {
vma->vm_ops->open(left);
vma->vm_ops->open(right);
}
vma->vm_raend = 0;
vma->vm_pgoff += (start - vma->vm_start) >> PAGE_SHIFT;
lock_vma_mappings(vma);
spin_lock(&vma->vm_mm->page_table_lock);
vma->vm_start = start;
vma->vm_end = end;
vma->vm_flags = newflags;
__insert_vm_struct(current->mm, left);
__insert_vm_struct(current->mm, right);
spin_unlock(&vma->vm_mm->page_table_lock);
unlock_vma_mappings(vma);
return 0;
}
static int mlock_fixup(struct vm_area_struct * vma,
unsigned long start, unsigned long end, unsigned int newflags)
{
int pages, retval;
if (newflags == vma->vm_flags)
return 0;
spin_lock(&mm->page_table_lock);
vma->vm_flags = newflags;
spin_unlock(&mm->page_table_lock);
if (start == vma->vm_start) {
if (end == vma->vm_end)
retval = mlock_fixup_all(vma, newflags);
else
retval = mlock_fixup_start(vma, end, newflags);
} else {
if (end == vma->vm_end)
retval = mlock_fixup_end(vma, start, newflags);
else
retval = mlock_fixup_middle(vma, start, end, newflags);
}
if (!retval) {
/* keep track of amount of locked VM */
/*
* Keep track of amount of locked VM.
*/
pages = (end - start) >> PAGE_SHIFT;
if (newflags & VM_LOCKED) {
pages = -pages;
make_pages_present(start, end);
}
vma->vm_mm->locked_vm -= pages;
}
return retval;
return 0;
}
static int do_mlock(unsigned long start, size_t len, int on)
......
@@ -1049,10 +1049,11 @@ static struct vm_area_struct *touched_by_munmap(struct mm_struct *mm,
}
/*
* Split a vma into two pieces at address 'addr', the original vma
* will contain the first part, a new vma is allocated for the tail.
* Split a vma into two pieces at address 'addr', a new vma is allocated
* either for the first part or the tail.
*/
static int splitvma(struct mm_struct *mm, struct vm_area_struct *mpnt, unsigned long addr)
int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
unsigned long addr, int new_below)
{
struct vm_area_struct *new;
@@ -1064,22 +1065,30 @@ static int splitvma(struct mm_struct *mm, struct vm_area_struct *mpnt, unsigned
return -ENOMEM;
/* most fields are the same, copy all, and then fixup */
*new = *mpnt;
*new = *vma;
if (new_below) {
new->vm_end = addr;
vma->vm_start = addr;
vma->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
} else {
vma->vm_end = addr;
new->vm_start = addr;
new->vm_pgoff = mpnt->vm_pgoff + ((addr - mpnt->vm_start) >> PAGE_SHIFT);
new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
}
new->vm_raend = 0;
if (mpnt->vm_file)
get_file(mpnt->vm_file);
if (mpnt->vm_ops && mpnt->vm_ops->open)
mpnt->vm_ops->open(mpnt);
mpnt->vm_end = addr; /* Truncate area */
if (new->vm_file)
get_file(new->vm_file);
if (new->vm_ops && new->vm_ops->open)
new->vm_ops->open(new);
spin_lock(&mm->page_table_lock);
lock_vma_mappings(mpnt);
lock_vma_mappings(vma);
__insert_vm_struct(mm, new);
unlock_vma_mappings(mpnt);
unlock_vma_mappings(vma);
spin_unlock(&mm->page_table_lock);
return 0;
@@ -1116,7 +1125,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
* If we need to split any vma, do it now to save pain later.
*/
if (start > mpnt->vm_start) {
if (splitvma(mm, mpnt, start))
if (split_vma(mm, mpnt, start, 0))
return -ENOMEM;
prev = mpnt;
mpnt = mpnt->vm_next;
@@ -1125,7 +1134,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
/* Does it split the last one? */
last = find_vma(mm, end);
if (last && end > last->vm_start) {
if (splitvma(mm, last, end))
if (split_vma(mm, last, end, 0))
return -ENOMEM;
}
......
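Reading aid, not part of the patch: the effect of split_vma()'s new_below argument as implemented in the mm/mmap.c hunk above, shown with hypothetical addresses (a VMA spanning 0x1000-0x4000 before each independent call).

/*
 * Illustration of the new_below argument (assumed example addresses,
 * each call applied to a fresh VMA spanning 0x1000-0x4000):
 *
 *   split_vma(mm, vma, 0x2000, 1);   new VMA:  0x1000-0x2000  (below addr)
 *                                    vma:      0x2000-0x4000
 *
 *   split_vma(mm, vma, 0x3000, 0);   vma:      0x1000-0x3000
 *                                    new VMA:  0x3000-0x4000  (the tail)
 */
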
@@ -2,13 +2,14 @@
* mm/mprotect.c
*
* (C) Copyright 1994 Linus Torvalds
* (C) Copyright 2002 Christoph Hellwig
*
* Address space accounting code <alan@redhat.com>
* (C) Copyright 2002 Red Hat Inc, All Rights Reserved
*/
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
@@ -21,7 +22,8 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
static inline void change_pte_range(pmd_t * pmd, unsigned long address,
static inline void
change_pte_range(pmd_t *pmd, unsigned long address,
unsigned long size, pgprot_t newprot)
{
pte_t * pte;
@@ -56,7 +58,8 @@ static inline void change_pte_range(pmd_t * pmd, unsigned long address,
pte_unmap(pte - 1);
}
static inline void change_pmd_range(pgd_t * pgd, unsigned long address,
static inline void
change_pmd_range(pgd_t *pgd, unsigned long address,
unsigned long size, pgprot_t newprot)
{
pmd_t * pmd;
@@ -81,7 +84,9 @@ static inline void change_pmd_range(pgd_t * pgd, unsigned long address,
} while (address && (address < end));
}
static void change_protection(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgprot_t newprot)
static void
change_protection(struct vm_area_struct *vma, unsigned long start,
unsigned long end, pgprot_t newprot)
{
pgd_t *dir;
unsigned long beg = start;
@@ -100,158 +105,58 @@ static void change_protection(struct vm_area_struct *vma, unsigned long start, u
spin_unlock(&current->mm->page_table_lock);
return;
}
static inline int mprotect_fixup_all(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
int newflags, pgprot_t prot)
/*
* Try to merge a vma with the previous vma, return 1 if successful or 0 if it
* was impossible.
*/
static int
mprotect_attempt_merge(struct vm_area_struct *vma, struct vm_area_struct *prev,
unsigned long end, int newflags)
{
struct vm_area_struct * prev = *pprev;
struct mm_struct * mm = vma->vm_mm;
if (prev && prev->vm_end == vma->vm_start && can_vma_merge(prev, newflags) &&
!vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
if (!prev || !vma)
return 0;
if (prev->vm_end != vma->vm_start)
return 0;
if (!can_vma_merge(prev, newflags))
return 0;
if (vma->vm_file || (vma->vm_flags & VM_SHARED))
return 0;
/*
* If the whole area changes to the protection of the previous one
* we can just get rid of it.
*/
if (end == vma->vm_end) {
spin_lock(&mm->page_table_lock);
prev->vm_end = vma->vm_end;
prev->vm_end = end;
__vma_unlink(mm, vma, prev);
spin_unlock(&mm->page_table_lock);
kmem_cache_free(vm_area_cachep, vma);
mm->map_count--;
return 0;
return 1;
}
/*
* Otherwise extend it.
*/
spin_lock(&mm->page_table_lock);
vma->vm_flags = newflags;
vma->vm_page_prot = prot;
spin_unlock(&mm->page_table_lock);
*pprev = vma;
return 0;
}
static inline int mprotect_fixup_start(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
unsigned long end,
int newflags, pgprot_t prot)
{
struct vm_area_struct * n, * prev = *pprev;
*pprev = vma;
if (prev && prev->vm_end == vma->vm_start && can_vma_merge(prev, newflags) &&
!vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
spin_lock(&vma->vm_mm->page_table_lock);
prev->vm_end = end;
vma->vm_start = end;
spin_unlock(&vma->vm_mm->page_table_lock);
return 0;
}
n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (!n)
return -ENOMEM;
*n = *vma;
n->vm_end = end;
n->vm_flags = newflags;
n->vm_raend = 0;
n->vm_page_prot = prot;
if (n->vm_file)
get_file(n->vm_file);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
vma->vm_pgoff += (end - vma->vm_start) >> PAGE_SHIFT;
lock_vma_mappings(vma);
spin_lock(&vma->vm_mm->page_table_lock);
vma->vm_start = end;
__insert_vm_struct(current->mm, n);
spin_unlock(&vma->vm_mm->page_table_lock);
unlock_vma_mappings(vma);
return 0;
}
static inline int mprotect_fixup_end(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
unsigned long start,
int newflags, pgprot_t prot)
{
struct vm_area_struct * n;
n = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
if (!n)
return -ENOMEM;
*n = *vma;
n->vm_start = start;
n->vm_pgoff += (n->vm_start - vma->vm_start) >> PAGE_SHIFT;
n->vm_flags = newflags;
n->vm_raend = 0;
n->vm_page_prot = prot;
if (n->vm_file)
get_file(n->vm_file);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
lock_vma_mappings(vma);
spin_lock(&vma->vm_mm->page_table_lock);
vma->vm_end = start;
__insert_vm_struct(current->mm, n);
spin_unlock(&vma->vm_mm->page_table_lock);
unlock_vma_mappings(vma);
*pprev = n;
return 0;
}
static inline int mprotect_fixup_middle(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
unsigned long start, unsigned long end,
int newflags, pgprot_t prot)
{
struct vm_area_struct * left, * right;
left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (!left)
return -ENOMEM;
right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (!right) {
kmem_cache_free(vm_area_cachep, left);
return -ENOMEM;
}
*left = *vma;
*right = *vma;
left->vm_end = start;
right->vm_start = end;
right->vm_pgoff += (right->vm_start - left->vm_start) >> PAGE_SHIFT;
left->vm_raend = 0;
right->vm_raend = 0;
if (vma->vm_file)
atomic_add(2,&vma->vm_file->f_count);
if (vma->vm_ops && vma->vm_ops->open) {
vma->vm_ops->open(left);
vma->vm_ops->open(right);
}
vma->vm_pgoff += (start - vma->vm_start) >> PAGE_SHIFT;
vma->vm_raend = 0;
vma->vm_page_prot = prot;
lock_vma_mappings(vma);
spin_lock(&vma->vm_mm->page_table_lock);
vma->vm_start = start;
vma->vm_end = end;
vma->vm_flags = newflags;
__insert_vm_struct(current->mm, left);
__insert_vm_struct(current->mm, right);
spin_unlock(&vma->vm_mm->page_table_lock);
unlock_vma_mappings(vma);
*pprev = right;
return 0;
spin_unlock(&mm->page_table_lock);
return 1;
}
static int mprotect_fixup(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
static int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
unsigned long start, unsigned long end, unsigned int newflags)
{
struct mm_struct * mm = vma->vm_mm;
unsigned long charged = 0;
pgprot_t newprot;
int error;
unsigned long charged = 0;
if (newflags == vma->vm_flags) {
*pprev = vma;
@@ -266,32 +171,50 @@ static int mprotect_fixup(struct vm_area_struct * vma, struct vm_area_struct **
* FIXME? We haven't defined a VM_NORESERVE flag, so mprotecting
* a MAP_NORESERVE private mapping to writable will now reserve.
*/
if ((newflags & VM_WRITE) &&
!(vma->vm_flags & (VM_ACCOUNT|VM_WRITE|VM_SHARED))) {
if (newflags & VM_WRITE) {
if (!(vma->vm_flags & (VM_ACCOUNT|VM_WRITE|VM_SHARED))) {
charged = (end - start) >> PAGE_SHIFT;
if (!vm_enough_memory(charged))
return -ENOMEM;
newflags |= VM_ACCOUNT;
}
}
newprot = protection_map[newflags & 0xf];
if (start == vma->vm_start) {
if (end == vma->vm_end)
error = mprotect_fixup_all(vma, pprev, newflags, newprot);
else
error = mprotect_fixup_start(vma, pprev, end, newflags, newprot);
} else if (end == vma->vm_end)
error = mprotect_fixup_end(vma, pprev, start, newflags, newprot);
else
error = mprotect_fixup_middle(vma, pprev, start, end, newflags, newprot);
if (error) {
vm_unacct_memory(charged);
return error;
/*
* Try to merge with the previous vma.
*/
if (mprotect_attempt_merge(vma, *pprev, end, newflags))
return 0;
} else {
error = split_vma(mm, vma, start, 1);
if (error)
goto fail;
}
if (end != vma->vm_end) {
error = split_vma(mm, vma, end, 0);
if (error)
goto fail;
}
spin_lock(&mm->page_table_lock);
vma->vm_flags = newflags;
vma->vm_page_prot = newprot;
spin_unlock(&mm->page_table_lock);
change_protection(vma, start, end, newprot);
return 0;
fail:
vm_unacct_memory(charged);
return error;
}
asmlinkage long sys_mprotect(unsigned long start, size_t len, unsigned long prot)
asmlinkage long
sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
unsigned long nstart, end, tmp;
struct vm_area_struct * vma, * next, * prev;
@@ -357,7 +280,9 @@ asmlinkage long sys_mprotect(unsigned long start, size_t len, unsigned long prot
goto out;
}
}
if (next && prev->vm_end == next->vm_start && can_vma_merge(next, prev->vm_flags) &&
if (next && prev->vm_end == next->vm_start &&
can_vma_merge(next, prev->vm_flags) &&
!prev->vm_file && !(prev->vm_flags & VM_SHARED)) {
spin_lock(&prev->vm_mm->page_table_lock);
prev->vm_end = next->vm_end;
......