Commit 46dea3d0 authored by Hugh Dickins, committed by Linus Torvalds

[PATCH] mm: ia64 use expand_upwards

ia64 has an expand_backing_store function for growing its Register Backing
Store vma upwards.  But more complete code for this purpose is found in the
CONFIG_STACK_GROWSUP part of mm/mmap.c.  Uglify its #ifdefs further to provide
expand_upwards for ia64 as well as expand_stack for parisc.

The Register Backing Store vma should be marked VM_ACCOUNT.  Implement the
intention of growing it only a page at a time, instead of passing an address
outside of the vma to handle_mm_fault, with unknown consequences.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent f449952b
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -19,32 +19,6 @@
 extern void die (char *, struct pt_regs *, long);
 
-/*
- * This routine is analogous to expand_stack() but instead grows the
- * register backing store (which grows towards higher addresses).
- * Since the register backing store is access sequentially, we
- * disallow growing the RBS by more than a page at a time.  Note that
- * the VM_GROWSUP flag can be set on any VM area but that's fine
- * because the total process size is still limited by RLIMIT_STACK and
- * RLIMIT_AS.
- */
-static inline long
-expand_backing_store (struct vm_area_struct *vma, unsigned long address)
-{
-	unsigned long grow;
-
-	grow = PAGE_SIZE >> PAGE_SHIFT;
-	if (address - vma->vm_start > current->signal->rlim[RLIMIT_STACK].rlim_cur
-	    || (((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->signal->rlim[RLIMIT_AS].rlim_cur))
-		return -ENOMEM;
-	vma->vm_end += PAGE_SIZE;
-	vma->vm_mm->total_vm += grow;
-	if (vma->vm_flags & VM_LOCKED)
-		vma->vm_mm->locked_vm += grow;
-	vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, grow);
-	return 0;
-}
-
 /*
  * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
  * (inside region 5, on ia64) and that page is present.
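
Note what the removed helper skipped compared with the common code in mm/mmap.c: no
overcommit accounting, no anon_vma locking, and no check that the faulting address
actually lands in the single page being added.  A fault far beyond vm_end would still
grow the vma by just one page and then let an address outside the vma reach
handle_mm_fault, the "unknown consequences" the commit message warns about.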
@@ -185,7 +159,13 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
 		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
 			goto bad_area;
-		if (expand_backing_store(vma, address))
+		/*
+		 * Since the register backing store is accessed sequentially,
+		 * we disallow growing it by more than a page at a time.
+		 */
+		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
+			goto bad_area;
+		if (expand_upwards(vma, address))
 			goto bad_area;
 	}
 	goto good_area;
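
The new guard in the hunk above is what implements the commit message's "only a page at
a time".  A minimal user-space sketch of the arithmetic, assuming a 16 KB ia64 page;
PAGE_SIZE and rbs_grow_allowed() here are illustrative stand-ins, not kernel definitions:

#include <stdio.h>

#define PAGE_SIZE 16384UL	/* assume a 16 KB ia64 page for this sketch */

static int rbs_grow_allowed(unsigned long vm_end, unsigned long address)
{
	/*
	 * The RBS is written one register slot at a time, so a legitimate
	 * fault lands at most one page beyond vm_end, and the last slot
	 * that fits in that page starts sizeof(long) bytes before its end.
	 * This mirrors the kernel check:
	 *	if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
	 *		goto bad_area;
	 */
	return address <= vm_end + PAGE_SIZE - sizeof(long);
}

int main(void)
{
	unsigned long vm_end = 0x2000UL * PAGE_SIZE;	/* arbitrary page-aligned end */

	/* first slot of the page above vm_end: allowed */
	printf("%d\n", rbs_grow_allowed(vm_end, vm_end));
	/* last slot that fits in that page: allowed */
	printf("%d\n", rbs_grow_allowed(vm_end, vm_end + PAGE_SIZE - sizeof(long)));
	/* one slot further would skip a page: rejected */
	printf("%d\n", rbs_grow_allowed(vm_end, vm_end + PAGE_SIZE));
	return 0;
}

Compiled and run, this prints 1, 1, 0: any register slot in the page immediately above
vm_end passes, anything beyond it is refused, so expand_upwards is never asked to grow
the vma past the page being faulted in.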
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -158,7 +158,7 @@ ia64_init_addr_space (void)
 		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
 		vma->vm_end = vma->vm_start + PAGE_SIZE;
 		vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
-		vma->vm_flags = VM_DATA_DEFAULT_FLAGS | VM_GROWSUP;
+		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
 		down_write(&current->mm->mmap_sem);
 		if (insert_vm_struct(current->mm, vma)) {
 			up_write(&current->mm->mmap_sem);
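
The VM_ACCOUNT flag matters now that growth goes through the common path:
acct_stack_growth() charges each page of growth against overcommit with
security_vm_enough_memory(), and that charge is released at unmap time only for vmas
marked VM_ACCOUNT, so the RBS vma has to carry the flag to keep the accounting balanced.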
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -896,7 +896,8 @@ void handle_ra_miss(struct address_space *mapping,
 unsigned long max_sane_readahead(unsigned long nr);
 
 /* Do stack extension */
-extern int expand_stack(struct vm_area_struct * vma, unsigned long address);
+extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
+extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
 
 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1508,11 +1508,15 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
 	return 0;
 }
 
-#ifdef CONFIG_STACK_GROWSUP
+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
 /*
- * vma is the first one with address > vma->vm_end.  Have to extend vma.
+ * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
+ * vma is the last one with address > vma->vm_end.  Have to extend vma.
  */
-int expand_stack(struct vm_area_struct * vma, unsigned long address)
+#ifdef CONFIG_STACK_GROWSUP
+static inline
+#endif
+int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
 	int error;
 
@@ -1550,6 +1554,13 @@ int expand_stack(struct vm_area_struct * vma, unsigned long address)
 	anon_vma_unlock(vma);
 	return error;
 }
+#endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
+
+#ifdef CONFIG_STACK_GROWSUP
+int expand_stack(struct vm_area_struct *vma, unsigned long address)
+{
+	return expand_upwards(vma, address);
+}
 
 struct vm_area_struct *
 find_extend_vma(struct mm_struct *mm, unsigned long addr)
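
The net effect of the uglified #ifdefs: with CONFIG_STACK_GROWSUP (parisc),
expand_upwards() is compiled static inline and reached through the new expand_stack()
wrapper; on ia64 it is an ordinary external function, declared in mm.h above, that the
fault handler calls directly; every other configuration compiles neither from this block.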