Commit a6d938f4 authored by Linus Torvalds

Import 2.1.51

parent 94520e42
@@ -186,16 +186,15 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	 *
 	 * First we check if it was the bootup rw-test, though..
 	 */
-	if (wp_works_ok < 0 && address == 0xc0000000 && (error_code & 1)) {
+	if (wp_works_ok < 0 && address == TASK_SIZE && (error_code & 1)) {
 		wp_works_ok = 1;
-		pg0[0] = pte_val(mk_pte(0, PAGE_SHARED));
+		pg0[0] = pte_val(mk_pte(TASK_SIZE, PAGE_SHARED));
 		flush_tlb();
 		goto out;
 	}
-	if (address < PAGE_SIZE) {
+	if (address < PAGE_SIZE)
 		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
-		pg0[0] = pte_val(mk_pte(0, PAGE_SHARED));
-	} else
+	else
 		printk(KERN_ALERT "Unable to handle kernel paging request");
 	printk(" at virtual address %08lx\n",address);
 	__asm__("movl %%cr3,%0" : "=r" (page));
......
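A note on the test above: it keys off bit 0 of the i386 page-fault error code. A short reference sketch of the decoding (x86 architectural behavior, not something defined in this hunk):

/*
 * i386 page-fault error code, low bits:
 *   bit 0 - 0: page not present, 1: protection violation
 *   bit 1 - 0: read access,      1: write access
 *   bit 2 - 0: supervisor mode,  1: user mode
 *
 * So (error_code & 1) means the page was mapped but write-protected,
 * which is exactly what the boot-time WP test provokes at TASK_SIZE.
 */
static inline int fault_was_protection(unsigned long error_code)
{
	return error_code & 1;
}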
/*
* uni_hash.tbl
*
* Do not edit this file; it was automatically generated by
*
* conmakehash cp437.uni > uni_hash.tbl
*
*/
#include <linux/types.h>
#include <linux/kd.h>
static u8 dfont_unicount[256] =
{
1, 1, 1, 1, 2, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
2, 2, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 2, 1, 1, 1, 1, 1,
1, 1, 1, 1, 2, 2, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 5, 1, 2, 1, 4, 1, 1,
1, 5, 1, 2, 1, 1, 1, 5,
1, 1, 2, 1, 1, 4, 1, 1,
1, 2, 1, 1, 1, 1, 1, 2,
1, 2, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 2,
1, 1, 1, 1, 1, 1, 1, 1,
2, 2, 1, 1, 2, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 2,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 2, 1, 1, 1, 1, 2, 1,
2, 1, 2, 1, 1, 2, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 2, 1
};
static u16 dfont_unitable[297] =
{
0x0000, 0x263a, 0x263b, 0x2665, 0x2666, 0x25c6, 0x2663, 0x2660,
0x2022, 0x25d8, 0x25cb, 0x25d9, 0x2642, 0x2640, 0x266a, 0x266b,
0x263c, 0x25b6, 0x25ba, 0x25c0, 0x25c4, 0x2195, 0x203c, 0x00b6,
0x00a7, 0x25ac, 0x21a8, 0x2191, 0x2193, 0x2192, 0x2190, 0x221f,
0x2194, 0x25b2, 0x25bc, 0x0020, 0x0021, 0x0022, 0x00a8, 0x0023,
0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b,
0x002c, 0x00b8, 0x002d, 0x00ad, 0x002e, 0x002f, 0x0030, 0x0031,
0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039,
0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, 0x0040, 0x0041,
0x00c0, 0x00c1, 0x00c2, 0x00c3, 0x0042, 0x0043, 0x00a9, 0x0044,
0x0045, 0x00c8, 0x00ca, 0x00cb, 0x0046, 0x0047, 0x0048, 0x0049,
0x00cc, 0x00cd, 0x00ce, 0x00cf, 0x004a, 0x004b, 0x212a, 0x004c,
0x004d, 0x004e, 0x004f, 0x00d2, 0x00d3, 0x00d4, 0x00d5, 0x0050,
0x0051, 0x0052, 0x00ae, 0x0053, 0x0054, 0x0055, 0x00d9, 0x00da,
0x00db, 0x0056, 0x0057, 0x0058, 0x0059, 0x00dd, 0x005a, 0x005b,
0x005c, 0x005d, 0x005e, 0x005f, 0xf804, 0x0060, 0x0061, 0x00e3,
0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069,
0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, 0x00f5, 0x0070,
0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078,
0x00d7, 0x0079, 0x00fd, 0x007a, 0x007b, 0x007c, 0x00a5, 0x007d,
0x007e, 0x2302, 0x00c7, 0x00fc, 0x00e9, 0x00e2, 0x00e4, 0x00e0,
0x00e5, 0x00e7, 0x00ea, 0x00eb, 0x00e8, 0x00ef, 0x00ee, 0x00ec,
0x00c4, 0x00c5, 0x212b, 0x00c9, 0x00e6, 0x00c6, 0x00f4, 0x00f6,
0x00f2, 0x00fb, 0x00f9, 0x00ff, 0x00d6, 0x00dc, 0x00a2, 0x00a3,
0x00a5, 0x20a7, 0x0192, 0x00e1, 0x00ed, 0x00f3, 0x00fa, 0x00f1,
0x00d1, 0x00aa, 0x00ba, 0x00bf, 0x2310, 0x00ac, 0x00bd, 0x00bc,
0x00a1, 0x00ab, 0x00bb, 0x2591, 0x2592, 0x2593, 0x2502, 0x2524,
0x2561, 0x2562, 0x2556, 0x2555, 0x2563, 0x2551, 0x2557, 0x255d,
0x255c, 0x255b, 0x2510, 0x2514, 0x2534, 0x252c, 0x251c, 0x2500,
0x253c, 0x255e, 0x255f, 0x255a, 0x2554, 0x2569, 0x2566, 0x2560,
0x2550, 0x256c, 0x2567, 0x2568, 0x2564, 0x2565, 0x2559, 0x2558,
0x2552, 0x2553, 0x256b, 0x256a, 0x2518, 0x250c, 0x2588, 0x2584,
0x258c, 0x2590, 0x2580, 0x03b1, 0x03b2, 0x00df, 0x0393, 0x03c0,
0x03a3, 0x03c3, 0x00b5, 0x03bc, 0x03c4, 0x03a6, 0x00d8, 0x0398,
0x03a9, 0x2126, 0x03b4, 0x221e, 0x03c6, 0x00f8, 0x03b5, 0x2229,
0x2261, 0x00b1, 0x2265, 0x2264, 0x2320, 0x2321, 0x00f7, 0x2248,
0x00b0, 0x2219, 0x00b7, 0x221a, 0x207f, 0x00b2, 0x25a0, 0xfffd,
0x00a0
};
\ No newline at end of file
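The two generated tables form a small two-level index: dfont_unicount[g] is the number of Unicode code points that map to font glyph g, and dfont_unitable stores those code points back to back, so the counts sum to the table length (297). A hedged sketch of a lookup over them (the helper name is ours, not the kernel's):

/* Hypothetical helper: return how many Unicode values glyph `g`
 * renders, and the offset of its run inside dfont_unitable[]. */
static int glyph_unicode_run(u8 g, int *offset)
{
	int i, off = 0;

	for (i = 0; i < g; i++)
		off += dfont_unicount[i];
	*offset = off;	/* run is dfont_unitable[off .. off+count-1] */
	return dfont_unicount[g];
}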
@@ -105,18 +105,6 @@ static inline char * get_page(void)
 	return res;
 }
 
-/*
- * Kernel pointers have redundant information, so we can use a
- * scheme where we can return either an error code or a dentry
- * pointer with the same return value.
- *
- * This should be a per-architecture thing, to allow different
- * error and pointer decisions.
- */
-#define ERR_PTR(err)	((void *)((long)(err)))
-#define PTR_ERR(ptr)	((long)(ptr))
-#define IS_ERR(ptr)	((unsigned long)(ptr) > (unsigned long)(-1000))
-
 inline void putname(char * name)
 {
 	if (name) {
......
@@ -248,7 +248,7 @@ exp_export(struct nfsctl_export *nxp)
 finish:
 	/* Release dentry */
-	if (err < 0 && dentry)
+	if (err < 0 && !IS_ERR(dentry))
 		dput(dentry);
 	/* Unlock hashtable */
......
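The last two hunks belong together: the ERR_PTR()/PTR_ERR()/IS_ERR() macros leave fs/namei.c (presumably centralized in a shared header; that file's diff isn't shown here), and exp_export() switches to IS_ERR() so an error-valued dentry is never dput(). A minimal standalone sketch of the scheme, reusing the macro bodies exactly as removed above (example_lookup is hypothetical):

#include <errno.h>

#define ERR_PTR(err)	((void *)((long)(err)))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) > (unsigned long)(-1000))

struct dentry { int unused; };
static struct dentry the_dentry;

/* A lookup that can fail returns either a real pointer or a small
 * negative errno encoded in the same pointer value. */
static struct dentry *example_lookup(int fail)
{
	if (fail)
		return ERR_PTR(-ENOENT);
	return &the_dentry;
}

/* Callers test with IS_ERR() before releasing, as exp_export now
 * does, and recover the errno with PTR_ERR():
 *
 *	struct dentry *dentry = example_lookup(1);
 *	if (IS_ERR(dentry))
 *		return PTR_ERR(dentry);
 */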
@@ -399,7 +399,7 @@ static struct inode * get_pipe_inode(void)
 	/*
 	 * Mark the inode dirty from the very beginning,
 	 * that way it will never be moved to the dirty
-	 * list because "make_inode_dirty()" will think
+	 * list because "mark_inode_dirty()" will think
 	 * that it already _is_ on the dirty list.
 	 */
 	inode->i_state = 1 << I_DIRTY;
@@ -439,7 +439,7 @@ int do_pipe(int *fd)
 	int error;
 	int i,j;
 
-	error = ENFILE;
+	error = -ENFILE;
 	f1 = get_empty_filp();
 	if (!f1)
 		goto no_files;
@@ -462,7 +462,10 @@ int do_pipe(int *fd)
 		goto close_f12_inode_i;
 	j = error;
 
+	error = -ENOMEM;
 	f1->f_dentry = f2->f_dentry = dget(d_alloc_root(inode, NULL));
+	if (!f1->f_dentry)
+		goto close_f12_inode_i_j;
 
 	/* read file */
 	f1->f_pos = f2->f_pos = 0;
@@ -480,6 +483,8 @@ int do_pipe(int *fd)
 	fd[1] = j;
 	return 0;
 
+close_f12_inode_i_j:
+	put_unused_fd(j);
 close_f12_inode_i:
 	put_unused_fd(i);
 close_f12_inode:
......
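All three pipe.c hunks implement one rule: every acquisition gets a cleanup label, and a failure jumps to the label that unwinds exactly what is held so far (the new close_f12_inode_i_j releases the second fd on top of everything below it). A distilled, standalone sketch of the pattern (the resources here are plain mallocs, purely illustrative):

#include <stdlib.h>

static int acquire_three(void **a, void **b, void **c)
{
	int error = -1;

	*a = malloc(16);
	if (!*a)
		goto fail;
	*b = malloc(16);
	if (!*b)
		goto free_a;
	*c = malloc(16);
	if (!*c)
		goto free_b;
	return 0;		/* success: caller owns a, b, c */

free_b:				/* labels unwind in reverse order */
	free(*b);
free_a:
	free(*a);
fail:
	return error;
}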
@@ -130,7 +130,7 @@ asmlinkage long sys_read(unsigned int fd, char * buf, unsigned long count)
 		goto bad_file;
 	dentry = file->f_dentry;
 	if (!dentry)
-		goto bad_file;
+		goto out;
 	inode = dentry->d_inode;
 	if (!inode)
 		goto out;
......
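The one-word fix above is a reference-count repair: once the struct file has been looked up, bailing out through bad_file would skip whatever release the out path performs. A generic sketch of the bug class (names and error values illustrative, not the kernel's):

static int refs;
static void get_ref(void) { refs++; }
static void put_ref(void) { refs--; }

static long read_like(int lookup_ok, int state_ok)
{
	long err = -9;			/* EBADF-ish: lookup failed */

	if (!lookup_ok)
		goto bad_file;		/* nothing acquired yet */
	get_ref();
	err = -5;			/* EIO-ish: later failure */
	if (!state_ok)
		goto out;		/* reference held: must drop it */
	err = 0;
out:
	put_ref();
bad_file:
	return err;
}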
@@ -811,14 +811,12 @@ static int do_remount_sb(struct super_block *sb, int flags, char *data)
 	if ((flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY))
 		if (!fs_may_remount_ro(sb))
 			return -EBUSY;
-	sb->s_flags = (flags & ~MS_RDONLY) | (sb->s_flags & MS_RDONLY);
 	if (sb->s_op && sb->s_op->remount_fs) {
 		retval = sb->s_op->remount_fs(sb, &flags, data);
 		if (retval)
 			return retval;
 	}
-	sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) |
-		(flags & MS_RMT_MASK);
+	sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
 	vfsmnt = lookup_vfsmnt(sb->s_dev);
 	if (vfsmnt)
 		vfsmnt->mnt_flags = sb->s_flags;
......
@@ -93,7 +93,7 @@ extern int max_files, nr_files;
 /*
  * Flags that can be altered by MS_REMOUNT
  */
-#define MS_RMT_MASK (MS_RDONLY|MS_MANDLOCK)
+#define MS_RMT_MASK (MS_RDONLY|MS_MANDLOCK|MS_NOATIME)
 
 /*
  * Magic mount flag number. Has to be or-ed to the flag values.
......
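The two hunks cooperate: do_remount_sb now applies the mask-and-merge only after the filesystem has accepted the new flags, and MS_NOATIME joins the set of remountable bits. The update rule is plain bit arithmetic; a worked example (flag values as commonly defined, but treat them as illustrative):

#include <stdio.h>

#define MS_RDONLY	1
#define MS_MANDLOCK	64
#define MS_NOATIME	1024
#define MS_RMT_MASK	(MS_RDONLY|MS_MANDLOCK|MS_NOATIME)

int main(void)
{
	unsigned long s_flags = MS_MANDLOCK;		/* current state */
	unsigned long flags = MS_RDONLY | 2048;		/* requested; 2048 not remountable */

	/* Same expression as the hunk: only MS_RMT_MASK bits may change. */
	s_flags = (s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);

	printf("%lu\n", s_flags);	/* 1: RDONLY set, MANDLOCK cleared, 2048 ignored */
	return 0;
}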
@@ -137,6 +137,7 @@ fh_put(struct svc_fh *fhp)
 	if (fhp->fh_dverified) {
 		fh_unlock(fhp);
 		dput(fhp->fh_handle.fh_dentry);
+		fhp->fh_dverified = 0;
 	}
 }
 #else
@@ -159,6 +160,7 @@ __fh_put(struct svc_fh *fhp, char *file, int line)
 	} else {
 		fh_unlock(fhp);
 		dput(dentry);
+		fhp->fh_dverified = 0;
 	}
 }
 #endif
......
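Both hunks make fh_put() idempotent: fh_dverified records that a dentry reference is held, so clearing it right after the dput() turns any second call into a no-op instead of a double release. The shape of the pattern, reduced to its core (types illustrative):

struct handle {
	int held;		/* like fh_dverified */
	void (*release)(void);	/* like dput() on the stored dentry */
};

static void handle_put(struct handle *h)
{
	if (h->held) {
		h->release();
		h->held = 0;	/* a second handle_put() now does nothing */
	}
}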
@@ -206,6 +206,7 @@ static int get_pid(unsigned long flags)
 static inline int dup_mmap(struct mm_struct * mm)
 {
 	struct vm_area_struct * mpnt, *tmp, **pprev;
+	int retval;
 
 	mm->mmap = mm->mmap_cache = NULL;
 	flush_cache_mm(current->mm);
@@ -213,19 +214,17 @@ static inline int dup_mmap(struct mm_struct * mm)
 	for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) {
 		struct dentry *dentry;
 
+		retval = -ENOMEM;
 		tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-		if (!tmp) {
-			exit_mmap(mm);
-			flush_tlb_mm(current->mm);
-			return -ENOMEM;
-		}
+		if (!tmp)
+			goto fail_nomem;
 		*tmp = *mpnt;
 		tmp->vm_flags &= ~VM_LOCKED;
 		tmp->vm_mm = mm;
 		tmp->vm_next = NULL;
 		dentry = tmp->vm_dentry;
 		if (dentry) {
-			dentry->d_count++;
+			dget(dentry);
 			if (tmp->vm_flags & VM_DENYWRITE)
 				dentry->d_inode->i_writecount--;
@@ -236,60 +235,79 @@ static inline int dup_mmap(struct mm_struct * mm)
 			mpnt->vm_next_share = tmp;
 			tmp->vm_pprev_share = &mpnt->vm_next_share;
 		}
-		if (copy_page_range(mm, current->mm, tmp)) {
-			exit_mmap(mm);
-			flush_tlb_mm(current->mm);
-			return -ENOMEM;
-		}
-		if (tmp->vm_ops && tmp->vm_ops->open)
+
+		/* Copy the pages, but defer checking for errors */
+		retval = copy_page_range(mm, current->mm, tmp);
+		if (!retval && tmp->vm_ops && tmp->vm_ops->open)
 			tmp->vm_ops->open(tmp);
 
-		/* Ok, finally safe to link it in. */
+		/*
+		 * Link in the new vma even if an error occurred,
+		 * so that exit_mmap() can clean up the mess.
+		 */
 		if((tmp->vm_next = *pprev) != NULL)
 			(*pprev)->vm_pprev = &tmp->vm_next;
 		*pprev = tmp;
 		tmp->vm_pprev = pprev;
 
 		pprev = &tmp->vm_next;
+		if (retval)
+			goto fail_nomem;
 	}
 	flush_tlb_mm(current->mm);
 	return 0;
+
+fail_nomem:
+	exit_mmap(mm);
+	flush_tlb_mm(current->mm);
+	return retval;
 }
 
 static inline int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
 {
-	if (!(clone_flags & CLONE_VM)) {
-		struct mm_struct * mm = kmem_cache_alloc(mm_cachep, SLAB_KERNEL);
-		if (!mm)
-			return -1;
-		*mm = *current->mm;
-		init_new_context(mm);
-		mm->count = 1;
-		mm->def_flags = 0;
-		mm->mmap_sem = MUTEX;
-		/* It has not run yet, so cannot be present in anyone's
-		 * cache or tlb.
-		 */
-		mm->cpu_vm_mask = 0;
-		tsk->mm = mm;
-		tsk->min_flt = tsk->maj_flt = 0;
-		tsk->cmin_flt = tsk->cmaj_flt = 0;
-		tsk->nswap = tsk->cnswap = 0;
-		if (new_page_tables(tsk))
-			goto free_mm;
-		if (dup_mmap(mm)) {
-			free_page_tables(mm);
-free_mm:
-			kmem_cache_free(mm_cachep, mm);
-			return -1;
-		}
-		return 0;
-	}
-	current->mm->count++;
-	SET_PAGE_DIR(tsk, current->mm->pgd);
+	struct mm_struct * mm;
+	int retval;
+
+	if (clone_flags & CLONE_VM) {
+		current->mm->count++;
+		SET_PAGE_DIR(tsk, current->mm->pgd);
+		return 0;
+	}
+
+	retval = -ENOMEM;
+	mm = kmem_cache_alloc(mm_cachep, SLAB_KERNEL);
+	if (!mm)
+		goto fail_nomem;
+	*mm = *current->mm;
+	init_new_context(mm);
+	mm->count = 1;
+	mm->def_flags = 0;
+	mm->mmap_sem = MUTEX;
+	/* It has not run yet, so cannot be present in anyone's
+	 * cache or tlb.
+	 */
+	mm->cpu_vm_mask = 0;
+	tsk->mm = mm;
+	tsk->min_flt = tsk->maj_flt = 0;
+	tsk->cmin_flt = tsk->cmaj_flt = 0;
+	tsk->nswap = tsk->cnswap = 0;
+	retval = new_page_tables(tsk);
+	if (retval)
+		goto free_mm;
+	retval = dup_mmap(mm);
+	if (retval)
+		goto free_pt;
 	return 0;
+
+free_pt:
+	free_page_tables(mm);
+free_mm:
+	tsk->mm = NULL;
+	kmem_cache_free(mm_cachep, mm);
+fail_nomem:
+	return retval;
 }
 
 static inline int copy_fs(unsigned long clone_flags, struct task_struct * tsk)
@@ -478,9 +496,9 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
 bad_fork_cleanup_sighand:
 	exit_sighand(p);
 bad_fork_cleanup_fs:
-	exit_fs(p);
+	exit_fs(p); /* blocking */
 bad_fork_cleanup_files:
-	exit_files(p);
+	exit_files(p); /* blocking */
 bad_fork_cleanup:
 	charge_uid(current, -1);
 	if (p->exec_domain && p->exec_domain->module)
......
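The fork.c rework normalizes everything onto the retval/goto idiom and makes the two copy_mm cases explicit: CLONE_VM shares the parent's mm (bump mm->count, reuse its page directory), while a plain fork builds a private copy and unwinds through free_pt/free_mm on failure. A compact sketch of the share-or-copy decision, with simplified types (illustrative only):

struct mm_like {
	int count;	/* number of tasks sharing this mm */
};

static struct mm_like mm_slot;	/* toy allocator backing store */
static struct mm_like *alloc_mm_like(void) { return &mm_slot; }

static int share_or_copy(struct mm_like *parent, struct mm_like **child, int clone_vm)
{
	struct mm_like *mm;

	if (clone_vm) {			/* CLONE_VM: one more sharer */
		parent->count++;
		*child = parent;
		return 0;
	}
	mm = alloc_mm_like();		/* plain fork: private copy */
	if (!mm)
		return -12;		/* -ENOMEM, as the new code reports */
	*mm = *parent;
	mm->count = 1;			/* sole owner of the copy */
	*child = mm;
	return 0;
}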
@@ -291,19 +291,20 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 	return error;
 }
 
-static inline void free_pte(pte_t page)
+/*
+ * Return indicates whether a page was freed so caller can adjust rss
+ */
+static inline int free_pte(pte_t page)
 {
 	if (pte_present(page)) {
 		unsigned long addr = pte_page(page);
 		if (MAP_NR(addr) >= max_mapnr || PageReserved(mem_map+MAP_NR(addr)))
-			return;
+			return 0;
 		free_page(addr);
-		if (current->mm->rss <= 0)
-			return;
-		current->mm->rss--;
-		return;
+		return 1;
 	}
 	swap_free(pte_val(page));
+	return 0;
 }
 
 static inline void forget_pte(pte_t page)
@@ -314,22 +315,24 @@ static inline void forget_pte(pte_t page)
 	}
 }
 
-static inline void zap_pte_range(pmd_t * pmd, unsigned long address, unsigned long size)
+static inline int zap_pte_range(pmd_t * pmd, unsigned long address, unsigned long size)
 {
 	pte_t * pte;
+	int freed;
 
 	if (pmd_none(*pmd))
-		return;
+		return 0;
 	if (pmd_bad(*pmd)) {
 		printk("zap_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
 		pmd_clear(pmd);
-		return;
+		return 0;
 	}
 	pte = pte_offset(pmd, address);
 	address &= ~PMD_MASK;
 	if (address + size > PMD_SIZE)
 		size = PMD_SIZE - address;
 	size >>= PAGE_SHIFT;
+	freed = 0;
 	for (;;) {
 		pte_t page;
 		if (!size)
@@ -340,32 +343,36 @@ static inline void zap_pte_range(pmd_t * pmd, unsigned long address, unsigned long size)
 		if (pte_none(page))
 			continue;
 		pte_clear(pte-1);
-		free_pte(page);
+		freed += free_pte(page);
 	}
+	return freed;
 }
 
-static inline void zap_pmd_range(pgd_t * dir, unsigned long address, unsigned long size)
+static inline int zap_pmd_range(pgd_t * dir, unsigned long address, unsigned long size)
 {
 	pmd_t * pmd;
 	unsigned long end;
+	int freed;
 
 	if (pgd_none(*dir))
-		return;
+		return 0;
 	if (pgd_bad(*dir)) {
 		printk("zap_pmd_range: bad pgd (%08lx)\n", pgd_val(*dir));
 		pgd_clear(dir);
-		return;
+		return 0;
 	}
 	pmd = pmd_offset(dir, address);
 	address &= ~PGDIR_MASK;
 	end = address + size;
 	if (end > PGDIR_SIZE)
 		end = PGDIR_SIZE;
+	freed = 0;
 	do {
-		zap_pte_range(pmd, address, end - address);
+		freed += zap_pte_range(pmd, address, end - address);
 		address = (address + PMD_SIZE) & PMD_MASK;
 		pmd++;
 	} while (address < end);
+	return freed;
 }
 /*
@@ -375,13 +382,22 @@ void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size)
 {
 	pgd_t * dir;
 	unsigned long end = address + size;
+	int freed = 0;
 
 	dir = pgd_offset(mm, address);
 	while (address < end) {
-		zap_pmd_range(dir, address, end - address);
+		freed += zap_pmd_range(dir, address, end - address);
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
 	}
+	/*
+	 * Update rss for the mm_struct (not necessarily current->mm)
+	 */
+	if (mm->rss > 0) {
+		mm->rss -= freed;
+		if (mm->rss < 0)
+			mm->rss = 0;
+	}
 }
......
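The memory.c hunks thread a single integer up the page-table walk: free_pte() reports whether it freed a page, zap_pte_range() and zap_pmd_range() sum the reports, and zap_page_range() charges the total to the owning mm's rss with a clamp at zero. Crucially, the rss adjustment now targets the mm being zapped rather than current->mm. A toy model of the propagation (standalone, illustrative):

#include <stdio.h>

static int free_one(int present)	/* like free_pte(): 1 if freed */
{
	return present ? 1 : 0;
}

static int zap_leaf_range(const int *present, int n)	/* like zap_pte_range() */
{
	int freed = 0, i;

	for (i = 0; i < n; i++)
		freed += free_one(present[i]);
	return freed;
}

int main(void)
{
	int rss = 2;				/* deliberately undercounted */
	int present[4] = { 1, 0, 1, 1 };	/* three resident pages */
	int freed = zap_leaf_range(present, 4);

	if (rss > 0) {				/* same clamp as zap_page_range() */
		rss -= freed;
		if (rss < 0)
			rss = 0;
	}
	printf("freed=%d rss=%d\n", freed, rss);	/* freed=3 rss=0 */
	return 0;
}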
@@ -373,10 +373,12 @@ unsigned long get_unmapped_area(unsigned long addr, unsigned long len)
  * Unmapping between to intermediate points, making a hole.
  *
  * Case 4 involves the creation of 2 new areas, for each side of
- * the hole.
+ * the hole.  If possible, we reuse the existing area rather than
+ * allocate a new one, and the return indicates whether the old
+ * area was reused.
  */
-static void unmap_fixup(struct vm_area_struct *area,
-	unsigned long addr, size_t len)
+static int unmap_fixup(struct vm_area_struct *area, unsigned long addr,
+	size_t len, struct vm_area_struct **extra)
 {
 	struct vm_area_struct *mpnt;
 	unsigned long end = addr + len;
@@ -391,7 +393,7 @@ static void unmap_fixup(struct vm_area_struct *area,
 		area->vm_ops->close(area);
 	if (area->vm_dentry)
 		dput(area->vm_dentry);
-	return;
+	return 0;
 	}
 
 	/* Work out to one of the ends. */
@@ -403,17 +405,16 @@ static void unmap_fixup(struct vm_area_struct *area,
 	} else {
 		/* Unmapping a hole: area->vm_start < addr <= end < area->vm_end */
 		/* Add end mapping -- leave beginning for below */
-		mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-		if (!mpnt)
-			return;
+		mpnt = *extra;
+		*extra = NULL;
 		mpnt->vm_mm = area->vm_mm;
 		mpnt->vm_start = end;
 		mpnt->vm_end = area->vm_end;
 		mpnt->vm_page_prot = area->vm_page_prot;
 		mpnt->vm_flags = area->vm_flags;
 		mpnt->vm_ops = area->vm_ops;
-		mpnt->vm_offset += (end - area->vm_start);
+		mpnt->vm_offset = area->vm_offset + (end - area->vm_start);
 		mpnt->vm_dentry = dget(area->vm_dentry);
 		if (mpnt->vm_ops && mpnt->vm_ops->open)
 			mpnt->vm_ops->open(mpnt);
@@ -421,18 +422,18 @@ static void unmap_fixup(struct vm_area_struct *area,
 		insert_vm_struct(current->mm, mpnt);
 	}
 
-	/* Construct whatever mapping is needed. */
-	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-	if (!mpnt)
-		return;
-	*mpnt = *area;
-	if (mpnt->vm_ops && mpnt->vm_ops->open)
-		mpnt->vm_ops->open(mpnt);
+	/* Close the current area ... */
 	if (area->vm_ops && area->vm_ops->close) {
+		end = area->vm_end; /* save new end */
 		area->vm_end = area->vm_start;
 		area->vm_ops->close(area);
+		area->vm_end = end;
 	}
-	insert_vm_struct(current->mm, mpnt);
+	/* ... then reopen and reinsert. */
+	if (area->vm_ops && area->vm_ops->open)
+		area->vm_ops->open(area);
+	insert_vm_struct(current->mm, area);
+	return 1;
 }
 
 asmlinkage int sys_munmap(unsigned long addr, size_t len)
@@ -452,7 +453,8 @@ asmlinkage int sys_munmap(unsigned long addr, size_t len)
  */
 int do_munmap(unsigned long addr, size_t len)
 {
-	struct vm_area_struct *mpnt, *next, *free;
+	struct vm_area_struct *mpnt, *next, *free, *extra;
+	int freed;
 
 	if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
 		return -EINVAL;
@@ -471,6 +473,14 @@ int do_munmap(unsigned long addr, size_t len)
 	if (!mpnt)
 		return 0;
 
+	/*
+	 * We may need one additional vma to fix up the mappings ...
+	 * and this is the last chance for an easy error exit.
+	 */
+	extra = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	if (!extra)
+		return -ENOMEM;
+
 	next = mpnt->vm_next;
 
 	/* we have mpnt->vm_next = next and addr < mpnt->vm_end */
@@ -486,19 +496,18 @@ int do_munmap(unsigned long addr, size_t len)
 		free = mpnt;
 		mpnt = next;
 	}
-	if (free == NULL)
-		return 0;
 
 	/* Ok - we have the memory areas we should free on the 'free' list,
 	 * so release them, and unmap the page range..
 	 * If the one of the segments is only being partially unmapped,
 	 * it will put new vm_area_struct(s) into the address space.
 	 */
-	do {
+	freed = 0;
+	while ((mpnt = free) != NULL) {
 		unsigned long st, end, size;
 
-		mpnt = free;
 		free = free->vm_next;
+		freed = 1;
 
 		remove_shared_vm_struct(mpnt);
@@ -514,12 +523,19 @@ int do_munmap(unsigned long addr, size_t len)
 		zap_page_range(current->mm, st, size);
 		flush_tlb_range(current->mm, st, end);
 
-		unmap_fixup(mpnt, st, size);
+		/*
+		 * Fix the mapping, and free the old area if it wasn't reused.
+		 */
+		if (!unmap_fixup(mpnt, st, size, &extra))
+			kmem_cache_free(vm_area_cachep, mpnt);
+	}
 
-		kmem_cache_free(vm_area_cachep, mpnt);
-	} while (free);
+	/* Release the extra vma struct if it wasn't used */
+	if (extra)
+		kmem_cache_free(vm_area_cachep, extra);
 
-	current->mm->mmap_cache = NULL;	/* Kill the cache. */
+	if (freed)
+		current->mm->mmap_cache = NULL;	/* Kill the cache. */
 	return 0;
 }
......
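The do_munmap change is a commit-point fix: the one allocation that can fail (the spare vma needed when a hole splits an area) now happens before any mapping is touched, and unmap_fixup() reports whether it consumed the spare or reused the old area, so exactly one of the two is freed. A distilled sketch of the pre-allocate-then-commit pattern (illustrative, not kernel code):

#include <stdlib.h>

struct buf { char space[64]; };

static struct buf *owner;		/* stands in for the vma list */

static void destructive_work(struct buf **spare)
{
	/* The "hole" case: ownership of the spare transfers away. */
	owner = *spare;
	*spare = NULL;
}

static int operate(void)
{
	/* Allocate while failure is still cheap: nothing modified yet. */
	struct buf *spare = malloc(sizeof(*spare));
	if (!spare)
		return -12;		/* -ENOMEM */

	destructive_work(&spare);	/* past the commit point: no error paths */

	if (spare)			/* not consumed: give it back */
		free(spare);
	free(owner);			/* toy cleanup of the transferred buffer */
	owner = NULL;
	return 0;
}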