Commit b585ca3e authored by Ingo Molnar

- make i_mmap and i_mmap_shared a list.h list

- make vma->vm_next_share and vma->vm_pprev_share
  a proper list.h list as well.
parent 6d22c10f
...@@ -989,8 +989,8 @@ static int __init init_blkmtd(void) ...@@ -989,8 +989,8 @@ static int __init init_blkmtd(void)
rawdevice->as.nrpages = 0; rawdevice->as.nrpages = 0;
rawdevice->as.a_ops = &blkmtd_aops; rawdevice->as.a_ops = &blkmtd_aops;
rawdevice->as.host = inode; rawdevice->as.host = inode;
rawdevice->as.i_mmap = NULL; INIT_LIST_HEAD(&rawdevice->as.i_mmap);
rawdevice->as.i_mmap_shared = NULL; INIT_LIST_HEAD(&rawdevice->as.i_mmap_shared);
spin_lock_init(&rawdevice->as.i_shared_lock); spin_lock_init(&rawdevice->as.i_shared_lock);
rawdevice->as.gfp_mask = GFP_KERNEL; rawdevice->as.gfp_mask = GFP_KERNEL;
rawdevice->file = file; rawdevice->file = file;
......
...@@ -144,6 +144,8 @@ void inode_init_once(struct inode *inode) ...@@ -144,6 +144,8 @@ void inode_init_once(struct inode *inode)
INIT_LIST_HEAD(&inode->i_devices); INIT_LIST_HEAD(&inode->i_devices);
sema_init(&inode->i_sem, 1); sema_init(&inode->i_sem, 1);
spin_lock_init(&inode->i_data.i_shared_lock); spin_lock_init(&inode->i_data.i_shared_lock);
INIT_LIST_HEAD(&inode->i_data.i_mmap);
INIT_LIST_HEAD(&inode->i_data.i_mmap_shared);
} }
static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
......
...@@ -1459,7 +1459,7 @@ int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l) ...@@ -1459,7 +1459,7 @@ int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
(inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) { (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) {
struct address_space *mapping = inode->i_mapping; struct address_space *mapping = inode->i_mapping;
if (mapping->i_mmap_shared != NULL) { if (!list_empty(&mapping->i_mmap_shared)) {
error = -EAGAIN; error = -EAGAIN;
goto out_putf; goto out_putf;
} }
...@@ -1615,7 +1615,7 @@ int fcntl_setlk64(unsigned int fd, unsigned int cmd, struct flock64 *l) ...@@ -1615,7 +1615,7 @@ int fcntl_setlk64(unsigned int fd, unsigned int cmd, struct flock64 *l)
(inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) { (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) {
struct address_space *mapping = inode->i_mapping; struct address_space *mapping = inode->i_mapping;
if (mapping->i_mmap_shared != NULL) { if (!list_empty(&mapping->i_mmap_shared)) {
error = -EAGAIN; error = -EAGAIN;
goto out_putf; goto out_putf;
} }
......
...@@ -377,8 +377,8 @@ struct address_space { ...@@ -377,8 +377,8 @@ struct address_space {
unsigned long nrpages; /* number of total pages */ unsigned long nrpages; /* number of total pages */
struct address_space_operations *a_ops; /* methods */ struct address_space_operations *a_ops; /* methods */
struct inode *host; /* owner: inode, block_device */ struct inode *host; /* owner: inode, block_device */
struct vm_area_struct *i_mmap; /* list of private mappings */ list_t i_mmap; /* list of private mappings */
struct vm_area_struct *i_mmap_shared; /* list of shared mappings */ list_t i_mmap_shared; /* list of shared mappings */
spinlock_t i_shared_lock; /* and spinlock protecting it */ spinlock_t i_shared_lock; /* and spinlock protecting it */
int gfp_mask; /* how to allocate the pages */ int gfp_mask; /* how to allocate the pages */
}; };
......
...@@ -61,8 +61,7 @@ struct vm_area_struct { ...@@ -61,8 +61,7 @@ struct vm_area_struct {
* one of the address_space->i_mmap{,shared} lists, * one of the address_space->i_mmap{,shared} lists,
* for shm areas, the list of attaches, otherwise unused. * for shm areas, the list of attaches, otherwise unused.
*/ */
struct vm_area_struct *vm_next_share; list_t shared;
struct vm_area_struct **vm_pprev_share;
/* Function pointers to deal with this struct. */ /* Function pointers to deal with this struct. */
struct vm_operations_struct * vm_ops; struct vm_operations_struct * vm_ops;
......
...@@ -219,11 +219,7 @@ static inline int dup_mmap(struct mm_struct * mm) ...@@ -219,11 +219,7 @@ static inline int dup_mmap(struct mm_struct * mm)
/* insert tmp into the share list, just after mpnt */ /* insert tmp into the share list, just after mpnt */
spin_lock(&inode->i_mapping->i_shared_lock); spin_lock(&inode->i_mapping->i_shared_lock);
if((tmp->vm_next_share = mpnt->vm_next_share) != NULL) list_add_tail(&tmp->shared, &mpnt->shared);
mpnt->vm_next_share->vm_pprev_share =
&tmp->vm_next_share;
mpnt->vm_next_share = tmp;
tmp->vm_pprev_share = &mpnt->vm_next_share;
spin_unlock(&inode->i_mapping->i_shared_lock); spin_unlock(&inode->i_mapping->i_shared_lock);
} }
......
...@@ -1399,7 +1399,7 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t * ...@@ -1399,7 +1399,7 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
* virtual addresses, take care about potential aliasing * virtual addresses, take care about potential aliasing
* before reading the page on the kernel side. * before reading the page on the kernel side.
*/ */
if (mapping->i_mmap_shared != NULL) if (!list_empty(&mapping->i_mmap_shared))
flush_dcache_page(page); flush_dcache_page(page);
/* /*
......
...@@ -1032,31 +1032,35 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma, ...@@ -1032,31 +1032,35 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
return -1; return -1;
} }
static void vmtruncate_list(struct vm_area_struct *mpnt, unsigned long pgoff) static void vmtruncate_list(list_t *head, unsigned long pgoff)
{ {
do { unsigned long start, end, len, diff;
unsigned long start = mpnt->vm_start; struct vm_area_struct *vma;
unsigned long end = mpnt->vm_end; list_t *curr;
unsigned long len = end - start;
unsigned long diff; list_for_each(curr, head) {
vma = list_entry(curr, struct vm_area_struct, shared);
start = vma->vm_start;
end = vma->vm_end;
len = end - start;
/* mapping wholly truncated? */ /* mapping wholly truncated? */
if (mpnt->vm_pgoff >= pgoff) { if (vma->vm_pgoff >= pgoff) {
zap_page_range(mpnt, start, len); zap_page_range(vma, start, len);
continue; continue;
} }
/* mapping wholly unaffected? */ /* mapping wholly unaffected? */
len = len >> PAGE_SHIFT; len = len >> PAGE_SHIFT;
diff = pgoff - mpnt->vm_pgoff; diff = pgoff - vma->vm_pgoff;
if (diff >= len) if (diff >= len)
continue; continue;
/* Ok, partially affected.. */ /* Ok, partially affected.. */
start += diff << PAGE_SHIFT; start += diff << PAGE_SHIFT;
len = (len - diff) << PAGE_SHIFT; len = (len - diff) << PAGE_SHIFT;
zap_page_range(mpnt, start, len); zap_page_range(vma, start, len);
} while ((mpnt = mpnt->vm_next_share) != NULL); }
} }
/* /*
...@@ -1077,14 +1081,14 @@ int vmtruncate(struct inode * inode, loff_t offset) ...@@ -1077,14 +1081,14 @@ int vmtruncate(struct inode * inode, loff_t offset)
goto do_expand; goto do_expand;
inode->i_size = offset; inode->i_size = offset;
spin_lock(&mapping->i_shared_lock); spin_lock(&mapping->i_shared_lock);
if (!mapping->i_mmap && !mapping->i_mmap_shared) if (list_empty(&mapping->i_mmap) && list_empty(&mapping->i_mmap_shared))
goto out_unlock; goto out_unlock;
pgoff = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; pgoff = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
if (mapping->i_mmap != NULL) if (!list_empty(&mapping->i_mmap))
vmtruncate_list(mapping->i_mmap, pgoff); vmtruncate_list(&mapping->i_mmap, pgoff);
if (mapping->i_mmap_shared != NULL) if (!list_empty(&mapping->i_mmap_shared))
vmtruncate_list(mapping->i_mmap_shared, pgoff); vmtruncate_list(&mapping->i_mmap_shared, pgoff);
out_unlock: out_unlock:
spin_unlock(&mapping->i_shared_lock); spin_unlock(&mapping->i_shared_lock);
......
...@@ -101,9 +101,7 @@ static inline void __remove_shared_vm_struct(struct vm_area_struct *vma) ...@@ -101,9 +101,7 @@ static inline void __remove_shared_vm_struct(struct vm_area_struct *vma)
struct inode *inode = file->f_dentry->d_inode; struct inode *inode = file->f_dentry->d_inode;
if (vma->vm_flags & VM_DENYWRITE) if (vma->vm_flags & VM_DENYWRITE)
atomic_inc(&inode->i_writecount); atomic_inc(&inode->i_writecount);
if(vma->vm_next_share) list_del_init(&vma->shared);
vma->vm_next_share->vm_pprev_share = vma->vm_pprev_share;
*vma->vm_pprev_share = vma->vm_next_share;
} }
} }
...@@ -308,20 +306,14 @@ static inline void __vma_link_file(struct vm_area_struct * vma) ...@@ -308,20 +306,14 @@ static inline void __vma_link_file(struct vm_area_struct * vma)
if (file) { if (file) {
struct inode * inode = file->f_dentry->d_inode; struct inode * inode = file->f_dentry->d_inode;
struct address_space *mapping = inode->i_mapping; struct address_space *mapping = inode->i_mapping;
struct vm_area_struct **head;
if (vma->vm_flags & VM_DENYWRITE) if (vma->vm_flags & VM_DENYWRITE)
atomic_dec(&inode->i_writecount); atomic_dec(&inode->i_writecount);
head = &mapping->i_mmap;
if (vma->vm_flags & VM_SHARED) if (vma->vm_flags & VM_SHARED)
head = &mapping->i_mmap_shared; list_add_tail(&vma->shared, &mapping->i_mmap_shared);
else
/* insert vma into inode's share list */ list_add_tail(&vma->shared, &mapping->i_mmap);
if((vma->vm_next_share = *head) != NULL)
(*head)->vm_pprev_share = &vma->vm_next_share;
*head = vma;
vma->vm_pprev_share = head;
} }
} }
......
...@@ -904,7 +904,7 @@ static void do_shmem_file_read(struct file * filp, loff_t *ppos, read_descriptor ...@@ -904,7 +904,7 @@ static void do_shmem_file_read(struct file * filp, loff_t *ppos, read_descriptor
if ((desc->error = shmem_getpage(inode, index, &page))) if ((desc->error = shmem_getpage(inode, index, &page)))
break; break;
if (mapping->i_mmap_shared != NULL) if (!list_empty(&mapping->i_mmap_shared))
flush_dcache_page(page); flush_dcache_page(page);
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment