Commit 5096494f authored by Andrew Morton, committed by Linus Torvalds

[PATCH] Interface to invalidate regions of mmaps

From: "Paul E. McKenney" <paulmck@us.ibm.com>

The patch reworks and generalises vmtruncate_list() a bit to create an API
which invalidates a specified portion of an address_space, permitting
distributed filesystems to maintain POSIX semantics when a file mmap()ed on
one client is modified on another client.
parent 6e20adb2
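
As context for the API: a distributed filesystem that learns another node has written a byte range of a shared file can shoot down any local user mappings of that range, forcing the next access to fault and fetch fresh data. A minimal sketch follows; the callback demofs_remote_write_notify() and its arguments are hypothetical, invented here for illustration and not part of this patch.

/*
 * Hypothetical caller (not in this patch): another client wrote bytes
 * [pos, pos + len) of a shared file; unmap that range from every
 * local mapping so the next fault refetches current data.
 */
static void demofs_remote_write_notify(struct inode *inode,
				       loff_t pos, loff_t len)
{
	/* invalidate_mmap_range() rounds pos down and len up to
	 * PAGE_SIZE boundaries internally. */
	invalidate_mmap_range(inode->i_mapping, pos, len);
}

Note that this only zaps page-table entries; the cached pages themselves still need to be dropped or refreshed separately (vmtruncate(), for example, follows its call with truncate_inode_pages()).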
@@ -421,6 +421,9 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 int zeromap_page_range(struct vm_area_struct *vma, unsigned long from,
 			unsigned long size, pgprot_t prot);
+extern void invalidate_mmap_range(struct address_space *mapping,
+				  loff_t const holebegin,
+				  loff_t const holelen);
 extern int vmtruncate(struct inode * inode, loff_t offset);
 extern pmd_t *FASTCALL(__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address));
 extern pte_t *FASTCALL(pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
@@ -120,6 +120,7 @@ EXPORT_SYMBOL(mem_map);
 EXPORT_SYMBOL(max_mapnr);
 #endif
 EXPORT_SYMBOL(high_memory);
+EXPORT_SYMBOL_GPL(invalidate_mmap_range);
 EXPORT_SYMBOL(vmtruncate);
 EXPORT_SYMBOL(find_vma);
 EXPORT_SYMBOL(get_unmapped_area);
@@ -1062,35 +1062,75 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
 	return ret;
 }
 
-static void vmtruncate_list(struct list_head *head, unsigned long pgoff)
+/*
+ * Helper function for invalidate_mmap_range().
+ * Both hba and hlen are page numbers in PAGE_SIZE units.
+ * An hlen of zero blows away the entire portion of the file after hba.
+ */
+static void
+invalidate_mmap_range_list(struct list_head *head,
+			   unsigned long const hba,
+			   unsigned long const hlen)
 {
-	unsigned long start, end, len, diff;
-	struct vm_area_struct *vma;
 	struct list_head *curr;
+	unsigned long hea;	/* last page of hole. */
+	unsigned long vba;
+	unsigned long vea;	/* last page of corresponding uva hole. */
+	struct vm_area_struct *vp;
+	unsigned long zba;
+	unsigned long zea;
 
+	hea = hba + hlen - 1;	/* avoid overflow. */
+	if (hea < hba)
+		hea = ULONG_MAX;
 	list_for_each(curr, head) {
-		vma = list_entry(curr, struct vm_area_struct, shared);
-		start = vma->vm_start;
-		end = vma->vm_end;
-		len = end - start;
-
-		/* mapping wholly truncated? */
-		if (vma->vm_pgoff >= pgoff) {
-			zap_page_range(vma, start, len);
-			continue;
-		}
-
-		/* mapping wholly unaffected? */
-		len = len >> PAGE_SHIFT;
-		diff = pgoff - vma->vm_pgoff;
-		if (diff >= len)
-			continue;
-
-		/* Ok, partially affected.. */
-		start += diff << PAGE_SHIFT;
-		len = (len - diff) << PAGE_SHIFT;
-		zap_page_range(vma, start, len);
+		vp = list_entry(curr, struct vm_area_struct, shared);
+		vba = vp->vm_pgoff;
+		vea = vba + ((vp->vm_end - vp->vm_start) >> PAGE_SHIFT) - 1;
+		if (hea < vba || vea < hba)
+			continue;	/* Mapping disjoint from hole. */
+		zba = (hba <= vba) ? vba : hba;
+		zea = (vea <= hea) ? vea : hea;
+		zap_page_range(vp,
+			       ((zba - vba) << PAGE_SHIFT) + vp->vm_start,
+			       (zea - zba + 1) << PAGE_SHIFT);
 	}
 }
 
+/**
+ * invalidate_mmap_range - invalidate the portion of all mmaps
+ * in the specified address_space corresponding to the specified
+ * page range in the underlying file.
+ * @mapping: the address space containing mmaps to be invalidated.
+ * @holebegin: byte in first page to invalidate, relative to the start of
+ *	the underlying file.  This will be rounded down to a PAGE_SIZE
+ *	boundary.  Note that this is different from vmtruncate(), which
+ *	must keep the partial page.  In contrast, we must get rid of
+ *	partial pages.
+ * @holelen: size of prospective hole in bytes.  This will be rounded
+ *	up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
+ *	end of the file.
+ */
+void invalidate_mmap_range(struct address_space *mapping,
+	loff_t const holebegin, loff_t const holelen)
+{
+	unsigned long hba = holebegin >> PAGE_SHIFT;
+	unsigned long hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	/* Check for overflow. */
+	if (sizeof(holelen) > sizeof(hlen)) {
+		long long holeend =
+			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+		if (holeend & ~(long long)ULONG_MAX)
+			hlen = ULONG_MAX - hba + 1;
+	}
+	down(&mapping->i_shared_sem);
+	if (unlikely(!list_empty(&mapping->i_mmap)))
+		invalidate_mmap_range_list(&mapping->i_mmap, hba, hlen);
+	if (unlikely(!list_empty(&mapping->i_mmap_shared)))
+		invalidate_mmap_range_list(&mapping->i_mmap_shared, hba, hlen);
+	up(&mapping->i_shared_sem);
+}
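
To make the helper's clipping arithmetic concrete, here is a small standalone sketch, my illustration rather than kernel code, that runs the same vba/vea/zba/zea computation on sample numbers: a hole covering file pages 3 through 5 intersected with a VMA mapping file pages 4 through 9 zaps exactly file pages 4 and 5.

#include <stdio.h>

/* Standalone illustration of invalidate_mmap_range_list()'s range
 * clipping; all values are file page numbers, ranges inclusive. */
int main(void)
{
	unsigned long hba = 3, hea = 5;		/* hole: file pages 3..5 */
	unsigned long vba = 4, vea = 9;		/* vma:  file pages 4..9 */
	unsigned long zba, zea;

	if (hea < vba || vea < hba) {
		printf("disjoint, nothing to zap\n");
		return 0;
	}
	zba = (hba <= vba) ? vba : hba;		/* max(hba, vba) = 4 */
	zea = (vea <= hea) ? vea : hea;		/* min(vea, hea) = 5 */

	/* The zap starts (zba - vba) pages into the vma and covers
	 * (zea - zba + 1) pages: offset 0, length 2 pages here. */
	printf("zap file pages %lu..%lu (%lu pages)\n",
	       zba, zea, zea - zba + 1);
	return 0;
}

The open-coded max/min keeps everything in unsigned page numbers, and the earlier hea = hba + hlen - 1 computation wraps when hlen is zero or the sum overflows; the hea < hba check then promotes hea to ULONG_MAX, giving the "rest of the file" semantic the helper's comment describes.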
@@ -1103,20 +1143,13 @@ static void vmtruncate_list(struct list_head *head, unsigned long pgoff)
  */
 int vmtruncate(struct inode * inode, loff_t offset)
 {
-	unsigned long pgoff;
 	struct address_space *mapping = inode->i_mapping;
 	unsigned long limit;
 
 	if (inode->i_size < offset)
 		goto do_expand;
 	i_size_write(inode, offset);
-	pgoff = (offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	down(&mapping->i_shared_sem);
-	if (unlikely(!list_empty(&mapping->i_mmap)))
-		vmtruncate_list(&mapping->i_mmap, pgoff);
-	if (unlikely(!list_empty(&mapping->i_mmap_shared)))
-		vmtruncate_list(&mapping->i_mmap_shared, pgoff);
-	up(&mapping->i_shared_sem);
+	invalidate_mmap_range(mapping, offset + PAGE_SIZE - 1, 0);
 	truncate_inode_pages(mapping, offset);
 	goto out_truncate;
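
The vmtruncate() conversion also shows why the two rounding rules differ: truncation must preserve the partial page at the new end-of-file, so it passes offset + PAGE_SIZE - 1, which invalidate_mmap_range() rounds down to the first page lying wholly beyond offset, and a holelen of zero extends the hole to the end of the file. A worked example, assuming 4096-byte pages:

/*
 * vmtruncate(inode, 10000) with PAGE_SIZE == 4096:
 *
 *	holebegin = 10000 + 4096 - 1	= 14095
 *	hba	  = 14095 >> PAGE_SHIFT	= 3
 *	hlen	  = 0	(hea becomes ULONG_MAX: hole runs to EOF)
 *
 * File pages 0..2 stay mapped; page 2 is the partial page holding
 * bytes 8192..9999 of the shortened file and must be kept, as the
 * kernel-doc above notes.  Pages 3 and up are zapped from every
 * mapping, and truncate_inode_pages() then drops the cached pages.
 */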