Commit f1820361 authored by Kirill A. Shutemov, committed by Linus Torvalds

mm: implement ->map_pages for page cache

filemap_map_pages() is a generic implementation of ->map_pages() for
filesystems that use the page cache.

It should be safe to use filemap_map_pages() for ->map_pages() if the
filesystem uses filemap_fault() for ->fault().
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Ning Qu <quning@gmail.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8c6e50b0
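For illustration only (not part of this patch): a minimal sketch of how a filesystem that already uses filemap_fault() would adopt the new helper. The foo_* names (foo_file_vm_ops, foo_page_mkwrite, foo_file_mmap) are placeholders, not identifiers from this commit; the actual per-filesystem hunks follow below.

/* Hypothetical wiring sketch; foo_* names are placeholders. */
static const struct vm_operations_struct foo_file_vm_ops = {
	.fault		= filemap_fault,	/* read the page synchronously on a hard fault */
	.map_pages	= filemap_map_pages,	/* new: map pages already in the page cache around the fault */
	.page_mkwrite	= foo_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};

static int foo_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &foo_file_vm_ops;
	return 0;
}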
@@ -832,6 +832,7 @@ static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
 static const struct vm_operations_struct v9fs_file_vm_ops = {
 	.fault = filemap_fault,
+	.map_pages = filemap_map_pages,
 	.page_mkwrite = v9fs_vm_page_mkwrite,
 	.remap_pages = generic_file_remap_pages,
 };

@@ -839,6 +840,7 @@ static const struct vm_operations_struct v9fs_file_vm_ops = {
 static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
 	.close = v9fs_mmap_vm_close,
 	.fault = filemap_fault,
+	.map_pages = filemap_map_pages,
 	.page_mkwrite = v9fs_vm_page_mkwrite,
 	.remap_pages = generic_file_remap_pages,
 };

@@ -2025,6 +2025,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 static const struct vm_operations_struct btrfs_file_vm_ops = {
 	.fault = filemap_fault,
+	.map_pages = filemap_map_pages,
 	.page_mkwrite = btrfs_page_mkwrite,
 	.remap_pages = generic_file_remap_pages,
 };

@@ -3113,6 +3113,7 @@ cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 static struct vm_operations_struct cifs_file_vm_ops = {
 	.fault = filemap_fault,
+	.map_pages = filemap_map_pages,
 	.page_mkwrite = cifs_page_mkwrite,
 	.remap_pages = generic_file_remap_pages,
 };

@@ -200,6 +200,7 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
 static const struct vm_operations_struct ext4_file_vm_ops = {
 	.fault = filemap_fault,
+	.map_pages = filemap_map_pages,
 	.page_mkwrite = ext4_page_mkwrite,
 	.remap_pages = generic_file_remap_pages,
 };

@@ -84,6 +84,7 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
 static const struct vm_operations_struct f2fs_file_vm_ops = {
 	.fault = filemap_fault,
+	.map_pages = filemap_map_pages,
 	.page_mkwrite = f2fs_vm_page_mkwrite,
 	.remap_pages = generic_file_remap_pages,
 };

@@ -2117,6 +2117,7 @@ static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 static const struct vm_operations_struct fuse_file_vm_ops = {
 	.close = fuse_vma_close,
 	.fault = filemap_fault,
+	.map_pages = filemap_map_pages,
 	.page_mkwrite = fuse_page_mkwrite,
 	.remap_pages = generic_file_remap_pages,
 };

@@ -494,6 +494,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 static const struct vm_operations_struct gfs2_vm_ops = {
 	.fault = filemap_fault,
+	.map_pages = filemap_map_pages,
 	.page_mkwrite = gfs2_page_mkwrite,
 	.remap_pages = generic_file_remap_pages,
 };

@@ -617,6 +617,7 @@ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 static const struct vm_operations_struct nfs_file_vm_ops = {
 	.fault = filemap_fault,
+	.map_pages = filemap_map_pages,
 	.page_mkwrite = nfs_vm_page_mkwrite,
 	.remap_pages = generic_file_remap_pages,
 };

@@ -134,6 +134,7 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 static const struct vm_operations_struct nilfs_file_vm_ops = {
 	.fault = filemap_fault,
+	.map_pages = filemap_map_pages,
 	.page_mkwrite = nilfs_page_mkwrite,
 	.remap_pages = generic_file_remap_pages,
 };

@@ -1538,6 +1538,7 @@ static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma,
 static const struct vm_operations_struct ubifs_file_vm_ops = {
 	.fault = filemap_fault,
+	.map_pages = filemap_map_pages,
 	.page_mkwrite = ubifs_vm_page_mkwrite,
 	.remap_pages = generic_file_remap_pages,
 };

@@ -1483,6 +1483,7 @@ const struct file_operations xfs_dir_file_operations = {
 static const struct vm_operations_struct xfs_file_vm_ops = {
 	.fault = filemap_fault,
+	.map_pages = filemap_map_pages,
 	.page_mkwrite = xfs_vm_page_mkwrite,
 	.remap_pages = generic_file_remap_pages,
 };

@@ -1847,6 +1847,7 @@ extern void truncate_inode_pages_final(struct address_space *);
 /* generic vm_area_ops exported for stackable file systems */
 extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
+extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf);
 extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);

 /* mm/page-writeback.c */

@@ -33,6 +33,7 @@
 #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
 #include <linux/memcontrol.h>
 #include <linux/cleancache.h>
+#include <linux/rmap.h>
 #include "internal.h"

 #define CREATE_TRACE_POINTS

@@ -2064,6 +2065,78 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 }
 EXPORT_SYMBOL(filemap_fault);

+void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct radix_tree_iter iter;
+	void **slot;
+	struct file *file = vma->vm_file;
+	struct address_space *mapping = file->f_mapping;
+	loff_t size;
+	struct page *page;
+	unsigned long address = (unsigned long) vmf->virtual_address;
+	unsigned long addr;
+	pte_t *pte;
+
+	rcu_read_lock();
+	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, vmf->pgoff) {
+		if (iter.index > vmf->max_pgoff)
+			break;
+repeat:
+		page = radix_tree_deref_slot(slot);
+		if (unlikely(!page))
+			goto next;
+		if (radix_tree_exception(page)) {
+			if (radix_tree_deref_retry(page))
+				break;
+			else
+				goto next;
+		}
+
+		if (!page_cache_get_speculative(page))
+			goto repeat;
+
+		/* Has the page moved? */
+		if (unlikely(page != *slot)) {
+			page_cache_release(page);
+			goto repeat;
+		}
+
+		if (!PageUptodate(page) ||
+				PageReadahead(page) ||
+				PageHWPoison(page))
+			goto skip;
+		if (!trylock_page(page))
+			goto skip;
+
+		if (page->mapping != mapping || !PageUptodate(page))
+			goto unlock;
+
+		size = i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1;
+		if (page->index >= size >> PAGE_CACHE_SHIFT)
+			goto unlock;
+
+		pte = vmf->pte + page->index - vmf->pgoff;
+		if (!pte_none(*pte))
+			goto unlock;
+
+		if (file->f_ra.mmap_miss > 0)
+			file->f_ra.mmap_miss--;
+		addr = address + (page->index - vmf->pgoff) * PAGE_SIZE;
+		do_set_pte(vma, addr, page, pte, false, false);
+		unlock_page(page);
+		goto next;
+unlock:
+		unlock_page(page);
+skip:
+		page_cache_release(page);
+next:
+		if (iter.index == vmf->max_pgoff)
+			break;
+	}
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(filemap_map_pages);
+
 int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct page *page = vmf->page;

@@ -2093,6 +2166,7 @@ EXPORT_SYMBOL(filemap_page_mkwrite);
 const struct vm_operations_struct generic_file_vm_ops = {
 	.fault = filemap_fault,
+	.map_pages = filemap_map_pages,
 	.page_mkwrite = filemap_page_mkwrite,
 	.remap_pages = generic_file_remap_pages,
 };

@@ -1985,6 +1985,12 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 }
 EXPORT_SYMBOL(filemap_fault);

+void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	BUG();
+}
+EXPORT_SYMBOL(filemap_map_pages);
+
 int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
 			     unsigned long size, pgoff_t pgoff)
 {
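For context, a hedged sketch of the caller side of the new hook, inferred only from the fields filemap_map_pages() reads above: vmf->pgoff and vmf->max_pgoff bound the range of pages to consider (inclusive), vmf->virtual_address is the user address corresponding to vmf->pgoff, and vmf->pte points at the already-mapped page-table entry for vmf->pgoff. The helper name map_pages_around() is hypothetical; the real caller is the fault-around path added by the parent commit (8c6e50b0), not this code.

/*
 * Illustrative only -- a hypothetical caller of ->map_pages(), not part of
 * this patch.  Assumes the page table covering the range is already mapped
 * and that "pte" points at the entry for start_pgoff.
 */
static void map_pages_around(struct vm_area_struct *vma, unsigned long address,
			     pte_t *pte, pgoff_t start_pgoff, pgoff_t end_pgoff)
{
	struct vm_fault vmf;

	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
	vmf.pte = pte;			/* PTE slot for start_pgoff */
	vmf.pgoff = start_pgoff;	/* first page cache index to consider */
	vmf.max_pgoff = end_pgoff;	/* last index, inclusive */

	vma->vm_ops->map_pages(vma, &vmf);
}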