Commit 83e4fa9c authored by Hugh Dickins, committed by Linus Torvalds

tmpfs: support fallocate FALLOC_FL_PUNCH_HOLE

tmpfs has supported hole-punching since 2.6.16, via
madvise(,,MADV_REMOVE).

But nowadays fallocate(,FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE,,) is
the agreed way to punch holes.

So add shmem_fallocate() to support that, and tweak shmem_truncate_range()
to handle partial pages at both the beginning and end of the range (never
needed for madvise, which demands a page-aligned addr and rounds up the length).
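For reference, a minimal userspace sketch of the interface this patch wires
up (the path /dev/shm/example and the sizes are illustrative only, not part
of the patch): write some data to a tmpfs file, then deallocate a page-aligned
region in the middle while FALLOC_FL_KEEP_SIZE preserves i_size.

/* Punch an 8KB hole at offset 4KB in a tmpfs file.
 * FALLOC_FL_KEEP_SIZE is mandatory with FALLOC_FL_PUNCH_HOLE,
 * so i_size is unchanged; only the backing pages are freed. */
#define _GNU_SOURCE
#include <fcntl.h>		/* fallocate(), O_* flags */
#include <linux/falloc.h>	/* FALLOC_FL_* on older glibc */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = open("/dev/shm/example", O_RDWR | O_CREAT | O_TRUNC, 0600);
	int i;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(buf, 'x', sizeof(buf));
	for (i = 0; i < 4; i++)		/* 16KB of data */
		if (write(fd, buf, sizeof(buf)) != sizeof(buf))
			perror("write");

	/* The older tmpfs-only route was madvise(addr, len, MADV_REMOVE)
	 * on a mapping of the file; this is the generic replacement. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      4096, 8192) < 0)
		perror("fallocate");

	close(fd);
	return 0;
}

After the punch, the file's size is unchanged but its block count (as
reported by stat) drops, which is an easy way to confirm the hole.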
Based-on-patch-by: Cong Wang <amwang@redhat.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Cong Wang <amwang@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ec9516fb
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -53,6 +53,7 @@ static struct vfsmount *shm_mnt;
 #include <linux/blkdev.h>
 #include <linux/pagevec.h>
 #include <linux/percpu_counter.h>
+#include <linux/falloc.h>
 #include <linux/splice.h>
 #include <linux/security.h>
 #include <linux/swapops.h>
@@ -432,21 +433,23 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 	struct address_space *mapping = inode->i_mapping;
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-	unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
-	pgoff_t end = (lend >> PAGE_CACHE_SHIFT);
+	pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT;
+	unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1);
+	unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
 	struct pagevec pvec;
 	pgoff_t indices[PAGEVEC_SIZE];
 	long nr_swaps_freed = 0;
 	pgoff_t index;
 	int i;
 
-	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
+	if (lend == -1)
+		end = -1;	/* unsigned, so actually very big */
 
 	pagevec_init(&pvec, 0);
 	index = start;
-	while (index <= end) {
+	while (index < end) {
 		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+			min(end - index, (pgoff_t)PAGEVEC_SIZE),
 							pvec.pages, indices);
 		if (!pvec.nr)
 			break;
@@ -455,7 +458,7 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 			struct page *page = pvec.pages[i];
 
 			index = indices[i];
-			if (index > end)
+			if (index >= end)
 				break;
 
 			if (radix_tree_exceptional_entry(page)) {
@@ -479,22 +482,39 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 		index++;
 	}
 
-	if (partial) {
+	if (partial_start) {
 		struct page *page = NULL;
 		shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
 		if (page) {
-			zero_user_segment(page, partial, PAGE_CACHE_SIZE);
+			unsigned int top = PAGE_CACHE_SIZE;
+			if (start > end) {
+				top = partial_end;
+				partial_end = 0;
+			}
+			zero_user_segment(page, partial_start, top);
+			set_page_dirty(page);
+			unlock_page(page);
+			page_cache_release(page);
+		}
+	}
+	if (partial_end) {
+		struct page *page = NULL;
+		shmem_getpage(inode, end, &page, SGP_READ, NULL);
+		if (page) {
+			zero_user_segment(page, 0, partial_end);
 			set_page_dirty(page);
 			unlock_page(page);
 			page_cache_release(page);
 		}
 	}
+	if (start >= end)
+		return;
 
 	index = start;
 	for ( ; ; ) {
 		cond_resched();
 		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+			min(end - index, (pgoff_t)PAGEVEC_SIZE),
 							pvec.pages, indices);
 		if (!pvec.nr) {
 			if (index == start)
@@ -502,7 +522,7 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 			index = start;
 			continue;
 		}
-		if (index == start && indices[0] > end) {
+		if (index == start && indices[0] >= end) {
 			shmem_deswap_pagevec(&pvec);
 			pagevec_release(&pvec);
 			break;
@@ -512,7 +532,7 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 			struct page *page = pvec.pages[i];
 
 			index = indices[i];
-			if (index > end)
+			if (index >= end)
 				break;
 
 			if (radix_tree_exceptional_entry(page)) {
@@ -1578,6 +1598,31 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
 	return error;
 }
 
+static long shmem_fallocate(struct file *file, int mode, loff_t offset,
+							 loff_t len)
+{
+	struct inode *inode = file->f_path.dentry->d_inode;
+	int error = -EOPNOTSUPP;
+
+	mutex_lock(&inode->i_mutex);
+
+	if (mode & FALLOC_FL_PUNCH_HOLE) {
+		struct address_space *mapping = file->f_mapping;
+		loff_t unmap_start = round_up(offset, PAGE_SIZE);
+		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
+
+		if ((u64)unmap_end > (u64)unmap_start)
+			unmap_mapping_range(mapping, unmap_start,
+					    1 + unmap_end - unmap_start, 0);
+		shmem_truncate_range(inode, offset, offset + len - 1);
+		/* No need to unmap again: hole-punching leaves COWed pages */
+		error = 0;
+	}
+
+	mutex_unlock(&inode->i_mutex);
+	return error;
+}
+
 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
@@ -2490,6 +2535,7 @@ static const struct file_operations shmem_file_operations = {
 	.fsync		= noop_fsync,
 	.splice_read	= shmem_file_splice_read,
 	.splice_write	= generic_file_splice_write,
+	.fallocate	= shmem_fallocate,
 #endif
 };