Commit a2646d1e authored by Hugh Dickins, committed by Linus Torvalds

[PATCH] holepunch: fix shmem_truncate_range punching too far

Miklos Szeredi observes BUG_ON(!entry) in shmem_writepage() triggered in rare
circumstances, because shmem_truncate_range() erroneously removes partially
truncated directory pages at the end of the range: later reclaim on pages
pointing to these removed directories triggers the BUG.  Indeed, and it can
also cause data loss beyond the hole.

Fix this as in the patch proposed by Miklos, but distinguish between "limit"
(how far we need to search: ignore truncation's next_index optimization in the
holepunch case - if there are races it's more consistent to act on the whole
range specified) and "upper_limit" (how far we can free directory pages:
generally we must be careful to keep partially punched pages, but can relax at
end of file - i_size being held stable by i_mutex).
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Miklos Szeredi <mszeredi@suse.cz>
Cc: Badari Pulavarty <pbadari@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 96fac9dc
...@@ -481,7 +481,8 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end) ...@@ -481,7 +481,8 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
long nr_swaps_freed = 0; long nr_swaps_freed = 0;
int offset; int offset;
int freed; int freed;
int punch_hole = 0; int punch_hole;
unsigned long upper_limit;
inode->i_ctime = inode->i_mtime = CURRENT_TIME; inode->i_ctime = inode->i_mtime = CURRENT_TIME;
idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
...@@ -492,11 +493,18 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end) ...@@ -492,11 +493,18 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
info->flags |= SHMEM_TRUNCATE; info->flags |= SHMEM_TRUNCATE;
if (likely(end == (loff_t) -1)) { if (likely(end == (loff_t) -1)) {
limit = info->next_index; limit = info->next_index;
upper_limit = SHMEM_MAX_INDEX;
info->next_index = idx; info->next_index = idx;
punch_hole = 0;
} else { } else {
limit = (end + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; if (end + 1 >= inode->i_size) { /* we may free a little more */
if (limit > info->next_index) limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
limit = info->next_index; PAGE_CACHE_SHIFT;
upper_limit = SHMEM_MAX_INDEX;
} else {
limit = (end + 1) >> PAGE_CACHE_SHIFT;
upper_limit = limit;
}
punch_hole = 1; punch_hole = 1;
} }
...@@ -520,10 +528,10 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end) ...@@ -520,10 +528,10 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
* If there are no indirect blocks or we are punching a hole * If there are no indirect blocks or we are punching a hole
* below indirect blocks, nothing to be done. * below indirect blocks, nothing to be done.
*/ */
if (!topdir || (punch_hole && (limit <= SHMEM_NR_DIRECT))) if (!topdir || limit <= SHMEM_NR_DIRECT)
goto done2; goto done2;
BUG_ON(limit <= SHMEM_NR_DIRECT); upper_limit -= SHMEM_NR_DIRECT;
limit -= SHMEM_NR_DIRECT; limit -= SHMEM_NR_DIRECT;
idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0; idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
offset = idx % ENTRIES_PER_PAGE; offset = idx % ENTRIES_PER_PAGE;
...@@ -543,7 +551,7 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end) ...@@ -543,7 +551,7 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
if (*dir) { if (*dir) {
diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) % diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE; ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
if (!diroff && !offset) { if (!diroff && !offset && upper_limit >= stage) {
*dir = NULL; *dir = NULL;
nr_pages_to_free++; nr_pages_to_free++;
list_add(&middir->lru, &pages_to_free); list_add(&middir->lru, &pages_to_free);
...@@ -570,9 +578,11 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end) ...@@ -570,9 +578,11 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
} }
stage = idx + ENTRIES_PER_PAGEPAGE; stage = idx + ENTRIES_PER_PAGEPAGE;
middir = *dir; middir = *dir;
*dir = NULL; if (upper_limit >= stage) {
nr_pages_to_free++; *dir = NULL;
list_add(&middir->lru, &pages_to_free); nr_pages_to_free++;
list_add(&middir->lru, &pages_to_free);
}
shmem_dir_unmap(dir); shmem_dir_unmap(dir);
cond_resched(); cond_resched();
dir = shmem_dir_map(middir); dir = shmem_dir_map(middir);
...@@ -598,7 +608,7 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end) ...@@ -598,7 +608,7 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
} }
if (offset) if (offset)
offset = 0; offset = 0;
else if (subdir && !page_private(subdir)) { else if (subdir && upper_limit - idx >= ENTRIES_PER_PAGE) {
dir[diroff] = NULL; dir[diroff] = NULL;
nr_pages_to_free++; nr_pages_to_free++;
list_add(&subdir->lru, &pages_to_free); list_add(&subdir->lru, &pages_to_free);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment