Commit b85e0eff authored by Hugh Dickins, committed by Linus Torvalds

mm: consistent truncate and invalidate loops

Make the pagevec_lookup loops in truncate_inode_pages_range(),
invalidate_mapping_pages() and invalidate_inode_pages2_range() more
consistent with each other.

They were relying upon page->index of an unlocked page, but apologizing
for it: accept it, embrace it, add comments and WARN_ONs, and simplify the
index handling.

invalidate_inode_pages2_range() had special handling for a wrapped
page->index + 1 = 0 case; but MAX_LFS_FILESIZE doesn't let us anywhere
near there, and a corrupt page->index in the radix_tree could cause more
trouble than that would catch.  Remove that wrapped handling.
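
(For a rough sense of the margin, assuming 4K pages: on 32-bit, MAX_LFS_FILESIZE is
((loff_t)PAGE_CACHE_SIZE << (BITS_PER_LONG-1)) - 1, i.e. 2^43 - 1 bytes, so the largest
possible page->index is about 2^31, only halfway to where a 32-bit pgoff_t would wrap at
2^32; on 64-bit the limit is 2^63 - 1 bytes, a largest index of about 2^51 against a wrap
at 2^64.)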

invalidate_inode_pages2_range() uses min() to limit the pagevec_lookup
when near the end of the range: copy that into the other two, although
it's less useful than you might think (it limits the use of the buffer,
rather than the indices looked up).
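
To make the clamp concrete, here is a standalone userspace sketch (not kernel code; it
assumes PAGEVEC_SIZE is 14, as in kernels of this era) of how many entries each lookup
asks for:

/* Standalone illustration only: mimics the lookup-count clamp. */
#include <stdio.h>

typedef unsigned long pgoff_t;
#define PAGEVEC_SIZE 14		/* assumed value for this kernel era */
#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	pgoff_t index = 100;
	pgoff_t ends[] = { 102, 113, 1000 };

	for (int i = 0; i < 3; i++) {
		pgoff_t end = ends[i];
		/* Ask for at most a pagevec's worth of entries, and no more
		 * than are needed to reach 'end' from 'index'. */
		pgoff_t nr = min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1;
		printf("end=%lu -> ask for %lu pages\n", end, nr);
	}
	return 0;
}

With end well beyond index the lookup still asks for a full pagevec, so the clamp only
bites within the last partial pagevec of the range, which is what makes it less useful
than it looks.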
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8a549bea
@@ -128,6 +128,7 @@ void __delete_from_page_cache(struct page *page)
 	radix_tree_delete(&mapping->page_tree, page->index);
 	page->mapping = NULL;
+	/* Leave page->index set: truncation lookup relies upon it */
 	mapping->nrpages--;
 	__dec_zone_page_state(page, NR_FILE_PAGES);
 	if (PageSwapBacked(page))
@@ -483,6 +484,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 		spin_unlock_irq(&mapping->tree_lock);
 	} else {
 		page->mapping = NULL;
+		/* Leave page->index set: truncation relies upon it */
 		spin_unlock_irq(&mapping->tree_lock);
 		mem_cgroup_uncharge_cache_page(page);
 		page_cache_release(page);
@@ -199,9 +199,6 @@ int invalidate_inode_page(struct page *page)
  * The first pass will remove most pages, so the search cost of the second pass
  * is low.
  *
- * When looking at page->index outside the page lock we need to be careful to
- * copy it into a local to avoid races (it could change at any time).
- *
  * We pass down the cache-hot hint to the page freeing code. Even if the
  * mapping is large, it is probably the case that the final pages are the most
  * recently touched, and freeing happens in ascending file offset order.
@@ -210,10 +207,10 @@ void truncate_inode_pages_range(struct address_space *mapping,
 		loff_t lstart, loff_t lend)
 {
 	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
-	pgoff_t end;
 	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
 	struct pagevec pvec;
-	pgoff_t next;
+	pgoff_t index;
+	pgoff_t end;
 	int i;
 
 	cleancache_flush_inode(mapping);
@@ -224,24 +221,21 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	end = (lend >> PAGE_CACHE_SHIFT);
 
 	pagevec_init(&pvec, 0);
-	next = start;
-	while (next <= end &&
-	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+	index = start;
+	while (index <= end && pagevec_lookup(&pvec, mapping, index,
+			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
 		mem_cgroup_uncharge_start();
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
-			pgoff_t page_index = page->index;
 
-			if (page_index > end) {
-				next = page_index;
+			/* We rely upon deletion not changing page->index */
+			index = page->index;
+			if (index > end)
 				break;
-			}
 
-			if (page_index > next)
-				next = page_index;
-			next++;
 			if (!trylock_page(page))
 				continue;
+			WARN_ON(page->index != index);
 			if (PageWriteback(page)) {
 				unlock_page(page);
 				continue;
@@ -252,6 +246,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 		pagevec_release(&pvec);
 		mem_cgroup_uncharge_end();
 		cond_resched();
+		index++;
 	}
 
 	if (partial) {
@@ -264,13 +259,14 @@ void truncate_inode_pages_range(struct address_space *mapping,
 		}
 	}
 
-	next = start;
+	index = start;
 	for ( ; ; ) {
 		cond_resched();
-		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
-			if (next == start)
+		if (!pagevec_lookup(&pvec, mapping, index,
+			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
+			if (index == start)
 				break;
-			next = start;
+			index = start;
 			continue;
 		}
 		if (pvec.pages[0]->index > end) {
@@ -281,18 +277,20 @@ void truncate_inode_pages_range(struct address_space *mapping,
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
 
-			if (page->index > end)
+			/* We rely upon deletion not changing page->index */
+			index = page->index;
+			if (index > end)
 				break;
+
 			lock_page(page);
+			WARN_ON(page->index != index);
 			wait_on_page_writeback(page);
 			truncate_inode_page(mapping, page);
-			if (page->index > next)
-				next = page->index;
-			next++;
 			unlock_page(page);
 		}
 		pagevec_release(&pvec);
 		mem_cgroup_uncharge_end();
+		index++;
 	}
 	cleancache_flush_inode(mapping);
 }
@@ -333,35 +331,26 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
 		pgoff_t start, pgoff_t end)
 {
 	struct pagevec pvec;
-	pgoff_t next = start;
+	pgoff_t index = start;
 	unsigned long ret;
 	unsigned long count = 0;
 	int i;
 
 	pagevec_init(&pvec, 0);
-	while (next <= end &&
-			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+	while (index <= end && pagevec_lookup(&pvec, mapping, index,
+			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
 		mem_cgroup_uncharge_start();
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
-			pgoff_t index;
-			int lock_failed;
-
-			lock_failed = !trylock_page(page);
 
-			/*
-			 * We really shouldn't be looking at the ->index of an
-			 * unlocked page. But we're not allowed to lock these
-			 * pages. So we rely upon nobody altering the ->index
-			 * of this (pinned-by-us) page.
-			 */
+			/* We rely upon deletion not changing page->index */
 			index = page->index;
-			if (index > next)
-				next = index;
-			next++;
-			if (lock_failed)
-				continue;
+			if (index > end)
+				break;
 
+			if (!trylock_page(page))
+				continue;
+			WARN_ON(page->index != index);
 			ret = invalidate_inode_page(page);
 			unlock_page(page);
 			/*
@@ -371,12 +360,11 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
 			if (!ret)
 				deactivate_page(page);
 			count += ret;
-			if (next > end)
-				break;
 		}
 		pagevec_release(&pvec);
 		mem_cgroup_uncharge_end();
 		cond_resched();
+		index++;
 	}
 	return count;
 }
@@ -442,37 +430,32 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 		pgoff_t start, pgoff_t end)
 {
 	struct pagevec pvec;
-	pgoff_t next;
+	pgoff_t index;
 	int i;
 	int ret = 0;
 	int ret2 = 0;
 	int did_range_unmap = 0;
-	int wrapped = 0;
 
 	cleancache_flush_inode(mapping);
 	pagevec_init(&pvec, 0);
-	next = start;
-	while (next <= end && !wrapped &&
-		pagevec_lookup(&pvec, mapping, next,
-			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
+	index = start;
+	while (index <= end && pagevec_lookup(&pvec, mapping, index,
+			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
 		mem_cgroup_uncharge_start();
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
-			pgoff_t page_index;
 
+			/* We rely upon deletion not changing page->index */
+			index = page->index;
+			if (index > end)
+				break;
+
 			lock_page(page);
+			WARN_ON(page->index != index);
 			if (page->mapping != mapping) {
 				unlock_page(page);
 				continue;
 			}
-			page_index = page->index;
-			next = page_index + 1;
-			if (next == 0)
-				wrapped = 1;
-			if (page_index > end) {
-				unlock_page(page);
-				break;
-			}
 			wait_on_page_writeback(page);
 			if (page_mapped(page)) {
 				if (!did_range_unmap) {
@@ -480,9 +463,9 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 					 * Zap the rest of the file in one hit.
 					 */
 					unmap_mapping_range(mapping,
-					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
-					   (loff_t)(end - page_index + 1)
-							<< PAGE_CACHE_SHIFT,
+					   (loff_t)index << PAGE_CACHE_SHIFT,
+					   (loff_t)(1 + end - index)
+							 << PAGE_CACHE_SHIFT,
 					    0);
 					did_range_unmap = 1;
 				} else {
@@ -490,8 +473,8 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 					 * Just zap this page
 					 */
 					unmap_mapping_range(mapping,
-					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
-					  PAGE_CACHE_SIZE, 0);
+					   (loff_t)index << PAGE_CACHE_SHIFT,
+					   PAGE_CACHE_SIZE, 0);
 				}
 			}
 			BUG_ON(page_mapped(page));
@@ -507,6 +490,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 		pagevec_release(&pvec);
 		mem_cgroup_uncharge_end();
 		cond_resched();
+		index++;
 	}
 	cleancache_flush_inode(mapping);
 	return ret;