/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

static void clear_exceptional_entry(struct address_space *mapping,
				    pgoff_t index, void *entry)
{
	struct radix_tree_node *node;
	void **slot;

	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	if (dax_mapping(mapping)) {
		dax_delete_mapping_entry(mapping, index);
		return;
	}
	spin_lock_irq(&mapping->tree_lock);
	/*
	 * Regular page slots are stabilized by the page lock even
	 * without the tree itself locked.  These unlocked entries
	 * need verification under the tree lock.
	 */
	if (!__radix_tree_lookup(&mapping->page_tree, index, &node,
				&slot))
		goto unlock;
	if (*slot != entry)
		goto unlock;
	radix_tree_replace_slot(slot, NULL);
	mapping->nrexceptional--;
	if (!node)
		goto unlock;
	workingset_node_shadows_dec(node);
	/*
	 * Don't track node without shadow entries.
	 *
	 * Avoid acquiring the list_lru lock if already untracked.
	 * The list_empty() test is safe as node->private_list is
	 * protected by mapping->tree_lock.
	 */
	if (!workingset_node_shadows(node) &&
	    !list_empty(&node->private_list))
		list_lru_del(&workingset_shadow_nodes,
				&node->private_list);
	__radix_tree_delete_node(&mapping->page_tree, node);
unlock:
	spin_unlock_irq(&mapping->tree_lock);
}

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
}

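/*
 * Illustrative sketch (not part of this file): a filesystem opts in to the
 * callback above by filling in ->invalidatepage in its
 * address_space_operations; the myfs_* names are hypothetical.
 *
 *	static void myfs_invalidatepage(struct page *page,
 *					unsigned int offset,
 *					unsigned int length)
 *	{
 *		if (offset == 0 && length == PAGE_SIZE)
 *			myfs_drop_page_private(page);
 *	}
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.invalidatepage	= myfs_invalidatepage,
 *	};
 */
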
/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0, PAGE_SIZE);

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 * Hence the dirty accounting check is placed after invalidation.
	 */
	cancel_dirty_page(page);
	ClearPageMappedToDisk(page);
	delete_from_page_cache(page);
	return 0;
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	loff_t holelen;
	VM_BUG_ON_PAGE(PageTail(page), page);

	holelen = PageTransHuge(page) ? HPAGE_PMD_SIZE : PAGE_SIZE;
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				   (loff_t)page->index << PAGE_SHIFT,
				   holelen, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);

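/*
 * Illustrative sketch (not part of this file): filesystems typically hook
 * this helper up via ->error_remove_page so the hwpoison code in
 * mm/memory-failure.c can drop a corrupted pagecache page; myfs_aops is a
 * hypothetical example.
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.error_remove_page	= generic_error_remove_page,
 *	};
 */
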
/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidatepage() accepts a range to invalidate,
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * properly page aligned.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	unsigned int	partial_start;	/* inclusive */
	unsigned int	partial_end;	/* exclusive */
	struct pagevec	pvec;
	pgoff_t		indices[PAGEVEC_SIZE];
	pgoff_t		index;
	int		i;

	cleancache_invalidate_inode(mapping);
	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		return;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_SIZE - 1);

	/*
	 * 'start' and 'end' always cover the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_SHIFT;

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			WARN_ON(page_to_pgoff(page) != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				/* Truncation within a single page */
				top = partial_end;
				partial_end = 0;
			}
			wait_on_page_writeback(page);
			zero_user_segment(page, partial_start, top);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, partial_start,
						  top - partial_start);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = find_lock_page(mapping, end);
		if (page) {
			wait_on_page_writeback(page);
			zero_user_segment(page, 0, partial_end);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, 0,
						  partial_end);
			unlock_page(page);
			put_page(page);
		}
	}
	/*
	 * If the truncation happened within a single page no pages
	 * will be released, just zeroed, so we can bail out now.
	 */
	if (start >= end)
		return;

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
			/* If all gone from start onwards, we're done */
			if (index == start)
				break;
			/* Otherwise restart to make sure all gone */
			index = start;
			continue;
		}
		if (index == start && indices[0] >= end) {
			/* All gone out of hole to be punched, we're done */
			pagevec_remove_exceptionals(&pvec);
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end) {
				/* Restart punch to make sure all gone */
				index = start - 1;
				break;
			}

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			lock_page(page);
			WARN_ON(page_to_pgoff(page) != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;
	}
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);

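/*
 * Illustrative sketch (not part of this file): a caller punching out the
 * cache for a byte range passes byte-granular offsets, with lend inclusive:
 *
 *	loff_t start = 4096, len = 8192;
 *
 *	truncate_inode_pages_range(inode->i_mapping, start, start + len - 1);
 *
 * Most filesystems reach this through truncate_pagecache_range() below,
 * which also unmaps the range from user pagetables first.
 */
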
/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

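/*
 * Illustrative sketch (not part of this file): shrinking a file to newsize
 * drops every page wholly beyond that offset:
 *
 *	truncate_inode_pages(inode->i_mapping, newsize);
 *
 * In practice this is usually reached via truncate_pagecache() below, so
 * that mapped pages are unmapped from user pagetables as well.
 */
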
/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_mutex.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
	unsigned long nrexceptional;
	unsigned long nrpages;

	/*
	 * Page reclaim cannot participate in regular inode lifetime
	 * management (can't call iput()) and thus can race with the
	 * inode teardown.  Tell it when the address space is exiting,
	 * so that it does not install eviction information after the
	 * final truncate has begun.
	 */
	mapping_set_exiting(mapping);

	/*
	 * When reclaim installs eviction entries, it increases
	 * nrexceptional first, then decreases nrpages.  Make sure we see
	 * this in the right order or we might miss an entry.
	 */
	nrpages = mapping->nrpages;
	smp_rmb();
	nrexceptional = mapping->nrexceptional;

	if (nrpages || nrexceptional) {
		/*
		 * As truncation uses a lockless tree lookup, cycle
		 * the tree lock to make sure any ongoing tree
		 * modification that does not see AS_EXITING is
		 * completed before starting the final truncate.
		 */
		spin_lock_irq(&mapping->tree_lock);
		spin_unlock_irq(&mapping->tree_lock);

		truncate_inode_pages(mapping, 0);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_final);

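/*
 * Illustrative sketch (not part of this file): the expected call site is a
 * filesystem's ->evict_inode; myfs_evict_inode is a hypothetical example.
 *
 *	static void myfs_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		clear_inode(inode);
 *	}
 */
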
/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;

			WARN_ON(page_to_pgoff(page) != index);

			/* Middle of THP: skip */
			if (PageTransTail(page)) {
				unlock_page(page);
				continue;
			} else if (PageTransHuge(page)) {
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
				/* 'end' is in the middle of THP */
				if (index == round_down(end, HPAGE_PMD_NR))
					continue;
			}

			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest, so try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_file_page(page);
			count += ret;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);

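/*
 * Illustrative sketch (not part of this file): this is roughly how
 * POSIX_FADV_DONTNEED in mm/fadvise.c drops clean cache over a byte range;
 * note that the arguments are page offsets and that end is inclusive.
 *
 *	pgoff_t start = offset >> PAGE_SHIFT;
 *	pgoff_t end = (offset + len - 1) >> PAGE_SHIFT;
 *
 *	invalidate_mapping_pages(mapping, start, end);
 */
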
/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	unsigned long flags;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irqsave(&mapping->tree_lock, flags);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page, NULL);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	put_page(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	cleancache_invalidate_inode(mapping);
	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			lock_page(page);
			WARN_ON(page_to_pgoff(page) != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_SHIFT,
					   (loff_t)(1 + end - index)
							 << PAGE_SHIFT,
							 0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_SHIFT,
					   PAGE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

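/*
 * Illustrative sketch (not part of this file): direct I/O is a classic
 * caller.  Before a direct write, mm/filemap.c invalidates the affected
 * page range so no stale cached copies survive, roughly:
 *
 *	if (invalidate_inode_pages2_range(mapping,
 *			pos >> PAGE_SHIFT,
 *			(pos + count - 1) >> PAGE_SHIFT))
 *		return 0;	(fall back to buffered I/O)
 */
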
/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

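/*
 * Illustrative sketch (not part of this file): the new size must be visible
 * in i_size before the cache is trimmed to match.
 *
 *	i_size_write(inode, newsize);
 *	truncate_pagecache(inode, newsize);
 *
 * truncate_setsize() below packages exactly this sequence (plus handling
 * of size extension).
 */
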
/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_mutex but e.g. xfs uses a different lock) and before all filesystem
 * specific block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	if (newsize > oldsize)
		pagecache_isize_extended(inode, oldsize, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);

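/*
 * Illustrative sketch (not part of this file): a minimal ->setattr handling
 * ATTR_SIZE; myfs_setattr and myfs_truncate_blocks are hypothetical names.
 *
 *	static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
 *	{
 *		struct inode *inode = d_inode(dentry);
 *
 *		if (attr->ia_valid & ATTR_SIZE) {
 *			truncate_setsize(inode, attr->ia_size);
 *			myfs_truncate_blocks(inode, attr->ia_size);
 *		}
 *		setattr_copy(inode, attr);
 *		mark_inode_dirty(inode);
 *		return 0;
 *	}
 */
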
/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode:	inode for which i_size was extended
 * @from:	original inode size
 * @to:		new inode size
 *
 * Handle extension of inode size either caused by extending truncate or by
 * write starting after current i_size. We mark the page straddling current
 * i_size RO so that page_mkwrite() is called on the nearest write access to
 * the page.  This way the filesystem can be sure that page_mkwrite() is
 * called on the page before user writes to the page via mmap after the
 * i_size has been changed.
 *
 * The function must be called after i_size is updated so that page fault
 * coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_mutex - this not only
 * makes sure i_size is stable but also that userspace cannot observe new
 * i_size value before we are prepared to store mmap writes at new inode size.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = 1 << inode->i_blkbits;
	loff_t rounded_from;
	struct page *page;
	pgoff_t index;

	WARN_ON(to > inode->i_size);

	if (from >= to || bsize == PAGE_SIZE)
		return;
	/* Page straddling @from will not have any hole block created? */
	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
		return;

	index = from >> PAGE_SHIFT;
	page = find_lock_page(inode->i_mapping, index);
	/* Page not cached? Nothing to do */
	if (!page)
		return;
	/*
	 * See clear_page_dirty_for_io() for details why set_page_dirty()
	 * is needed.
	 */
	if (page_mkclean(page))
		set_page_dirty(page);
	unlock_page(page);
	put_page(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);

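/*
 * Illustrative sketch (not part of this file): a filesystem that grows
 * i_size outside of truncate_setsize() (e.g. when a write extends the
 * file) calls this afterwards:
 *
 *	loff_t oldsize = inode->i_size;
 *
 *	i_size_write(inode, newsize);
 *	pagecache_isize_extended(inode, oldsize, newsize);
 */
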
/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 */

	/*
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
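
/*
 * Illustrative sketch (not part of this file): a hole punch implementing
 * ->fallocate(FALLOC_FL_PUNCH_HOLE) would remove the pagecache before
 * freeing blocks; myfs_free_blocks is a hypothetical name, lend inclusive.
 *
 *	truncate_pagecache_range(inode, offset, offset + len - 1);
 *	myfs_free_blocks(inode, offset, len);
 */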