/*
 * mm/page-writeback.c.
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	akpm@zip.com.au
 *		Initial version
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/sysrq.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>

/*
 * The maximum number of pages to writeout in a single bdflush/kupdate
 * operation.  We do this so we don't hold I_LOCK against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty thresholds each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES	1024

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

static long total_pages;	/* The total number of pages in the machine. */
static int dirty_exceeded;	/* Dirty mem may be over limit */

/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It should be somewhat larger than ratelimit_pages to ensure that reasonably
 * large amounts of I/O are submitted.
 */
static inline long sync_writeback_pages(void)
{
	return ratelimit_pages + ratelimit_pages / 2;
}

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via pdflush) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 40;

/*
 * The interval between `kupdate'-style writebacks, in centiseconds
 * (hundredths of a second)
 */
int dirty_writeback_centisecs = 5 * 100;

/*
 * The longest time, in centiseconds, for which data is allowed to remain dirty
 */
int dirty_expire_centisecs = 30 * 100;

/* End of sysctl-exported parameters */


static void background_writeout(unsigned long _min_pages);

/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around, to avoid stressing page reclaim with lots of unreclaimable
 * pages.  It is better to clamp down on writers than to start swapping and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */
static void
get_dirty_limits(struct page_state *ps, long *background, long *dirty)
{
	int background_ratio;		/* Percentages */
	int dirty_ratio;
	int unmapped_ratio;

	get_page_state(ps);

	unmapped_ratio = 100 - (ps->nr_mapped * 100) / total_pages;

	dirty_ratio = vm_dirty_ratio;
	if (dirty_ratio > unmapped_ratio / 2)
		dirty_ratio = unmapped_ratio / 2;

	if (dirty_ratio < 5)
		dirty_ratio = 5;

	background_ratio = dirty_background_ratio;
	if (background_ratio >= dirty_ratio)
		background_ratio = dirty_ratio / 2;

	*background = (background_ratio * total_pages) / 100;
	*dirty = (dirty_ratio * total_pages) / 100;
}

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then pdflush is woken to perform some
 * writeout.
 */
void balance_dirty_pages(struct address_space *mapping)
{
	struct page_state ps;
	long nr_reclaimable;
	long background_thresh;
	long dirty_thresh;
	unsigned long pages_written = 0;
	unsigned long write_chunk = sync_writeback_pages();

	struct backing_dev_info *bdi = mapping->backing_dev_info;

	for (;;) {
		struct writeback_control wbc = {
			.bdi		= bdi,
			.sync_mode	= WB_SYNC_NONE,
			.older_than_this = NULL,
			.nr_to_write	= write_chunk,
		};

		get_dirty_limits(&ps, &background_thresh, &dirty_thresh);
		nr_reclaimable = ps.nr_dirty + ps.nr_unstable;
		if (nr_reclaimable + ps.nr_writeback <= dirty_thresh)
			break;

		dirty_exceeded = 1;

		/* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
		 * Unstable writes are a feature of certain networked
		 * filesystems (e.g. NFS) in which data may have been
		 * written to the server's write cache, but has not yet
		 * been flushed to permanent storage.
		 */
		if (nr_reclaimable) {
			writeback_inodes(&wbc);
			get_dirty_limits(&ps, &background_thresh,
					&dirty_thresh);
			nr_reclaimable = ps.nr_dirty + ps.nr_unstable;
			if (nr_reclaimable + ps.nr_writeback <= dirty_thresh)
				break;
			pages_written += write_chunk - wbc.nr_to_write;
			if (pages_written >= write_chunk)
				break;		/* We've done our duty */
		}
		blk_congestion_wait(WRITE, HZ/10);
	}

	if (nr_reclaimable + ps.nr_writeback <= dirty_thresh)
		dirty_exceeded = 0;

	if (!writeback_in_progress(bdi) && nr_reclaimable > background_thresh)
		pdflush_operation(background_writeout, 0);
}

/**
 * balance_dirty_pages_ratelimited - balance dirty memory state
 * @mapping - address_space which was dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_page_state is expensive, so try to avoid calling
 * it too often (ratelimiting).  But once we're over the dirty memory limit we
 * decrease the ratelimiting by a lot, to prevent individual processes from
 * overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited(struct address_space *mapping)
{
	static DEFINE_PER_CPU(int, ratelimits) = 0;
	int cpu;
	long ratelimit;

	ratelimit = ratelimit_pages;
	if (dirty_exceeded)
		ratelimit = 8;

	cpu = get_cpu();
	if (per_cpu(ratelimits, cpu)++ >= ratelimit) {
		per_cpu(ratelimits, cpu) = 0;
		put_cpu();
		balance_dirty_pages(mapping);
		return;
	}
	put_cpu();
}
EXPORT_SYMBOL_GPL(balance_dirty_pages_ratelimited);

/*
 * Write back at least _min_pages, and keep writing until the amount of dirty
 * memory is less than the background threshold, or until we're all clean.
 */
static void background_writeout(unsigned long _min_pages)
{
	long min_pages = _min_pages;
	struct writeback_control wbc = {
		.bdi		= NULL,
		.sync_mode	= WB_SYNC_NONE,
		.older_than_this = NULL,
		.nr_to_write	= 0,
		.nonblocking	= 1,
	};

	CHECK_EMERGENCY_SYNC
	for ( ; ; ) {
		struct page_state ps;
		long background_thresh;
		long dirty_thresh;

		get_dirty_limits(&ps, &background_thresh, &dirty_thresh);
		if (ps.nr_dirty + ps.nr_unstable < background_thresh
				&& min_pages <= 0)
			break;
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		writeback_inodes(&wbc);
		min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		if (wbc.nr_to_write > 0) {
			/* Wrote less than expected */
			if (wbc.encountered_congestion)
				blk_congestion_wait(WRITE, HZ/10);
			else
				break;
		}
	}
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.  Returns 0 if a pdflush thread was dispatched.  Returns
 * -1 if all pdflush threads were busy.
 */
int wakeup_bdflush(long nr_pages)
{
	if (nr_pages == 0) {
		struct page_state ps;

		get_page_state(&ps);
		nr_pages = ps.nr_dirty;
	}
	return pdflush_operation(background_writeout, nr_pages);
}

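/*
 * Timer which periodically kicks off `kupdate'-style writeback via
 * wb_timer_fn(), below.
 */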
static struct timer_list wb_timer;

/*
 * Periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_centisecs.  But if a writeback event
 * takes longer than a dirty_writeback_centisecs interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static void wb_kupdate(unsigned long arg)
{
	unsigned long oldest_jif;
	unsigned long start_jif;
	unsigned long next_jif;
	long nr_to_write;
	struct page_state ps;
	struct writeback_control wbc = {
		.bdi		= NULL,
		.sync_mode	= WB_SYNC_NONE,
		.older_than_this = &oldest_jif,
		.nr_to_write	= 0,
		.nonblocking	= 1,
		.for_kupdate	= 1,
	};

	sync_supers();

	get_page_state(&ps);
	oldest_jif = jiffies - (dirty_expire_centisecs * HZ) / 100;
	start_jif = jiffies;
	next_jif = start_jif + (dirty_writeback_centisecs * HZ) / 100;
	nr_to_write = ps.nr_dirty + ps.nr_unstable;
	while (nr_to_write > 0) {
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		writeback_inodes(&wbc);
		if (wbc.nr_to_write > 0) {
			if (wbc.encountered_congestion)
				blk_congestion_wait(WRITE, HZ/10);
			else
				break;	/* All the old data is written */
		}
		nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
	}
	if (time_before(next_jif, jiffies + HZ))
		next_jif = jiffies + HZ;
	mod_timer(&wb_timer, next_jif);
}

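/*
 * The wb_timer handler: hand wb_kupdate() off to a pdflush thread.  If all
 * pdflush threads are busy, try again in one second.
 */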
static void wb_timer_fn(unsigned long unused)
{
	if (pdflush_operation(wb_kupdate, 0) < 0)
		mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */

}

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive) get_page_state
 * too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds before writeback cuts in.
 *
 * But the limit should not be set too high.  Because it also controls the
 * amount of memory which the balance_dirty_pages() caller has to write back.
 * If this is too large then the caller will block on the IO queue all the
 * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 * will write six megabyte chunks, max.
 */

static void set_ratelimit(void)
{
	ratelimit_pages = total_pages / (num_online_cpus() * 32);
	if (ratelimit_pages < 16)
		ratelimit_pages = 16;
	if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
		ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}

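/*
 * Recompute ratelimit_pages whenever a CPU comes online or goes offline
 * (registered as a CPU notifier in page_writeback_init()).
 */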
static int
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
	set_ratelimit();
	return 0;
}

static struct notifier_block ratelimit_nb = {
	.notifier_call	= ratelimit_handler,
	.next		= NULL,
};

/*
 * If the machine has a large highmem:lowmem ratio then scale back the default
 * dirty memory thresholds: allowing too much dirty highmem pins an excessive
 * number of buffer_heads.
 */
void __init page_writeback_init(void)
{
	long buffer_pages = nr_free_buffer_pages();
	long correction;

	total_pages = nr_free_pagecache_pages();

	correction = (100 * 4 * buffer_pages) / total_pages;

	if (correction < 100) {
		dirty_background_ratio *= correction;
		dirty_background_ratio /= 100;
		vm_dirty_ratio *= correction;
		vm_dirty_ratio /= 100;
	}

	init_timer(&wb_timer);
	wb_timer.expires = jiffies + (dirty_writeback_centisecs * HZ) / 100;
	wb_timer.data = 0;
	wb_timer.function = wb_timer_fn;
	add_timer(&wb_timer);
	set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);
}

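/*
 * Write back a mapping's dirty pages: use the address_space's own
 * ->writepages() method if it provides one, otherwise fall back to
 * generic_writepages().
 */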
int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	if (mapping->a_ops->writepages)
		return mapping->a_ops->writepages(mapping, wbc);
	return generic_writepages(mapping, wbc);
}

/**
 * write_one_page - write out a single page and optionally wait on I/O
 *
 * @page - the page to write
 * @wait - if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
	struct address_space *mapping = page->mapping;
	int ret = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
	};

	BUG_ON(!PageLocked(page));

	if (wait && PageWriteback(page))
		wait_on_page_writeback(page);

	spin_lock(&mapping->page_lock);
	list_del(&page->list);
	if (test_clear_page_dirty(page)) {
		list_add(&page->list, &mapping->locked_pages);
		page_cache_get(page);
		spin_unlock(&mapping->page_lock);
		ret = mapping->a_ops->writepage(page, &wbc);
		if (ret == 0 && wait) {
			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		page_cache_release(page);
	} else {
		list_add(&page->list, &mapping->clean_pages);
		spin_unlock(&mapping->page_lock);
		unlock_page(page);
	}
	return ret;
}
EXPORT_SYMBOL(write_one_page);

/*
 * For address_spaces which do not use buffers.  Just set the page's dirty bit
 * and move it to the dirty_pages list.  Also perform space reservation if
 * required.
 *
 * __set_page_dirty_nobuffers() may return -ENOSPC.  But if it does, the page
 * is still safe, as long as it actually manages to find some blocks at
 * writeback time.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	int ret = 0;

	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page->mapping;

		if (mapping) {
			spin_lock(&mapping->page_lock);
			if (page->mapping) {	/* Race with truncate? */
				BUG_ON(page->mapping != mapping);
				if (!mapping->backing_dev_info->memory_backed)
					inc_page_state(nr_dirty);
				list_del(&page->list);
				list_add(&page->list, &mapping->dirty_pages);
			}
			spin_unlock(&mapping->page_lock);
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		}
	}
	return ret;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
	int ret;

	lock_page(page);
	ret = set_page_dirty(page);
	unlock_page(page);
	return ret;
}

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting. 
 * Returns true if the page was previously dirty.
 */
int test_clear_page_dirty(struct page *page)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;

		if (mapping && !mapping->backing_dev_info->memory_backed)
			dec_page_state(nr_dirty);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(test_clear_page_dirty);