v3.1
   1/*
   2 * mm/page-writeback.c
   3 *
   4 * Copyright (C) 2002, Linus Torvalds.
   5 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
   6 *
   7 * Contains functions related to writing back dirty pages at the
   8 * address_space level.
   9 *
  10 * 10Apr2002	Andrew Morton
  11 *		Initial version
  12 */
  13
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/spinlock.h>
  17#include <linux/fs.h>
  18#include <linux/mm.h>
  19#include <linux/swap.h>
  20#include <linux/slab.h>
  21#include <linux/pagemap.h>
  22#include <linux/writeback.h>
  23#include <linux/init.h>
  24#include <linux/backing-dev.h>
  25#include <linux/task_io_accounting_ops.h>
  26#include <linux/blkdev.h>
  27#include <linux/mpage.h>
  28#include <linux/rmap.h>
  29#include <linux/percpu.h>
  30#include <linux/notifier.h>
  31#include <linux/smp.h>
  32#include <linux/sysctl.h>
  33#include <linux/cpu.h>
  34#include <linux/syscalls.h>
  35#include <linux/buffer_head.h>
  36#include <linux/pagevec.h>
  37#include <trace/events/writeback.h>
  38
  39/*
  40 * Sleep at most 200ms at a time in balance_dirty_pages().
  41 */
  42#define MAX_PAUSE		max(HZ/5, 1)
  43
  44/*
  45 * Estimate write bandwidth at 200ms intervals.
  46 */
  47#define BANDWIDTH_INTERVAL	max(HZ/5, 1)
  48
  49/*
  50 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
  51 * will look to see if it needs to force writeback or throttling.
  52 */
  53static long ratelimit_pages = 32;
  54
  55/*
  56 * When balance_dirty_pages decides that the caller needs to perform some
  57 * non-background writeback, this is how many pages it will attempt to write.
  58 * It should be somewhat larger than dirtied pages to ensure that reasonably
  59 * large amounts of I/O are submitted.
  60 */
  61static inline long sync_writeback_pages(unsigned long dirtied)
  62{
  63	if (dirtied < ratelimit_pages)
  64		dirtied = ratelimit_pages;
  65
  66	return dirtied + dirtied / 2;
  67}
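/*
 * For illustration, with the initial ratelimit_pages = 32: a caller that has
 * dirtied only 8 pages is still asked to write 32 + 32/2 = 48 pages, while
 * one that has dirtied 1024 pages is asked to write 1024 + 512 = 1536 pages,
 * i.e. roughly 1.5x the number dirtied, never less than 1.5x ratelimit_pages.
 */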
  68
  69/* The following parameters are exported via /proc/sys/vm */
  70
  71/*
  72 * Start background writeback (via writeback threads) at this percentage
  73 */
  74int dirty_background_ratio = 10;
  75
  76/*
  77 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
  78 * dirty_background_ratio * the amount of dirtyable memory
  79 */
  80unsigned long dirty_background_bytes;
  81
  82/*
  83 * free highmem will not be subtracted from the total free memory
  84 * for calculating free ratios if vm_highmem_is_dirtyable is true
  85 */
  86int vm_highmem_is_dirtyable;
  87
  88/*
  89 * The generator of dirty data starts writeback at this percentage
  90 */
  91int vm_dirty_ratio = 20;
  92
  93/*
  94 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
  95 * vm_dirty_ratio * the amount of dirtyable memory
  96 */
  97unsigned long vm_dirty_bytes;
  98
  99/*
 100 * The interval between `kupdate'-style writebacks
 101 */
 102unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
 103
 104/*
 105 * The longest time for which data is allowed to remain dirty
 106 */
 107unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
 108
 109/*
 110 * Flag that makes the machine dump writes/reads and block dirtyings.
 111 */
 112int block_dump;
 113
 114/*
 115 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 116 * a full sync is triggered after this time elapses without any disk activity.
 117 */
 118int laptop_mode;
 119
 120EXPORT_SYMBOL(laptop_mode);
 121
 122/* End of sysctl-exported parameters */
 123
 124unsigned long global_dirty_limit;
 125
 126/*
 127 * Scale the writeback cache size proportional to the relative writeout speeds.
 128 *
 129 * We do this by keeping a floating proportion between BDIs, based on page
 130 * writeback completions [end_page_writeback()]. Those devices that write out
 131 * pages fastest will get the larger share, while the slower will get a smaller
 132 * share.
 133 *
 134 * We use page writeout completions because we are interested in getting rid of
 135 * dirty pages. Having them written out is the primary goal.
 136 *
 137 * We introduce a concept of time, a period over which we measure these events,
 138 * because demand can/will vary over time. The length of this period itself is
 139 * measured in page writeback completions.
 140 *
 141 */
 142static struct prop_descriptor vm_completions;
 143static struct prop_descriptor vm_dirties;
 144
 145/*
 146 * couple the period to the dirty_ratio:
 147 *
 148 *   period/2 ~ roundup_pow_of_two(dirty limit)
 149 */
 150static int calc_period_shift(void)
 151{
 152	unsigned long dirty_total;
 153
 154	if (vm_dirty_bytes)
 155		dirty_total = vm_dirty_bytes / PAGE_SIZE;
 156	else
 157		dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) /
 158				100;
 159	return 2 + ilog2(dirty_total - 1);
 160}
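/*
 * Example: with vm_dirty_ratio = 20 and one million dirtyable pages,
 * dirty_total = 200000, so the shift is 2 + ilog2(199999) = 19;
 * 2^(19-1) = 262144 = roundup_pow_of_two(200000), matching the
 * period/2 ~ roundup_pow_of_two(dirty limit) relation above.
 */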
 161
 162/*
 163 * update the period when the dirty threshold changes.
 164 */
 165static void update_completion_period(void)
 166{
 167	int shift = calc_period_shift();
 168	prop_change_shift(&vm_completions, shift);
 169	prop_change_shift(&vm_dirties, shift);
 170}
 171
 172int dirty_background_ratio_handler(struct ctl_table *table, int write,
 173		void __user *buffer, size_t *lenp,
 174		loff_t *ppos)
 175{
 176	int ret;
 177
 178	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 179	if (ret == 0 && write)
 180		dirty_background_bytes = 0;
 181	return ret;
 182}
 183
 184int dirty_background_bytes_handler(struct ctl_table *table, int write,
 185		void __user *buffer, size_t *lenp,
 186		loff_t *ppos)
 187{
 188	int ret;
 189
 190	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 191	if (ret == 0 && write)
 192		dirty_background_ratio = 0;
 193	return ret;
 194}
 195
 196int dirty_ratio_handler(struct ctl_table *table, int write,
 197		void __user *buffer, size_t *lenp,
 198		loff_t *ppos)
 199{
 200	int old_ratio = vm_dirty_ratio;
 201	int ret;
 202
 203	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 204	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
 205		update_completion_period();
 206		vm_dirty_bytes = 0;
 207	}
 208	return ret;
 209}
 210
 211
 212int dirty_bytes_handler(struct ctl_table *table, int write,
 213		void __user *buffer, size_t *lenp,
 214		loff_t *ppos)
 215{
 216	unsigned long old_bytes = vm_dirty_bytes;
 217	int ret;
 218
 219	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 220	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
 221		update_completion_period();
 222		vm_dirty_ratio = 0;
 223	}
 224	return ret;
 225}
 226
 227/*
 228 * Increment the BDI's writeout completion count and the global writeout
 229 * completion count. Called from test_clear_page_writeback().
 230 */
 231static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
 232{
 233	__inc_bdi_stat(bdi, BDI_WRITTEN);
 234	__prop_inc_percpu_max(&vm_completions, &bdi->completions,
 235			      bdi->max_prop_frac);
 236}
 237
 238void bdi_writeout_inc(struct backing_dev_info *bdi)
 239{
 240	unsigned long flags;
 241
 242	local_irq_save(flags);
 243	__bdi_writeout_inc(bdi);
 244	local_irq_restore(flags);
 245}
 246EXPORT_SYMBOL_GPL(bdi_writeout_inc);
 247
 248void task_dirty_inc(struct task_struct *tsk)
 249{
 250	prop_inc_single(&vm_dirties, &tsk->dirties);
 251}
 252
 253/*
 254 * Obtain an accurate fraction of the BDI's portion.
 255 */
 256static void bdi_writeout_fraction(struct backing_dev_info *bdi,
 257		long *numerator, long *denominator)
 258{
 259	prop_fraction_percpu(&vm_completions, &bdi->completions,
 260				numerator, denominator);
 261}
 262
 263static inline void task_dirties_fraction(struct task_struct *tsk,
 264		long *numerator, long *denominator)
 265{
 266	prop_fraction_single(&vm_dirties, &tsk->dirties,
 267				numerator, denominator);
 268}
 269
 270/*
 271 * task_dirty_limit - scale down dirty throttling threshold for one task
 272 *
 273 * task specific dirty limit:
 274 *
 275 *   dirty -= (dirty/8) * p_{t}
 276 *
 277 * To protect light/slow dirtying tasks from heavier/fast ones, we start
 278 * throttling individual tasks before reaching the bdi dirty limit.
 279 * Relatively low thresholds will be allocated to heavy dirtiers. So when
 280 * dirty pages grow large, heavy dirtiers will be throttled first, which will
 281 * effectively curb the growth of dirty pages. Light dirtiers with high enough
 282 * dirty threshold may never get throttled.
 283 */
 284#define TASK_LIMIT_FRACTION 8
 285static unsigned long task_dirty_limit(struct task_struct *tsk,
 286				       unsigned long bdi_dirty)
 287{
 288	long numerator, denominator;
 289	unsigned long dirty = bdi_dirty;
 290	u64 inv = dirty / TASK_LIMIT_FRACTION;
 291
 292	task_dirties_fraction(tsk, &numerator, &denominator);
 293	inv *= numerator;
 294	do_div(inv, denominator);
 295
 296	dirty -= inv;
 297
 298	return max(dirty, bdi_dirty/2);
 299}
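/*
 * For illustration: a task responsible for nearly all of the recent dirtying
 * (numerator ~= denominator) has its threshold cut by the full bdi_dirty/8,
 * i.e. down to 7/8 of the bdi limit, while a task that has dirtied almost
 * nothing keeps essentially the whole bdi limit.  The max(dirty, bdi_dirty/2)
 * clamp is only a safety net; with TASK_LIMIT_FRACTION == 8 the reduction can
 * never exceed 1/8.
 */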
 300
 301/* Minimum limit for any task */
 302static unsigned long task_min_dirty_limit(unsigned long bdi_dirty)
 303{
 304	return bdi_dirty - bdi_dirty / TASK_LIMIT_FRACTION;
 305}
 306
  307/*
  308 * Sum of all bdi->min_ratio percentages reserved so far; kept below 100.
  309 */
 310static unsigned int bdi_min_ratio;
 311
 312int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
 313{
 314	int ret = 0;
 315
 316	spin_lock_bh(&bdi_lock);
 317	if (min_ratio > bdi->max_ratio) {
 318		ret = -EINVAL;
 319	} else {
 320		min_ratio -= bdi->min_ratio;
 321		if (bdi_min_ratio + min_ratio < 100) {
 322			bdi_min_ratio += min_ratio;
 323			bdi->min_ratio += min_ratio;
 324		} else {
 325			ret = -EINVAL;
 326		}
 327	}
 328	spin_unlock_bh(&bdi_lock);
 329
 330	return ret;
 331}
 332
 333int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
 334{
 335	int ret = 0;
 336
 337	if (max_ratio > 100)
 338		return -EINVAL;
 339
 340	spin_lock_bh(&bdi_lock);
 341	if (bdi->min_ratio > max_ratio) {
 342		ret = -EINVAL;
 343	} else {
 344		bdi->max_ratio = max_ratio;
 345		bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
 346	}
 347	spin_unlock_bh(&bdi_lock);
 348
 349	return ret;
 350}
 351EXPORT_SYMBOL(bdi_set_max_ratio);
 352
 353/*
 354 * Work out the current dirty-memory clamping and background writeout
 355 * thresholds.
 356 *
 357 * The main aim here is to lower them aggressively if there is a lot of mapped
  358 * memory around, to avoid stressing page reclaim with lots of unreclaimable
  359 * pages.  It is better to clamp down on writers than to start swapping and
  360 * performing lots of scanning.
 361 *
 362 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 363 *
 364 * We don't permit the clamping level to fall below 5% - that is getting rather
 365 * excessive.
 366 *
 367 * We make sure that the background writeout level is below the adjusted
 368 * clamping level.
 369 */
 370
 371static unsigned long highmem_dirtyable_memory(unsigned long total)
 372{
 373#ifdef CONFIG_HIGHMEM
 374	int node;
 375	unsigned long x = 0;
 376
 377	for_each_node_state(node, N_HIGH_MEMORY) {
 378		struct zone *z =
 379			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
 380
 381		x += zone_page_state(z, NR_FREE_PAGES) +
 382		     zone_reclaimable_pages(z);
 383	}
 384	/*
 385	 * Make sure that the number of highmem pages is never larger
 386	 * than the number of the total dirtyable memory. This can only
 387	 * occur in very strange VM situations but we want to make sure
 388	 * that this does not occur.
 389	 */
 390	return min(x, total);
 391#else
 392	return 0;
 393#endif
 394}
 395
 396/**
 397 * determine_dirtyable_memory - amount of memory that may be used
 398 *
  399 * Returns the number of pages that can currently be freed and used
 400 * by the kernel for direct mappings.
 401 */
 402unsigned long determine_dirtyable_memory(void)
 403{
 404	unsigned long x;
 405
 406	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
 407
 408	if (!vm_highmem_is_dirtyable)
 409		x -= highmem_dirtyable_memory(x);
 410
 411	return x + 1;	/* Ensure that we never return 0 */
 412}
 413
 414static unsigned long hard_dirty_limit(unsigned long thresh)
 415{
 416	return max(thresh, global_dirty_limit);
 417}
 418
 419/*
 420 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 421 *
 422 * Calculate the dirty thresholds based on sysctl parameters
 423 * - vm.dirty_background_ratio  or  vm.dirty_background_bytes
 424 * - vm.dirty_ratio             or  vm.dirty_bytes
 425 * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
 426 * real-time tasks.
 427 */
 428void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 429{
 430	unsigned long background;
 431	unsigned long dirty;
 432	unsigned long uninitialized_var(available_memory);
 433	struct task_struct *tsk;
 434
 435	if (!vm_dirty_bytes || !dirty_background_bytes)
 436		available_memory = determine_dirtyable_memory();
 437
 438	if (vm_dirty_bytes)
 439		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
 440	else
 441		dirty = (vm_dirty_ratio * available_memory) / 100;
 442
 443	if (dirty_background_bytes)
 444		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
 445	else
 446		background = (dirty_background_ratio * available_memory) / 100;
 447
 448	if (background >= dirty)
 449		background = dirty / 2;
 450	tsk = current;
 451	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
 452		background += background / 4;
 453		dirty += dirty / 4;
 454	}
 455	*pbackground = background;
 456	*pdirty = dirty;
 457	trace_global_dirty_state(background, dirty);
 458}
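/*
 * For illustration, on a machine with 1,000,000 dirtyable pages and the
 * default vm_dirty_ratio = 20 / dirty_background_ratio = 10, this yields
 * dirty = 200,000 and background = 100,000 pages; a PF_LESS_THROTTLE or
 * real-time task would instead see 250,000 and 125,000.
 */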
 459
 460/**
 461 * bdi_dirty_limit - @bdi's share of dirty throttling threshold
 462 * @bdi: the backing_dev_info to query
 463 * @dirty: global dirty limit in pages
 464 *
 465 * Returns @bdi's dirty limit in pages. The term "dirty" in the context of
 466 * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
  467 * The "limit" in the name is not treated as a strict hard limit in
 468 * balance_dirty_pages().
 469 *
 470 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
 471 * - starving fast devices
  472 * - piling up dirty pages (that will take a long time to sync) on slow devices
 473 *
 474 * The bdi's share of dirty limit will be adapting to its throughput and
 475 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 476 */
 477unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
 478{
 479	u64 bdi_dirty;
 480	long numerator, denominator;
 481
 482	/*
 483	 * Calculate this BDI's share of the dirty ratio.
 484	 */
 485	bdi_writeout_fraction(bdi, &numerator, &denominator);
 486
 487	bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
 488	bdi_dirty *= numerator;
 489	do_div(bdi_dirty, denominator);
 490
 491	bdi_dirty += (dirty * bdi->min_ratio) / 100;
 492	if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
 493		bdi_dirty = dirty * bdi->max_ratio / 100;
 494
 495	return bdi_dirty;
 496}
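/*
 * For illustration, with a global limit of 1000 pages, bdi_min_ratio == 0 and
 * a device that has completed 30% of the recent writeout (numerator/denominator
 * == 3/10), the bdi share works out to 300 pages, raised by any per-bdi
 * min_ratio and capped by its max_ratio (100 by default).
 */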
 497
 498static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
 499				       unsigned long elapsed,
 500				       unsigned long written)
 501{
 502	const unsigned long period = roundup_pow_of_two(3 * HZ);
 503	unsigned long avg = bdi->avg_write_bandwidth;
 504	unsigned long old = bdi->write_bandwidth;
 505	u64 bw;
 506
 507	/*
 508	 * bw = written * HZ / elapsed
 509	 *
 510	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
 511	 * write_bandwidth = ---------------------------------------------------
 512	 *                                          period
 513	 */
 514	bw = written - bdi->written_stamp;
 515	bw *= HZ;
 516	if (unlikely(elapsed > period)) {
 517		do_div(bw, elapsed);
 518		avg = bw;
 519		goto out;
 520	}
 521	bw += (u64)bdi->write_bandwidth * (period - elapsed);
 522	bw >>= ilog2(period);
 523
 524	/*
 525	 * one more level of smoothing, for filtering out sudden spikes
 526	 */
 527	if (avg > old && old >= (unsigned long)bw)
 528		avg -= (avg - old) >> 3;
 529
 530	if (avg < old && old <= (unsigned long)bw)
 531		avg += (old - avg) >> 3;
 532
 533out:
 534	bdi->write_bandwidth = bw;
 535	bdi->avg_write_bandwidth = avg;
 536}
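/*
 * For illustration, assuming HZ == 1000 (so period == 4096 jiffies): if 100
 * pages were written during a 200-jiffy interval (an instantaneous rate of
 * 500 pages/s) and the previous write_bandwidth was 400 pages/s, the new
 * write_bandwidth is (100 * 1000 + 400 * 3896) / 4096 ~= 405 pages/s, i.e.
 * only a small step towards the instantaneous rate; avg_write_bandwidth is
 * then smoothed once more to filter out spikes.
 */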
 537
 538/*
 539 * The global dirtyable memory and dirty threshold could be suddenly knocked
 540 * down by a large amount (eg. on the startup of KVM in a swapless system).
 541 * This may throw the system into deep dirty exceeded state and throttle
  542 * heavy/light dirtiers alike. To retain good responsiveness, maintain
  543 * global_dirty_limit, which tracks slowly down to the knocked-down dirty
  544 * threshold.
 545 */
 546static void update_dirty_limit(unsigned long thresh, unsigned long dirty)
 547{
 548	unsigned long limit = global_dirty_limit;
 549
 550	/*
 551	 * Follow up in one step.
 552	 */
 553	if (limit < thresh) {
 554		limit = thresh;
 555		goto update;
 556	}
 557
 558	/*
 559	 * Follow down slowly. Use the higher one as the target, because thresh
 560	 * may drop below dirty. This is exactly the reason to introduce
 561	 * global_dirty_limit which is guaranteed to lie above the dirty pages.
 562	 */
 563	thresh = max(thresh, dirty);
 564	if (limit > thresh) {
 565		limit -= (limit - thresh) >> 5;
 566		goto update;
 567	}
 568	return;
 569update:
 570	global_dirty_limit = limit;
 571}
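/*
 * For illustration: if thresh suddenly drops from 100,000 to 20,000 pages,
 * global_dirty_limit is not lowered at once; each 200ms update shaves off
 * (limit - thresh) / 32, i.e. roughly 3% of the remaining gap, until the
 * limit meets the new threshold (or the current dirty count, whichever is
 * higher).
 */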
 572
 573static void global_update_bandwidth(unsigned long thresh,
 574				    unsigned long dirty,
 575				    unsigned long now)
 576{
 577	static DEFINE_SPINLOCK(dirty_lock);
 578	static unsigned long update_time;
 579
 580	/*
  581	 * check locklessly first to optimize away locking most of the time
 582	 */
 583	if (time_before(now, update_time + BANDWIDTH_INTERVAL))
 584		return;
 585
 586	spin_lock(&dirty_lock);
 587	if (time_after_eq(now, update_time + BANDWIDTH_INTERVAL)) {
 588		update_dirty_limit(thresh, dirty);
 589		update_time = now;
 590	}
 591	spin_unlock(&dirty_lock);
 592}
 593
 594void __bdi_update_bandwidth(struct backing_dev_info *bdi,
 595			    unsigned long thresh,
 596			    unsigned long dirty,
 597			    unsigned long bdi_thresh,
 598			    unsigned long bdi_dirty,
 599			    unsigned long start_time)
 600{
 601	unsigned long now = jiffies;
 602	unsigned long elapsed = now - bdi->bw_time_stamp;
 603	unsigned long written;
 604
 605	/*
 606	 * rate-limit, only update once every 200ms.
 607	 */
 608	if (elapsed < BANDWIDTH_INTERVAL)
 609		return;
 610
 611	written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]);
 612
 613	/*
 614	 * Skip quiet periods when disk bandwidth is under-utilized.
 615	 * (at least 1s idle time between two flusher runs)
 616	 */
 617	if (elapsed > HZ && time_before(bdi->bw_time_stamp, start_time))
 618		goto snapshot;
 619
 620	if (thresh)
 621		global_update_bandwidth(thresh, dirty, now);
 622
 623	bdi_update_write_bandwidth(bdi, elapsed, written);
 624
 625snapshot:
 626	bdi->written_stamp = written;
 627	bdi->bw_time_stamp = now;
 628}
 629
 630static void bdi_update_bandwidth(struct backing_dev_info *bdi,
 631				 unsigned long thresh,
 632				 unsigned long dirty,
 633				 unsigned long bdi_thresh,
 634				 unsigned long bdi_dirty,
 635				 unsigned long start_time)
 636{
 637	if (time_is_after_eq_jiffies(bdi->bw_time_stamp + BANDWIDTH_INTERVAL))
 638		return;
 639	spin_lock(&bdi->wb.list_lock);
 640	__bdi_update_bandwidth(bdi, thresh, dirty, bdi_thresh, bdi_dirty,
 641			       start_time);
 642	spin_unlock(&bdi->wb.list_lock);
 643}
 644
 645/*
 646 * balance_dirty_pages() must be called by processes which are generating dirty
 647 * data.  It looks at the number of dirty pages in the machine and will force
 648 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 649 * If we're over `background_thresh' then the writeback threads are woken to
 650 * perform some writeout.
 651 */
 652static void balance_dirty_pages(struct address_space *mapping,
 653				unsigned long write_chunk)
 654{
 655	unsigned long nr_reclaimable, bdi_nr_reclaimable;
 656	unsigned long nr_dirty;  /* = file_dirty + writeback + unstable_nfs */
 657	unsigned long bdi_dirty;
 658	unsigned long background_thresh;
 659	unsigned long dirty_thresh;
 660	unsigned long bdi_thresh;
 661	unsigned long task_bdi_thresh;
 662	unsigned long min_task_bdi_thresh;
 663	unsigned long pages_written = 0;
 664	unsigned long pause = 1;
 665	bool dirty_exceeded = false;
 666	bool clear_dirty_exceeded = true;
 667	struct backing_dev_info *bdi = mapping->backing_dev_info;
 668	unsigned long start_time = jiffies;
 669
 670	for (;;) {
 671		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
 672					global_page_state(NR_UNSTABLE_NFS);
 673		nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);
 674
 675		global_dirty_limits(&background_thresh, &dirty_thresh);
 676
 677		/*
 678		 * Throttle it only when the background writeback cannot
  679		 * catch up. This avoids (excessively) small writeouts
 680		 * when the bdi limits are ramping up.
 681		 */
 682		if (nr_dirty <= (background_thresh + dirty_thresh) / 2)
 683			break;
 684
 685		bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
 686		min_task_bdi_thresh = task_min_dirty_limit(bdi_thresh);
 687		task_bdi_thresh = task_dirty_limit(current, bdi_thresh);
 688
 689		/*
 690		 * In order to avoid the stacked BDI deadlock we need
 691		 * to ensure we accurately count the 'dirty' pages when
 692		 * the threshold is low.
 693		 *
 694		 * Otherwise it would be possible to get thresh+n pages
 695		 * reported dirty, even though there are thresh-m pages
 696		 * actually dirty; with m+n sitting in the percpu
 697		 * deltas.
 698		 */
 699		if (task_bdi_thresh < 2 * bdi_stat_error(bdi)) {
 700			bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
 701			bdi_dirty = bdi_nr_reclaimable +
 702				    bdi_stat_sum(bdi, BDI_WRITEBACK);
 703		} else {
 704			bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
 705			bdi_dirty = bdi_nr_reclaimable +
 706				    bdi_stat(bdi, BDI_WRITEBACK);
 707		}
 708
 709		/*
  710		 * The bdi thresh is a somewhat "soft" limit derived from the
  711		 * global "hard" limit. The former helps to prevent a heavy-IO
  712		 * bdi or process from holding back light ones; the latter is
  713		 * the last-resort safeguard.
 714		 */
 715		dirty_exceeded = (bdi_dirty > task_bdi_thresh) ||
 716				  (nr_dirty > dirty_thresh);
 717		clear_dirty_exceeded = (bdi_dirty <= min_task_bdi_thresh) &&
 718					(nr_dirty <= dirty_thresh);
 719
 720		if (!dirty_exceeded)
 721			break;
 722
 723		if (!bdi->dirty_exceeded)
 724			bdi->dirty_exceeded = 1;
 725
 726		bdi_update_bandwidth(bdi, dirty_thresh, nr_dirty,
 727				     bdi_thresh, bdi_dirty, start_time);
 728
 729		/* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
 730		 * Unstable writes are a feature of certain networked
 731		 * filesystems (i.e. NFS) in which data may have been
 732		 * written to the server's write cache, but has not yet
 733		 * been flushed to permanent storage.
 734		 * Only move pages to writeback if this bdi is over its
  735		 * threshold; otherwise wait until the disk writes catch
 736		 * up.
 737		 */
 738		trace_balance_dirty_start(bdi);
 739		if (bdi_nr_reclaimable > task_bdi_thresh) {
 740			pages_written += writeback_inodes_wb(&bdi->wb,
 741							     write_chunk);
 742			trace_balance_dirty_written(bdi, pages_written);
 743			if (pages_written >= write_chunk)
 744				break;		/* We've done our duty */
 745		}
 746		__set_current_state(TASK_UNINTERRUPTIBLE);
 747		io_schedule_timeout(pause);
 748		trace_balance_dirty_wait(bdi);
 749
 750		dirty_thresh = hard_dirty_limit(dirty_thresh);
 751		/*
 752		 * max-pause area. If dirty exceeded but still within this
 753		 * area, no need to sleep for more than 200ms: (a) 8 pages per
 754		 * 200ms is typically more than enough to curb heavy dirtiers;
 755		 * (b) the pause time limit makes the dirtiers more responsive.
 756		 */
 757		if (nr_dirty < dirty_thresh &&
 758		    bdi_dirty < (task_bdi_thresh + bdi_thresh) / 2 &&
 759		    time_after(jiffies, start_time + MAX_PAUSE))
 760			break;
 761
 762		/*
 763		 * Increase the delay for each loop, up to our previous
 764		 * default of taking a 100ms nap.
 765		 */
 766		pause <<= 1;
 767		if (pause > HZ / 10)
 768			pause = HZ / 10;
 769	}
 770
 771	/* Clear dirty_exceeded flag only when no task can exceed the limit */
 772	if (clear_dirty_exceeded && bdi->dirty_exceeded)
 773		bdi->dirty_exceeded = 0;
 774
 775	if (writeback_in_progress(bdi))
 776		return;
 777
 778	/*
 779	 * In laptop mode, we wait until hitting the higher threshold before
 780	 * starting background writeout, and then write out all the way down
 781	 * to the lower threshold.  So slow writers cause minimal disk activity.
 782	 *
 783	 * In normal mode, we start background writeout at the lower
 784	 * background_thresh, to keep the amount of dirty memory low.
 785	 */
 786	if ((laptop_mode && pages_written) ||
 787	    (!laptop_mode && (nr_reclaimable > background_thresh)))
 788		bdi_start_background_writeback(bdi);
 789}
 790
 791void set_page_dirty_balance(struct page *page, int page_mkwrite)
 792{
 793	if (set_page_dirty(page) || page_mkwrite) {
 794		struct address_space *mapping = page_mapping(page);
 795
 796		if (mapping)
 797			balance_dirty_pages_ratelimited(mapping);
 798	}
 799}
 800
 801static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;
 802
 803/**
 804 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 805 * @mapping: address_space which was dirtied
 806 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 807 *
 808 * Processes which are dirtying memory should call in here once for each page
 809 * which was newly dirtied.  The function will periodically check the system's
 810 * dirty state and will initiate writeback if needed.
 811 *
 812 * On really big machines, get_writeback_state is expensive, so try to avoid
 813 * calling it too often (ratelimiting).  But once we're over the dirty memory
 814 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 815 * from overshooting the limit by (ratelimit_pages) each.
 816 */
 817void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 818					unsigned long nr_pages_dirtied)
 819{
 820	struct backing_dev_info *bdi = mapping->backing_dev_info;
 821	unsigned long ratelimit;
 822	unsigned long *p;
 823
 824	if (!bdi_cap_account_dirty(bdi))
 825		return;
 826
 827	ratelimit = ratelimit_pages;
 828	if (mapping->backing_dev_info->dirty_exceeded)
 829		ratelimit = 8;
 830
 831	/*
 832	 * Check the rate limiting. Also, we do not want to throttle real-time
 833	 * tasks in balance_dirty_pages(). Period.
 834	 */
 835	preempt_disable();
 836	p =  &__get_cpu_var(bdp_ratelimits);
 837	*p += nr_pages_dirtied;
 838	if (unlikely(*p >= ratelimit)) {
 839		ratelimit = sync_writeback_pages(*p);
 840		*p = 0;
 841		preempt_enable();
 842		balance_dirty_pages(mapping, ratelimit);
 843		return;
 844	}
 845	preempt_enable();
 846}
 847EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
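/*
 * For illustration: with ratelimit_pages at, say, 1024, a task dirtying pages
 * one at a time enters balance_dirty_pages() only about once per 1024 pages
 * dirtied on a given CPU; once the bdi is marked dirty_exceeded, the check
 * happens every 8 pages instead.
 */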
 848
 849void throttle_vm_writeout(gfp_t gfp_mask)
 850{
 851	unsigned long background_thresh;
 852	unsigned long dirty_thresh;
 853
  854	for ( ; ; ) {
  855		global_dirty_limits(&background_thresh, &dirty_thresh);
  856
  857		/*
  858		 * Boost the allowable dirty threshold a bit for page
  859		 * allocators so they don't get DoS'ed by heavy writers
  860		 */
  861		dirty_thresh += dirty_thresh / 10;	/* wheeee... */
  862
  863		if (global_page_state(NR_UNSTABLE_NFS) +
  864		    global_page_state(NR_WRITEBACK) <= dirty_thresh)
  865			break;
  866		congestion_wait(BLK_RW_ASYNC, HZ/10);
  867
  868		/*
  869		 * The caller might hold locks which can prevent IO completion
  870		 * or progress in the filesystem.  So we cannot just sit here
  871		 * waiting for IO to complete.
  872		 */
  873		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
  874			break;
  875	}
 876}
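/*
 * For illustration: with dirty_thresh at 200,000 pages the boosted limit
 * becomes 220,000, so a GFP_KERNEL page allocator is made to wait in 100ms
 * congestion_wait() slices only while writeback + unstable pages exceed that;
 * callers lacking __GFP_FS/__GFP_IO give up after a single sleep.
 */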
 877
 878/*
 879 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 880 */
 881int dirty_writeback_centisecs_handler(ctl_table *table, int write,
 882	void __user *buffer, size_t *length, loff_t *ppos)
 883{
 884	proc_dointvec(table, write, buffer, length, ppos);
 885	bdi_arm_supers_timer();
 886	return 0;
 887}
 888
 889#ifdef CONFIG_BLOCK
 890void laptop_mode_timer_fn(unsigned long data)
 891{
 892	struct request_queue *q = (struct request_queue *)data;
 893	int nr_pages = global_page_state(NR_FILE_DIRTY) +
 894		global_page_state(NR_UNSTABLE_NFS);
 895
 896	/*
 897	 * We want to write everything out, not just down to the dirty
 898	 * threshold
 899	 */
 900	if (bdi_has_dirty_io(&q->backing_dev_info))
 901		bdi_start_writeback(&q->backing_dev_info, nr_pages);
 902}
 903
 904/*
 905 * We've spun up the disk and we're in laptop mode: schedule writeback
 906 * of all dirty data a few seconds from now.  If the flush is already scheduled
 907 * then push it back - the user is still using the disk.
 908 */
 909void laptop_io_completion(struct backing_dev_info *info)
 910{
 911	mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
 912}
 913
 914/*
 915 * We're in laptop mode and we've just synced. The sync's writes will have
 916 * caused another writeback to be scheduled by laptop_io_completion.
 917 * Nothing needs to be written back anymore, so we unschedule the writeback.
 918 */
 919void laptop_sync_completion(void)
 920{
 921	struct backing_dev_info *bdi;
 922
 923	rcu_read_lock();
 924
 925	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
 926		del_timer(&bdi->laptop_mode_wb_timer);
 927
 928	rcu_read_unlock();
 929}
 930#endif
 931
 932/*
 933 * If ratelimit_pages is too high then we can get into dirty-data overload
 934 * if a large number of processes all perform writes at the same time.
 935 * If it is too low then SMP machines will call the (expensive)
 936 * get_writeback_state too often.
 937 *
 938 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 939 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 940 * thresholds before writeback cuts in.
 941 *
 942 * But the limit should not be set too high.  Because it also controls the
 943 * amount of memory which the balance_dirty_pages() caller has to write back.
 944 * If this is too large then the caller will block on the IO queue all the
 945 * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 946 * will write six megabyte chunks, max.
 947 */
 948
 949void writeback_set_ratelimit(void)
 950{
 951	ratelimit_pages = vm_total_pages / (num_online_cpus() * 32);
 952	if (ratelimit_pages < 16)
 953		ratelimit_pages = 16;
 954	if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
 955		ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
 956}
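/*
 * For illustration, assuming 4KB pages: a machine with 4GB of RAM (roughly
 * 1,048,576 pages) and 4 online CPUs would compute 1048576 / (4 * 32) = 8192
 * pages, which the 4MB cap then clamps down to 1024 pages.
 */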
 957
 958static int __cpuinit
 959ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
 960{
 961	writeback_set_ratelimit();
 962	return NOTIFY_DONE;
 963}
 964
 965static struct notifier_block __cpuinitdata ratelimit_nb = {
 966	.notifier_call	= ratelimit_handler,
 967	.next		= NULL,
 968};
 969
 970/*
 971 * Called early on to tune the page writeback dirty limits.
 972 *
 973 * We used to scale dirty pages according to how total memory
 974 * related to pages that could be allocated for buffers (by
  975 * comparing nr_free_buffer_pages() to vm_total_pages).
 976 *
 977 * However, that was when we used "dirty_ratio" to scale with
 978 * all memory, and we don't do that any more. "dirty_ratio"
 979 * is now applied to total non-HIGHPAGE memory (by subtracting
 980 * totalhigh_pages from vm_total_pages), and as such we can't
 981 * get into the old insane situation any more where we had
 982 * large amounts of dirty pages compared to a small amount of
 983 * non-HIGHMEM memory.
 984 *
 985 * But we might still want to scale the dirty_ratio by how
 986 * much memory the box has..
 987 */
 988void __init page_writeback_init(void)
 989{
 990	int shift;
 991
 992	writeback_set_ratelimit();
 993	register_cpu_notifier(&ratelimit_nb);
 994
 995	shift = calc_period_shift();
 996	prop_descriptor_init(&vm_completions, shift);
 997	prop_descriptor_init(&vm_dirties, shift);
 998}
 999
1000/**
1001 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
1002 * @mapping: address space structure to write
1003 * @start: starting page index
1004 * @end: ending page index (inclusive)
1005 *
1006 * This function scans the page range from @start to @end (inclusive) and tags
1007 * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
1008 * that write_cache_pages (or whoever calls this function) will then use
1009 * TOWRITE tag to identify pages eligible for writeback.  This mechanism is
1010 * used to avoid livelocking of writeback by a process steadily creating new
1011 * dirty pages in the file (thus it is important for this function to be quick
1012 * so that it can tag pages faster than a dirtying process can create them).
1013 */
1014/*
1015 * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency.
1016 */
1017void tag_pages_for_writeback(struct address_space *mapping,
1018			     pgoff_t start, pgoff_t end)
1019{
1020#define WRITEBACK_TAG_BATCH 4096
1021	unsigned long tagged;
1022
1023	do {
1024		spin_lock_irq(&mapping->tree_lock);
1025		tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
1026				&start, end, WRITEBACK_TAG_BATCH,
1027				PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
1028		spin_unlock_irq(&mapping->tree_lock);
1029		WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
1030		cond_resched();
1031		/* We check 'start' to handle wrapping when end == ~0UL */
1032	} while (tagged >= WRITEBACK_TAG_BATCH && start);
1033}
1034EXPORT_SYMBOL(tag_pages_for_writeback);
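/*
 * For illustration: tagging proceeds in chunks of WRITEBACK_TAG_BATCH (4096)
 * pages, so for a file with 100,000 dirty pages tree_lock is dropped and
 * re-taken about 25 times, with a cond_resched() between batches.
 */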
1035
1036/**
1037 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
1038 * @mapping: address space structure to write
1039 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
1040 * @writepage: function called for each page
1041 * @data: data passed to writepage function
1042 *
1043 * If a page is already under I/O, write_cache_pages() skips it, even
1044 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
1045 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
1046 * and msync() need to guarantee that all the data which was dirty at the time
1047 * the call was made get new I/O started against them.  If wbc->sync_mode is
1048 * WB_SYNC_ALL then we were called for data integrity and we must wait for
1049 * existing IO to complete.
1050 *
1051 * To avoid livelocks (when other process dirties new pages), we first tag
1052 * pages which should be written back with TOWRITE tag and only then start
1053 * writing them. For data-integrity sync we have to be careful so that we do
1054 * not miss some pages (e.g., because some other process has cleared TOWRITE
1055 * tag we set). The rule we follow is that TOWRITE tag can be cleared only
1056 * by the process clearing the DIRTY tag (and submitting the page for IO).
1057 */
1058int write_cache_pages(struct address_space *mapping,
1059		      struct writeback_control *wbc, writepage_t writepage,
1060		      void *data)
1061{
1062	int ret = 0;
1063	int done = 0;
1064	struct pagevec pvec;
1065	int nr_pages;
1066	pgoff_t uninitialized_var(writeback_index);
1067	pgoff_t index;
1068	pgoff_t end;		/* Inclusive */
1069	pgoff_t done_index;
1070	int cycled;
1071	int range_whole = 0;
1072	int tag;
1073
1074	pagevec_init(&pvec, 0);
1075	if (wbc->range_cyclic) {
1076		writeback_index = mapping->writeback_index; /* prev offset */
1077		index = writeback_index;
1078		if (index == 0)
1079			cycled = 1;
1080		else
1081			cycled = 0;
1082		end = -1;
1083	} else {
1084		index = wbc->range_start >> PAGE_CACHE_SHIFT;
1085		end = wbc->range_end >> PAGE_CACHE_SHIFT;
1086		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1087			range_whole = 1;
1088		cycled = 1; /* ignore range_cyclic tests */
1089	}
1090	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1091		tag = PAGECACHE_TAG_TOWRITE;
1092	else
1093		tag = PAGECACHE_TAG_DIRTY;
1094retry:
1095	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1096		tag_pages_for_writeback(mapping, index, end);
1097	done_index = index;
1098	while (!done && (index <= end)) {
1099		int i;
1100
1101		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
1102			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1103		if (nr_pages == 0)
1104			break;
1105
1106		for (i = 0; i < nr_pages; i++) {
1107			struct page *page = pvec.pages[i];
1108
1109			/*
1110			 * At this point, the page may be truncated or
1111			 * invalidated (changing page->mapping to NULL), or
1112			 * even swizzled back from swapper_space to tmpfs file
1113			 * mapping. However, page->index will not change
1114			 * because we have a reference on the page.
1115			 */
1116			if (page->index > end) {
1117				/*
1118				 * can't be range_cyclic (1st pass) because
1119				 * end == -1 in that case.
1120				 */
1121				done = 1;
1122				break;
1123			}
1124
1125			done_index = page->index;
1126
1127			lock_page(page);
1128
1129			/*
1130			 * Page truncated or invalidated. We can freely skip it
1131			 * then, even for data integrity operations: the page
1132			 * has disappeared concurrently, so there could be no
 1133			 * real expectation of this data integrity operation
1134			 * even if there is now a new, dirty page at the same
1135			 * pagecache address.
1136			 */
1137			if (unlikely(page->mapping != mapping)) {
1138continue_unlock:
1139				unlock_page(page);
1140				continue;
1141			}
1142
1143			if (!PageDirty(page)) {
1144				/* someone wrote it for us */
1145				goto continue_unlock;
1146			}
1147
1148			if (PageWriteback(page)) {
1149				if (wbc->sync_mode != WB_SYNC_NONE)
1150					wait_on_page_writeback(page);
1151				else
1152					goto continue_unlock;
1153			}
1154
1155			BUG_ON(PageWriteback(page));
1156			if (!clear_page_dirty_for_io(page))
1157				goto continue_unlock;
1158
1159			trace_wbc_writepage(wbc, mapping->backing_dev_info);
1160			ret = (*writepage)(page, wbc, data);
1161			if (unlikely(ret)) {
1162				if (ret == AOP_WRITEPAGE_ACTIVATE) {
1163					unlock_page(page);
1164					ret = 0;
1165				} else {
1166					/*
1167					 * done_index is set past this page,
1168					 * so media errors will not choke
1169					 * background writeout for the entire
1170					 * file. This has consequences for
1171					 * range_cyclic semantics (ie. it may
1172					 * not be suitable for data integrity
1173					 * writeout).
1174					 */
1175					done_index = page->index + 1;
1176					done = 1;
1177					break;
1178				}
1179			}
1180
1181			/*
1182			 * We stop writing back only if we are not doing
1183			 * integrity sync. In case of integrity sync we have to
1184			 * keep going until we have written all the pages
1185			 * we tagged for writeback prior to entering this loop.
1186			 */
1187			if (--wbc->nr_to_write <= 0 &&
1188			    wbc->sync_mode == WB_SYNC_NONE) {
1189				done = 1;
1190				break;
1191			}
1192		}
1193		pagevec_release(&pvec);
1194		cond_resched();
1195	}
1196	if (!cycled && !done) {
1197		/*
1198		 * range_cyclic:
1199		 * We hit the last page and there is more work to be done: wrap
1200		 * back to the start of the file
1201		 */
1202		cycled = 1;
1203		index = 0;
1204		end = writeback_index - 1;
1205		goto retry;
1206	}
1207	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1208		mapping->writeback_index = done_index;
1209
1210	return ret;
1211}
1212EXPORT_SYMBOL(write_cache_pages);
1213
1214/*
1215 * Function used by generic_writepages to call the real writepage
1216 * function and set the mapping flags on error
1217 */
1218static int __writepage(struct page *page, struct writeback_control *wbc,
1219		       void *data)
1220{
1221	struct address_space *mapping = data;
1222	int ret = mapping->a_ops->writepage(page, wbc);
1223	mapping_set_error(mapping, ret);
1224	return ret;
1225}
1226
1227/**
1228 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
1229 * @mapping: address space structure to write
1230 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
1231 *
1232 * This is a library function, which implements the writepages()
1233 * address_space_operation.
1234 */
1235int generic_writepages(struct address_space *mapping,
1236		       struct writeback_control *wbc)
1237{
1238	struct blk_plug plug;
1239	int ret;
1240
1241	/* deal with chardevs and other special file */
1242	if (!mapping->a_ops->writepage)
1243		return 0;
1244
1245	blk_start_plug(&plug);
1246	ret = write_cache_pages(mapping, wbc, __writepage, mapping);
1247	blk_finish_plug(&plug);
1248	return ret;
1249}
1250
1251EXPORT_SYMBOL(generic_writepages);
1252
1253int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
1254{
1255	int ret;
1256
1257	if (wbc->nr_to_write <= 0)
1258		return 0;
1259	if (mapping->a_ops->writepages)
1260		ret = mapping->a_ops->writepages(mapping, wbc);
1261	else
1262		ret = generic_writepages(mapping, wbc);
1263	return ret;
1264}
1265
1266/**
1267 * write_one_page - write out a single page and optionally wait on I/O
1268 * @page: the page to write
1269 * @wait: if true, wait on writeout
1270 *
1271 * The page must be locked by the caller and will be unlocked upon return.
1272 *
1273 * write_one_page() returns a negative error code if I/O failed.
1274 */
1275int write_one_page(struct page *page, int wait)
1276{
1277	struct address_space *mapping = page->mapping;
1278	int ret = 0;
1279	struct writeback_control wbc = {
1280		.sync_mode = WB_SYNC_ALL,
1281		.nr_to_write = 1,
1282	};
1283
1284	BUG_ON(!PageLocked(page));
1285
1286	if (wait)
1287		wait_on_page_writeback(page);
1288
1289	if (clear_page_dirty_for_io(page)) {
1290		page_cache_get(page);
1291		ret = mapping->a_ops->writepage(page, &wbc);
1292		if (ret == 0 && wait) {
1293			wait_on_page_writeback(page);
1294			if (PageError(page))
1295				ret = -EIO;
1296		}
1297		page_cache_release(page);
1298	} else {
1299		unlock_page(page);
1300	}
1301	return ret;
1302}
1303EXPORT_SYMBOL(write_one_page);
1304
1305/*
1306 * For address_spaces which do not use buffers nor write back.
1307 */
1308int __set_page_dirty_no_writeback(struct page *page)
1309{
1310	if (!PageDirty(page))
1311		return !TestSetPageDirty(page);
1312	return 0;
1313}
1314
1315/*
1316 * Helper function for set_page_dirty family.
1317 * NOTE: This relies on being atomic wrt interrupts.
1318 */
1319void account_page_dirtied(struct page *page, struct address_space *mapping)
1320{
1321	if (mapping_cap_account_dirty(mapping)) {
1322		__inc_zone_page_state(page, NR_FILE_DIRTY);
1323		__inc_zone_page_state(page, NR_DIRTIED);
1324		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
1325		task_dirty_inc(current);
1326		task_io_account_write(PAGE_CACHE_SIZE);
1327	}
1328}
1329EXPORT_SYMBOL(account_page_dirtied);
1330
1331/*
1332 * Helper function for set_page_writeback family.
1333 * NOTE: Unlike account_page_dirtied this does not rely on being atomic
1334 * wrt interrupts.
1335 */
1336void account_page_writeback(struct page *page)
1337{
1338	inc_zone_page_state(page, NR_WRITEBACK);
1339}
1340EXPORT_SYMBOL(account_page_writeback);
1341
1342/*
1343 * For address_spaces which do not use buffers.  Just tag the page as dirty in
1344 * its radix tree.
1345 *
1346 * This is also used when a single buffer is being dirtied: we want to set the
1347 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
1348 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
1349 *
1350 * Most callers have locked the page, which pins the address_space in memory.
 1351 * But zap_pte_range() does not lock the page; in that case, however, the
 1352 * mapping is pinned by the vma's ->vm_file reference.
1353 *
1354 * We take care to handle the case where the page was truncated from the
1355 * mapping by re-checking page_mapping() inside tree_lock.
1356 */
1357int __set_page_dirty_nobuffers(struct page *page)
1358{
1359	if (!TestSetPageDirty(page)) {
1360		struct address_space *mapping = page_mapping(page);
1361		struct address_space *mapping2;
1362
1363		if (!mapping)
1364			return 1;
1365
1366		spin_lock_irq(&mapping->tree_lock);
1367		mapping2 = page_mapping(page);
1368		if (mapping2) { /* Race with truncate? */
1369			BUG_ON(mapping2 != mapping);
1370			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
1371			account_page_dirtied(page, mapping);
1372			radix_tree_tag_set(&mapping->page_tree,
1373				page_index(page), PAGECACHE_TAG_DIRTY);
1374		}
1375		spin_unlock_irq(&mapping->tree_lock);
1376		if (mapping->host) {
1377			/* !PageAnon && !swapper_space */
1378			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1379		}
1380		return 1;
1381	}
1382	return 0;
1383}
1384EXPORT_SYMBOL(__set_page_dirty_nobuffers);
1385
1386/*
1387 * When a writepage implementation decides that it doesn't want to write this
1388 * page for some reason, it should redirty the locked page via
1389 * redirty_page_for_writepage() and it should then unlock the page and return 0
1390 */
1391int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
1392{
1393	wbc->pages_skipped++;
1394	return __set_page_dirty_nobuffers(page);
1395}
1396EXPORT_SYMBOL(redirty_page_for_writepage);
1397
1398/*
1399 * Dirty a page.
1400 *
1401 * For pages with a mapping this should be done under the page lock
 1402 * for the benefit of asynchronous memory-error handling, which prefers a
 1403 * consistent dirty state. This rule can be broken in some special cases,
 1404 * but it is better not to.
1405 *
1406 * If the mapping doesn't provide a set_page_dirty a_op, then
1407 * just fall through and assume that it wants buffer_heads.
1408 */
1409int set_page_dirty(struct page *page)
1410{
1411	struct address_space *mapping = page_mapping(page);
1412
1413	if (likely(mapping)) {
1414		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
1415		/*
 1416		 * readahead/lru_deactivate_page could leave PG_readahead/
 1417		 * PG_reclaim set due to a race with end_page_writeback().
 1418		 * For readahead: if the page gets written, the flag is
 1419		 * reset, so there is no problem.
 1420		 * For lru_deactivate_page: if the page is redirtied, the flag
 1421		 * is reset, so there is no problem either; but if the page is
 1422		 * then used by readahead it will confuse readahead and make it
 1423		 * restart the size rampup process, which is a minor problem.
1424		 */
1425		ClearPageReclaim(page);
1426#ifdef CONFIG_BLOCK
1427		if (!spd)
1428			spd = __set_page_dirty_buffers;
1429#endif
1430		return (*spd)(page);
1431	}
1432	if (!PageDirty(page)) {
1433		if (!TestSetPageDirty(page))
1434			return 1;
1435	}
1436	return 0;
1437}
1438EXPORT_SYMBOL(set_page_dirty);
1439
1440/*
1441 * set_page_dirty() is racy if the caller has no reference against
1442 * page->mapping->host, and if the page is unlocked.  This is because another
1443 * CPU could truncate the page off the mapping and then free the mapping.
1444 *
1445 * Usually, the page _is_ locked, or the caller is a user-space process which
1446 * holds a reference on the inode by having an open file.
1447 *
1448 * In other cases, the page should be locked before running set_page_dirty().
1449 */
1450int set_page_dirty_lock(struct page *page)
1451{
1452	int ret;
1453
1454	lock_page(page);
1455	ret = set_page_dirty(page);
1456	unlock_page(page);
1457	return ret;
1458}
1459EXPORT_SYMBOL(set_page_dirty_lock);
1460
1461/*
1462 * Clear a page's dirty flag, while caring for dirty memory accounting.
1463 * Returns true if the page was previously dirty.
1464 *
1465 * This is for preparing to put the page under writeout.  We leave the page
1466 * tagged as dirty in the radix tree so that a concurrent write-for-sync
1467 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
1468 * implementation will run either set_page_writeback() or set_page_dirty(),
1469 * at which stage we bring the page's dirty flag and radix-tree dirty tag
1470 * back into sync.
1471 *
1472 * This incoherency between the page's dirty flag and radix-tree tag is
1473 * unfortunate, but it only exists while the page is locked.
1474 */
1475int clear_page_dirty_for_io(struct page *page)
1476{
1477	struct address_space *mapping = page_mapping(page);
1478
1479	BUG_ON(!PageLocked(page));
1480
1481	if (mapping && mapping_cap_account_dirty(mapping)) {
1482		/*
1483		 * Yes, Virginia, this is indeed insane.
1484		 *
1485		 * We use this sequence to make sure that
1486		 *  (a) we account for dirty stats properly
1487		 *  (b) we tell the low-level filesystem to
1488		 *      mark the whole page dirty if it was
1489		 *      dirty in a pagetable. Only to then
1490		 *  (c) clean the page again and return 1 to
1491		 *      cause the writeback.
1492		 *
1493		 * This way we avoid all nasty races with the
1494		 * dirty bit in multiple places and clearing
1495		 * them concurrently from different threads.
1496		 *
1497		 * Note! Normally the "set_page_dirty(page)"
1498		 * has no effect on the actual dirty bit - since
1499		 * that will already usually be set. But we
1500		 * need the side effects, and it can help us
1501		 * avoid races.
1502		 *
1503		 * We basically use the page "master dirty bit"
1504		 * as a serialization point for all the different
1505		 * threads doing their things.
1506		 */
1507		if (page_mkclean(page))
1508			set_page_dirty(page);
1509		/*
1510		 * We carefully synchronise fault handlers against
1511		 * installing a dirty pte and marking the page dirty
1512		 * at this point. We do this by having them hold the
1513		 * page lock at some point after installing their
1514		 * pte, but before marking the page dirty.
1515		 * Pages are always locked coming in here, so we get
1516		 * the desired exclusion. See mm/memory.c:do_wp_page()
1517		 * for more comments.
1518		 */
1519		if (TestClearPageDirty(page)) {
1520			dec_zone_page_state(page, NR_FILE_DIRTY);
1521			dec_bdi_stat(mapping->backing_dev_info,
1522					BDI_RECLAIMABLE);
1523			return 1;
1524		}
1525		return 0;
1526	}
1527	return TestClearPageDirty(page);
1528}
1529EXPORT_SYMBOL(clear_page_dirty_for_io);
1530
1531int test_clear_page_writeback(struct page *page)
1532{
1533	struct address_space *mapping = page_mapping(page);
1534	int ret;
1535
1536	if (mapping) {
1537		struct backing_dev_info *bdi = mapping->backing_dev_info;
1538		unsigned long flags;
1539
1540		spin_lock_irqsave(&mapping->tree_lock, flags);
1541		ret = TestClearPageWriteback(page);
1542		if (ret) {
1543			radix_tree_tag_clear(&mapping->page_tree,
1544						page_index(page),
1545						PAGECACHE_TAG_WRITEBACK);
1546			if (bdi_cap_account_writeback(bdi)) {
1547				__dec_bdi_stat(bdi, BDI_WRITEBACK);
1548				__bdi_writeout_inc(bdi);
1549			}
1550		}
1551		spin_unlock_irqrestore(&mapping->tree_lock, flags);
1552	} else {
1553		ret = TestClearPageWriteback(page);
1554	}
1555	if (ret) {
1556		dec_zone_page_state(page, NR_WRITEBACK);
1557		inc_zone_page_state(page, NR_WRITTEN);
1558	}
1559	return ret;
1560}
1561
1562int test_set_page_writeback(struct page *page)
1563{
1564	struct address_space *mapping = page_mapping(page);
1565	int ret;
1566
1567	if (mapping) {
1568		struct backing_dev_info *bdi = mapping->backing_dev_info;
1569		unsigned long flags;
1570
1571		spin_lock_irqsave(&mapping->tree_lock, flags);
1572		ret = TestSetPageWriteback(page);
1573		if (!ret) {
1574			radix_tree_tag_set(&mapping->page_tree,
1575						page_index(page),
1576						PAGECACHE_TAG_WRITEBACK);
1577			if (bdi_cap_account_writeback(bdi))
1578				__inc_bdi_stat(bdi, BDI_WRITEBACK);
1579		}
1580		if (!PageDirty(page))
1581			radix_tree_tag_clear(&mapping->page_tree,
1582						page_index(page),
1583						PAGECACHE_TAG_DIRTY);
1584		radix_tree_tag_clear(&mapping->page_tree,
1585				     page_index(page),
1586				     PAGECACHE_TAG_TOWRITE);
1587		spin_unlock_irqrestore(&mapping->tree_lock, flags);
1588	} else {
1589		ret = TestSetPageWriteback(page);
1590	}
1591	if (!ret)
1592		account_page_writeback(page);
1593	return ret;
1594
1595}
1596EXPORT_SYMBOL(test_set_page_writeback);
1597
1598/*
1599 * Return true if any of the pages in the mapping are marked with the
1600 * passed tag.
1601 */
1602int mapping_tagged(struct address_space *mapping, int tag)
1603{
1604	return radix_tree_tagged(&mapping->page_tree, tag);
1605}
1606EXPORT_SYMBOL(mapping_tagged);
v3.1
   1/*
   2 * mm/page-writeback.c
   3 *
   4 * Copyright (C) 2002, Linus Torvalds.
   5 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
   6 *
   7 * Contains functions related to writing back dirty pages at the
   8 * address_space level.
   9 *
  10 * 10Apr2002	Andrew Morton
  11 *		Initial version
  12 */
  13
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/spinlock.h>
  17#include <linux/fs.h>
  18#include <linux/mm.h>
  19#include <linux/swap.h>
  20#include <linux/slab.h>
  21#include <linux/pagemap.h>
  22#include <linux/writeback.h>
  23#include <linux/init.h>
  24#include <linux/backing-dev.h>
  25#include <linux/task_io_accounting_ops.h>
  26#include <linux/blkdev.h>
  27#include <linux/mpage.h>
  28#include <linux/rmap.h>
  29#include <linux/percpu.h>
  30#include <linux/notifier.h>
  31#include <linux/smp.h>
  32#include <linux/sysctl.h>
  33#include <linux/cpu.h>
  34#include <linux/syscalls.h>
  35#include <linux/buffer_head.h>
  36#include <linux/pagevec.h>
  37#include <trace/events/writeback.h>
  38
  39/*
  40 * Sleep at most 200ms at a time in balance_dirty_pages().
  41 */
  42#define MAX_PAUSE		max(HZ/5, 1)
  43
  44/*
  45 * Estimate write bandwidth at 200ms intervals.
  46 */
  47#define BANDWIDTH_INTERVAL	max(HZ/5, 1)
  48
  49/*
  50 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
  51 * will look to see if it needs to force writeback or throttling.
  52 */
  53static long ratelimit_pages = 32;
  54
  55/*
  56 * When balance_dirty_pages decides that the caller needs to perform some
  57 * non-background writeback, this is how many pages it will attempt to write.
  58 * It should be somewhat larger than dirtied pages to ensure that reasonably
  59 * large amounts of I/O are submitted.
  60 */
  61static inline long sync_writeback_pages(unsigned long dirtied)
  62{
  63	if (dirtied < ratelimit_pages)
  64		dirtied = ratelimit_pages;
  65
  66	return dirtied + dirtied / 2;
  67}
  68
  69/* The following parameters are exported via /proc/sys/vm */
  70
  71/*
  72 * Start background writeback (via writeback threads) at this percentage
  73 */
  74int dirty_background_ratio = 10;
  75
  76/*
  77 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
  78 * dirty_background_ratio * the amount of dirtyable memory
  79 */
  80unsigned long dirty_background_bytes;
  81
  82/*
  83 * free highmem will not be subtracted from the total free memory
  84 * for calculating free ratios if vm_highmem_is_dirtyable is true
  85 */
  86int vm_highmem_is_dirtyable;
  87
  88/*
  89 * The generator of dirty data starts writeback at this percentage
  90 */
  91int vm_dirty_ratio = 20;
  92
  93/*
  94 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
  95 * vm_dirty_ratio * the amount of dirtyable memory
  96 */
  97unsigned long vm_dirty_bytes;
  98
  99/*
 100 * The interval between `kupdate'-style writebacks
 101 */
 102unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
 103
 104/*
 105 * The longest time for which data is allowed to remain dirty
 106 */
 107unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
 108
 109/*
 110 * Flag that makes the machine dump writes/reads and block dirtyings.
 111 */
 112int block_dump;
 113
 114/*
 115 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 116 * a full sync is triggered after this time elapses without any disk activity.
 117 */
 118int laptop_mode;
 119
 120EXPORT_SYMBOL(laptop_mode);
 121
 122/* End of sysctl-exported parameters */
 123
 124unsigned long global_dirty_limit;
 125
 126/*
 127 * Scale the writeback cache size proportional to the relative writeout speeds.
 128 *
 129 * We do this by keeping a floating proportion between BDIs, based on page
 130 * writeback completions [end_page_writeback()]. Those devices that write out
 131 * pages fastest will get the larger share, while the slower will get a smaller
 132 * share.
 133 *
 134 * We use page writeout completions because we are interested in getting rid of
 135 * dirty pages. Having them written out is the primary goal.
 136 *
 137 * We introduce a concept of time, a period over which we measure these events,
 138 * because demand can/will vary over time. The length of this period itself is
 139 * measured in page writeback completions.
 140 *
 141 */
 142static struct prop_descriptor vm_completions;
 143static struct prop_descriptor vm_dirties;
 144
 145/*
 146 * couple the period to the dirty_ratio:
 147 *
 148 *   period/2 ~ roundup_pow_of_two(dirty limit)
 149 */
 150static int calc_period_shift(void)
 151{
 152	unsigned long dirty_total;
 153
 154	if (vm_dirty_bytes)
 155		dirty_total = vm_dirty_bytes / PAGE_SIZE;
 156	else
 157		dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) /
 158				100;
 159	return 2 + ilog2(dirty_total - 1);
 160}
 161
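/*
 * Rough worked example (illustrative numbers, not part of the original
 * source): assume vm_dirty_bytes == 0, vm_dirty_ratio = 20 and
 * determine_dirtyable_memory() ~= 1,000,000 pages.  Then
 * dirty_total = 200,000, ilog2(199,999) = 17 and the shift is 19, so the
 * proportion period is 2^19 completions and period/2 = 262,144, which is
 * indeed roundup_pow_of_two(200,000) as promised above.
 */
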
 162/*
 163 * update the period when the dirty threshold changes.
 164 */
 165static void update_completion_period(void)
 166{
 167	int shift = calc_period_shift();
 168	prop_change_shift(&vm_completions, shift);
 169	prop_change_shift(&vm_dirties, shift);
 170}
 171
 172int dirty_background_ratio_handler(struct ctl_table *table, int write,
 173		void __user *buffer, size_t *lenp,
 174		loff_t *ppos)
 175{
 176	int ret;
 177
 178	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 179	if (ret == 0 && write)
 180		dirty_background_bytes = 0;
 181	return ret;
 182}
 183
 184int dirty_background_bytes_handler(struct ctl_table *table, int write,
 185		void __user *buffer, size_t *lenp,
 186		loff_t *ppos)
 187{
 188	int ret;
 189
 190	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 191	if (ret == 0 && write)
 192		dirty_background_ratio = 0;
 193	return ret;
 194}
 195
 196int dirty_ratio_handler(struct ctl_table *table, int write,
 197		void __user *buffer, size_t *lenp,
 198		loff_t *ppos)
 199{
 200	int old_ratio = vm_dirty_ratio;
 201	int ret;
 202
 203	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 204	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
 205		update_completion_period();
 206		vm_dirty_bytes = 0;
 207	}
 208	return ret;
 209}
 210
 211
 212int dirty_bytes_handler(struct ctl_table *table, int write,
 213		void __user *buffer, size_t *lenp,
 214		loff_t *ppos)
 215{
 216	unsigned long old_bytes = vm_dirty_bytes;
 217	int ret;
 218
 219	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 220	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
 221		update_completion_period();
 222		vm_dirty_ratio = 0;
 223	}
 224	return ret;
 225}
 226
 227/*
 228 * Increment the BDI's writeout completion count and the global writeout
 229 * completion count. Called from test_clear_page_writeback().
 230 */
 231static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
 232{
 233	__inc_bdi_stat(bdi, BDI_WRITTEN);
 234	__prop_inc_percpu_max(&vm_completions, &bdi->completions,
 235			      bdi->max_prop_frac);
 236}
 237
 238void bdi_writeout_inc(struct backing_dev_info *bdi)
 239{
 240	unsigned long flags;
 241
 242	local_irq_save(flags);
 243	__bdi_writeout_inc(bdi);
 244	local_irq_restore(flags);
 245}
 246EXPORT_SYMBOL_GPL(bdi_writeout_inc);
 247
 248void task_dirty_inc(struct task_struct *tsk)
 249{
 250	prop_inc_single(&vm_dirties, &tsk->dirties);
 251}
 252
 253/*
 254 * Obtain an accurate fraction of the BDI's portion.
 255 */
 256static void bdi_writeout_fraction(struct backing_dev_info *bdi,
 257		long *numerator, long *denominator)
 258{
 259	prop_fraction_percpu(&vm_completions, &bdi->completions,
 260				numerator, denominator);
 261}
 262
 263static inline void task_dirties_fraction(struct task_struct *tsk,
 264		long *numerator, long *denominator)
 265{
 266	prop_fraction_single(&vm_dirties, &tsk->dirties,
 267				numerator, denominator);
 268}
 269
 270/*
 271 * task_dirty_limit - scale down dirty throttling threshold for one task
 272 *
 273 * task specific dirty limit:
 274 *
 275 *   dirty -= (dirty/8) * p_{t}
 276 *
 277 * To protect light/slow dirtying tasks from heavier/fast ones, we start
 278 * throttling individual tasks before reaching the bdi dirty limit.
 279 * Relatively low thresholds will be allocated to heavy dirtiers. So when
 280 * dirty pages grow large, heavy dirtiers will be throttled first, which will
 281 * effectively curb the growth of dirty pages. Light dirtiers with high enough
 282 * dirty threshold may never get throttled.
 283 */
 284#define TASK_LIMIT_FRACTION 8
 285static unsigned long task_dirty_limit(struct task_struct *tsk,
 286				       unsigned long bdi_dirty)
 287{
 288	long numerator, denominator;
 289	unsigned long dirty = bdi_dirty;
 290	u64 inv = dirty / TASK_LIMIT_FRACTION;
 291
 292	task_dirties_fraction(tsk, &numerator, &denominator);
 293	inv *= numerator;
 294	do_div(inv, denominator);
 295
 296	dirty -= inv;
 297
 298	return max(dirty, bdi_dirty/2);
 299}
 300
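/*
 * Rough worked example (illustrative numbers, not part of the original
 * source): with bdi_dirty = 800 pages, a task responsible for about half of
 * the recent dirtying (numerator/denominator ~= 1/2) gets
 * 800 - (800/8) * 1/2 = 750 pages, while a task responsible for essentially
 * all of it gets 800 - 100 = 700, i.e. task_min_dirty_limit(800).  Since at
 * most 1/8 of bdi_dirty is ever subtracted, the max(dirty, bdi_dirty/2)
 * clamp is only a safety net.
 */
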
 301/* Minimum limit for any task */
 302static unsigned long task_min_dirty_limit(unsigned long bdi_dirty)
 303{
 304	return bdi_dirty - bdi_dirty / TASK_LIMIT_FRACTION;
 305}
 306
  307/*
  308 * Sum of the minimum dirty shares claimed by all BDIs via bdi_set_min_ratio().
  309 */
 310static unsigned int bdi_min_ratio;
 311
 312int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
 313{
 314	int ret = 0;
 315
 316	spin_lock_bh(&bdi_lock);
 317	if (min_ratio > bdi->max_ratio) {
 318		ret = -EINVAL;
 319	} else {
 320		min_ratio -= bdi->min_ratio;
 321		if (bdi_min_ratio + min_ratio < 100) {
 322			bdi_min_ratio += min_ratio;
 323			bdi->min_ratio += min_ratio;
 324		} else {
 325			ret = -EINVAL;
 326		}
 327	}
 328	spin_unlock_bh(&bdi_lock);
 329
 330	return ret;
 331}
 332
 333int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
 334{
 335	int ret = 0;
 336
 337	if (max_ratio > 100)
 338		return -EINVAL;
 339
 340	spin_lock_bh(&bdi_lock);
 341	if (bdi->min_ratio > max_ratio) {
 342		ret = -EINVAL;
 343	} else {
 344		bdi->max_ratio = max_ratio;
 345		bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
 346	}
 347	spin_unlock_bh(&bdi_lock);
 348
 349	return ret;
 350}
 351EXPORT_SYMBOL(bdi_set_max_ratio);
 352
 353/*
 354 * Work out the current dirty-memory clamping and background writeout
 355 * thresholds.
 356 *
  357 * The main aim here is to lower them aggressively if there is a lot of mapped
  358 * memory around, to avoid stressing page reclaim with lots of unreclaimable
  359 * pages.  It is better to clamp down on writers than to start swapping and
  360 * doing lots of scanning.
 361 *
 362 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 363 *
 364 * We don't permit the clamping level to fall below 5% - that is getting rather
 365 * excessive.
 366 *
 367 * We make sure that the background writeout level is below the adjusted
 368 * clamping level.
 369 */
 370
 371static unsigned long highmem_dirtyable_memory(unsigned long total)
 372{
 373#ifdef CONFIG_HIGHMEM
 374	int node;
 375	unsigned long x = 0;
 376
 377	for_each_node_state(node, N_HIGH_MEMORY) {
 378		struct zone *z =
 379			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
 380
 381		x += zone_page_state(z, NR_FREE_PAGES) +
 382		     zone_reclaimable_pages(z);
 383	}
 384	/*
  385	 * Make sure that the number of highmem pages never exceeds the
  386	 * total amount of dirtyable memory. This can only occur in very
  387	 * strange VM situations, but we want to make sure that it does
  388	 * not.
 389	 */
 390	return min(x, total);
 391#else
 392	return 0;
 393#endif
 394}
 395
 396/**
 397 * determine_dirtyable_memory - amount of memory that may be used
 398 *
  399 * Returns the number of pages that can currently be freed and used
 400 * by the kernel for direct mappings.
 401 */
 402unsigned long determine_dirtyable_memory(void)
 403{
 404	unsigned long x;
 405
 406	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
 407
 408	if (!vm_highmem_is_dirtyable)
 409		x -= highmem_dirtyable_memory(x);
 410
 411	return x + 1;	/* Ensure that we never return 0 */
 412}
 413
 414static unsigned long hard_dirty_limit(unsigned long thresh)
 415{
 416	return max(thresh, global_dirty_limit);
 417}
 418
 419/*
 420 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 421 *
 422 * Calculate the dirty thresholds based on sysctl parameters
 423 * - vm.dirty_background_ratio  or  vm.dirty_background_bytes
 424 * - vm.dirty_ratio             or  vm.dirty_bytes
  425 * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (i.e. nfsd) and
 426 * real-time tasks.
 427 */
 428void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 429{
 430	unsigned long background;
 431	unsigned long dirty;
 432	unsigned long uninitialized_var(available_memory);
 433	struct task_struct *tsk;
 434
 435	if (!vm_dirty_bytes || !dirty_background_bytes)
 436		available_memory = determine_dirtyable_memory();
 437
 438	if (vm_dirty_bytes)
 439		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
 440	else
 441		dirty = (vm_dirty_ratio * available_memory) / 100;
 442
 443	if (dirty_background_bytes)
 444		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
 445	else
 446		background = (dirty_background_ratio * available_memory) / 100;
 447
 448	if (background >= dirty)
 449		background = dirty / 2;
 450	tsk = current;
 451	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
 452		background += background / 4;
 453		dirty += dirty / 4;
 454	}
 455	*pbackground = background;
 456	*pdirty = dirty;
 457	trace_global_dirty_state(background, dirty);
 458}
 459
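/*
 * Rough worked example (illustrative numbers, not part of the original
 * source): with vm_dirty_bytes = dirty_background_bytes = 0,
 * vm_dirty_ratio = 20, dirty_background_ratio = 10 and
 * determine_dirtyable_memory() = 100,000 pages, this yields
 * dirty = 20,000 and background = 10,000 pages.  For a PF_LESS_THROTTLE
 * (nfsd) or real-time task both are lifted by 1/4 to 25,000 and 12,500.
 * Had background been configured at or above dirty, it would have been cut
 * back to dirty/2.
 */
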
 460/**
 461 * bdi_dirty_limit - @bdi's share of dirty throttling threshold
 462 * @bdi: the backing_dev_info to query
 463 * @dirty: global dirty limit in pages
 464 *
 465 * Returns @bdi's dirty limit in pages. The term "dirty" in the context of
 466 * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
  467 * The "limit" in the name is not taken as a hard limit in
  468 * balance_dirty_pages().
 469 *
 470 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
 471 * - starving fast devices
 472 * - piling up dirty pages (that will take long time to sync) on slow devices
 473 *
  474 * The bdi's share of the dirty limit adapts to its throughput and is
  475 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 476 */
 477unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
 478{
 479	u64 bdi_dirty;
 480	long numerator, denominator;
 481
 482	/*
 483	 * Calculate this BDI's share of the dirty ratio.
 484	 */
 485	bdi_writeout_fraction(bdi, &numerator, &denominator);
 486
 487	bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
 488	bdi_dirty *= numerator;
 489	do_div(bdi_dirty, denominator);
 490
 491	bdi_dirty += (dirty * bdi->min_ratio) / 100;
 492	if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
 493		bdi_dirty = dirty * bdi->max_ratio / 100;
 494
 495	return bdi_dirty;
 496}
 497
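/*
 * Rough worked example (illustrative numbers, not part of the original
 * source): with a global limit of dirty = 1,000 pages, bdi_min_ratio = 0
 * and a bdi that has completed 3/10 of the recent writeouts
 * (numerator/denominator = 3/10), its share is 1,000 * 3/10 = 300 pages.
 * If that bdi instead claimed min_ratio = 10 (so bdi_min_ratio = 10), the
 * result would be 900 * 3/10 + 100 = 370 pages; assuming max_ratio is left
 * at its default of 100, no clamping occurs.
 */
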
 498static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
 499				       unsigned long elapsed,
 500				       unsigned long written)
 501{
 502	const unsigned long period = roundup_pow_of_two(3 * HZ);
 503	unsigned long avg = bdi->avg_write_bandwidth;
 504	unsigned long old = bdi->write_bandwidth;
 505	u64 bw;
 506
 507	/*
 508	 * bw = written * HZ / elapsed
 509	 *
 510	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
 511	 * write_bandwidth = ---------------------------------------------------
 512	 *                                          period
 513	 */
 514	bw = written - bdi->written_stamp;
 515	bw *= HZ;
 516	if (unlikely(elapsed > period)) {
 517		do_div(bw, elapsed);
 518		avg = bw;
 519		goto out;
 520	}
 521	bw += (u64)bdi->write_bandwidth * (period - elapsed);
 522	bw >>= ilog2(period);
 523
 524	/*
 525	 * one more level of smoothing, for filtering out sudden spikes
 526	 */
 527	if (avg > old && old >= (unsigned long)bw)
 528		avg -= (avg - old) >> 3;
 529
 530	if (avg < old && old <= (unsigned long)bw)
 531		avg += (old - avg) >> 3;
 532
 533out:
 534	bdi->write_bandwidth = bw;
 535	bdi->avg_write_bandwidth = avg;
 536}
 537
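/*
 * Rough worked example (illustrative numbers, not part of the original
 * source): with HZ = 1000, period = roundup_pow_of_two(3000) = 4096.
 * Suppose elapsed = 200 jiffies, 2,048 pages were written in that time and
 * the old write_bandwidth was 10,000 pages/s.  The instantaneous rate is
 * 2,048 * 1000 / 200 = 10,240 pages/s, and the blended estimate becomes
 * (2,048 * 1000 + 10,000 * (4096 - 200)) >> 12 ~= 10,011 pages/s, so the
 * estimate drifts gently toward the new rate; avg_write_bandwidth then
 * applies a further 1/8-step of smoothing on top of that.
 */
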
 538/*
 539 * The global dirtyable memory and dirty threshold could be suddenly knocked
 540 * down by a large amount (eg. on the startup of KVM in a swapless system).
 541 * This may throw the system into deep dirty exceeded state and throttle
 542 * heavy/light dirtiers alike. To retain good responsiveness, maintain
  543 * global_dirty_limit, which tracks slowly down to the knocked-down dirty
 544 * threshold.
 545 */
 546static void update_dirty_limit(unsigned long thresh, unsigned long dirty)
 547{
 548	unsigned long limit = global_dirty_limit;
 549
 550	/*
 551	 * Follow up in one step.
 552	 */
 553	if (limit < thresh) {
 554		limit = thresh;
 555		goto update;
 556	}
 557
 558	/*
 559	 * Follow down slowly. Use the higher one as the target, because thresh
 560	 * may drop below dirty. This is exactly the reason to introduce
 561	 * global_dirty_limit which is guaranteed to lie above the dirty pages.
 562	 */
 563	thresh = max(thresh, dirty);
 564	if (limit > thresh) {
 565		limit -= (limit - thresh) >> 5;
 566		goto update;
 567	}
 568	return;
 569update:
 570	global_dirty_limit = limit;
 571}
 572
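/*
 * Rough worked example (illustrative numbers, not part of the original
 * source): suppose global_dirty_limit = 100,000 pages when thresh suddenly
 * drops to 20,000 while dirty = 30,000.  The tracking target is
 * max(20,000, 30,000) = 30,000, and each update lowers the limit by
 * (limit - 30,000) >> 5, so the first step lands at 100,000 - 2,187 = 97,813
 * and the limit decays toward 30,000 over many 200ms updates driven by
 * global_update_bandwidth() below.  If thresh later rises above the limit,
 * the limit jumps up to it in a single step.
 */
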
 573static void global_update_bandwidth(unsigned long thresh,
 574				    unsigned long dirty,
 575				    unsigned long now)
 576{
 577	static DEFINE_SPINLOCK(dirty_lock);
 578	static unsigned long update_time;
 579
 580	/*
  581	 * check locklessly first to avoid taking the lock most of the time
 582	 */
 583	if (time_before(now, update_time + BANDWIDTH_INTERVAL))
 584		return;
 585
 586	spin_lock(&dirty_lock);
 587	if (time_after_eq(now, update_time + BANDWIDTH_INTERVAL)) {
 588		update_dirty_limit(thresh, dirty);
 589		update_time = now;
 590	}
 591	spin_unlock(&dirty_lock);
 592}
 593
 594void __bdi_update_bandwidth(struct backing_dev_info *bdi,
 595			    unsigned long thresh,
 596			    unsigned long dirty,
 597			    unsigned long bdi_thresh,
 598			    unsigned long bdi_dirty,
 599			    unsigned long start_time)
 600{
 601	unsigned long now = jiffies;
 602	unsigned long elapsed = now - bdi->bw_time_stamp;
 603	unsigned long written;
 604
 605	/*
 606	 * rate-limit, only update once every 200ms.
 607	 */
 608	if (elapsed < BANDWIDTH_INTERVAL)
 609		return;
 610
 611	written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]);
 612
 613	/*
 614	 * Skip quiet periods when disk bandwidth is under-utilized.
 615	 * (at least 1s idle time between two flusher runs)
 616	 */
 617	if (elapsed > HZ && time_before(bdi->bw_time_stamp, start_time))
 618		goto snapshot;
 619
 620	if (thresh)
 621		global_update_bandwidth(thresh, dirty, now);
 622
 623	bdi_update_write_bandwidth(bdi, elapsed, written);
 624
 625snapshot:
 626	bdi->written_stamp = written;
 627	bdi->bw_time_stamp = now;
 628}
 629
 630static void bdi_update_bandwidth(struct backing_dev_info *bdi,
 631				 unsigned long thresh,
 632				 unsigned long dirty,
 633				 unsigned long bdi_thresh,
 634				 unsigned long bdi_dirty,
 635				 unsigned long start_time)
 636{
 637	if (time_is_after_eq_jiffies(bdi->bw_time_stamp + BANDWIDTH_INTERVAL))
 638		return;
 639	spin_lock(&bdi->wb.list_lock);
 640	__bdi_update_bandwidth(bdi, thresh, dirty, bdi_thresh, bdi_dirty,
 641			       start_time);
 642	spin_unlock(&bdi->wb.list_lock);
 643}
 644
 645/*
 646 * balance_dirty_pages() must be called by processes which are generating dirty
 647 * data.  It looks at the number of dirty pages in the machine and will force
 648 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 649 * If we're over `background_thresh' then the writeback threads are woken to
 650 * perform some writeout.
 651 */
 652static void balance_dirty_pages(struct address_space *mapping,
 653				unsigned long write_chunk)
 654{
 655	unsigned long nr_reclaimable, bdi_nr_reclaimable;
 656	unsigned long nr_dirty;  /* = file_dirty + writeback + unstable_nfs */
 657	unsigned long bdi_dirty;
 658	unsigned long background_thresh;
 659	unsigned long dirty_thresh;
 660	unsigned long bdi_thresh;
 661	unsigned long task_bdi_thresh;
 662	unsigned long min_task_bdi_thresh;
 663	unsigned long pages_written = 0;
 664	unsigned long pause = 1;
 665	bool dirty_exceeded = false;
 666	bool clear_dirty_exceeded = true;
 667	struct backing_dev_info *bdi = mapping->backing_dev_info;
 668	unsigned long start_time = jiffies;
 669
 670	for (;;) {
 671		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
 672					global_page_state(NR_UNSTABLE_NFS);
 673		nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);
 674
 675		global_dirty_limits(&background_thresh, &dirty_thresh);
 676
 677		/*
 678		 * Throttle it only when the background writeback cannot
 679		 * catch-up. This avoids (excessively) small writeouts
 680		 * when the bdi limits are ramping up.
 681		 */
 682		if (nr_dirty <= (background_thresh + dirty_thresh) / 2)
 683			break;
 684
 685		bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
 686		min_task_bdi_thresh = task_min_dirty_limit(bdi_thresh);
 687		task_bdi_thresh = task_dirty_limit(current, bdi_thresh);
 688
 689		/*
 690		 * In order to avoid the stacked BDI deadlock we need
 691		 * to ensure we accurately count the 'dirty' pages when
 692		 * the threshold is low.
 693		 *
 694		 * Otherwise it would be possible to get thresh+n pages
 695		 * reported dirty, even though there are thresh-m pages
 696		 * actually dirty; with m+n sitting in the percpu
 697		 * deltas.
 698		 */
 699		if (task_bdi_thresh < 2 * bdi_stat_error(bdi)) {
 700			bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
 701			bdi_dirty = bdi_nr_reclaimable +
 702				    bdi_stat_sum(bdi, BDI_WRITEBACK);
 703		} else {
 704			bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
 705			bdi_dirty = bdi_nr_reclaimable +
 706				    bdi_stat(bdi, BDI_WRITEBACK);
 707		}
 708
 709		/*
  710		 * The bdi thresh is a somewhat "soft" limit derived from the
  711		 * global "hard" limit. The former helps to prevent a heavy-IO
  712		 * bdi or process from holding back light ones; the latter is
  713		 * the last-resort safeguard.
 714		 */
 715		dirty_exceeded = (bdi_dirty > task_bdi_thresh) ||
 716				  (nr_dirty > dirty_thresh);
 717		clear_dirty_exceeded = (bdi_dirty <= min_task_bdi_thresh) &&
 718					(nr_dirty <= dirty_thresh);
 719
 720		if (!dirty_exceeded)
 721			break;
 722
 723		if (!bdi->dirty_exceeded)
 724			bdi->dirty_exceeded = 1;
 725
 726		bdi_update_bandwidth(bdi, dirty_thresh, nr_dirty,
 727				     bdi_thresh, bdi_dirty, start_time);
 728
  729		/* Note: nr_reclaimable denotes dirty file pages + unstable NFS pages.
 730		 * Unstable writes are a feature of certain networked
 731		 * filesystems (i.e. NFS) in which data may have been
 732		 * written to the server's write cache, but has not yet
 733		 * been flushed to permanent storage.
 734		 * Only move pages to writeback if this bdi is over its
 735		 * threshold otherwise wait until the disk writes catch
 736		 * up.
 737		 */
 738		trace_balance_dirty_start(bdi);
 739		if (bdi_nr_reclaimable > task_bdi_thresh) {
 740			pages_written += writeback_inodes_wb(&bdi->wb,
 741							     write_chunk);
 742			trace_balance_dirty_written(bdi, pages_written);
 743			if (pages_written >= write_chunk)
 744				break;		/* We've done our duty */
 745		}
 746		__set_current_state(TASK_UNINTERRUPTIBLE);
 747		io_schedule_timeout(pause);
 748		trace_balance_dirty_wait(bdi);
 749
 750		dirty_thresh = hard_dirty_limit(dirty_thresh);
 751		/*
 752		 * max-pause area. If dirty exceeded but still within this
 753		 * area, no need to sleep for more than 200ms: (a) 8 pages per
 754		 * 200ms is typically more than enough to curb heavy dirtiers;
 755		 * (b) the pause time limit makes the dirtiers more responsive.
 756		 */
 757		if (nr_dirty < dirty_thresh &&
 758		    bdi_dirty < (task_bdi_thresh + bdi_thresh) / 2 &&
 759		    time_after(jiffies, start_time + MAX_PAUSE))
 760			break;
 761
 762		/*
 763		 * Increase the delay for each loop, up to our previous
 764		 * default of taking a 100ms nap.
 765		 */
 766		pause <<= 1;
 767		if (pause > HZ / 10)
 768			pause = HZ / 10;
 769	}
 770
 771	/* Clear dirty_exceeded flag only when no task can exceed the limit */
 772	if (clear_dirty_exceeded && bdi->dirty_exceeded)
 773		bdi->dirty_exceeded = 0;
 774
 775	if (writeback_in_progress(bdi))
 776		return;
 777
 778	/*
 779	 * In laptop mode, we wait until hitting the higher threshold before
 780	 * starting background writeout, and then write out all the way down
 781	 * to the lower threshold.  So slow writers cause minimal disk activity.
 782	 *
 783	 * In normal mode, we start background writeout at the lower
 784	 * background_thresh, to keep the amount of dirty memory low.
 785	 */
 786	if ((laptop_mode && pages_written) ||
 787	    (!laptop_mode && (nr_reclaimable > background_thresh)))
 788		bdi_start_background_writeback(bdi);
 789}
 790
 791void set_page_dirty_balance(struct page *page, int page_mkwrite)
 792{
 793	if (set_page_dirty(page) || page_mkwrite) {
 794		struct address_space *mapping = page_mapping(page);
 795
 796		if (mapping)
 797			balance_dirty_pages_ratelimited(mapping);
 798	}
 799}
 800
 801static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;
 802
 803/**
 804 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 805 * @mapping: address_space which was dirtied
 806 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 807 *
 808 * Processes which are dirtying memory should call in here once for each page
 809 * which was newly dirtied.  The function will periodically check the system's
 810 * dirty state and will initiate writeback if needed.
 811 *
 812 * On really big machines, get_writeback_state is expensive, so try to avoid
 813 * calling it too often (ratelimiting).  But once we're over the dirty memory
 814 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 815 * from overshooting the limit by (ratelimit_pages) each.
 816 */
 817void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 818					unsigned long nr_pages_dirtied)
 819{
 820	struct backing_dev_info *bdi = mapping->backing_dev_info;
 821	unsigned long ratelimit;
 822	unsigned long *p;
 823
 824	if (!bdi_cap_account_dirty(bdi))
 825		return;
 826
 827	ratelimit = ratelimit_pages;
 828	if (mapping->backing_dev_info->dirty_exceeded)
 829		ratelimit = 8;
 830
 831	/*
 832	 * Check the rate limiting. Also, we do not want to throttle real-time
 833	 * tasks in balance_dirty_pages(). Period.
 834	 */
 835	preempt_disable();
 836	p =  &__get_cpu_var(bdp_ratelimits);
 837	*p += nr_pages_dirtied;
 838	if (unlikely(*p >= ratelimit)) {
 839		ratelimit = sync_writeback_pages(*p);
 840		*p = 0;
 841		preempt_enable();
 842		balance_dirty_pages(mapping, ratelimit);
 843		return;
 844	}
 845	preempt_enable();
 846}
 847EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
 848
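/*
 * Illustration only, not part of the original source: a minimal,
 * hypothetical sketch of how a write path uses the interface above.  Real
 * callers (e.g. the generic buffered write path) do the equivalent after
 * copying data into a page cache page; example_dirty_one_page() is a
 * made-up name.
 */
#if 0
static void example_dirty_one_page(struct address_space *mapping,
				   struct page *page)
{
	set_page_dirty(page);		/* mark the page and its inode dirty */
	/* throttle this task if it has been dirtying pages too fast */
	balance_dirty_pages_ratelimited_nr(mapping, 1);
}
#endif
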
 849void throttle_vm_writeout(gfp_t gfp_mask)
 850{
 851	unsigned long background_thresh;
 852	unsigned long dirty_thresh;
 853
  854	for (;;) {
  855		global_dirty_limits(&background_thresh, &dirty_thresh);
  856
  857		/*
  858		 * Boost the allowable dirty threshold a bit for page
  859		 * allocators so they don't get DoS'ed by heavy writers
  860		 */
  861		dirty_thresh += dirty_thresh / 10;	/* wheeee... */
  862
  863		if (global_page_state(NR_UNSTABLE_NFS) +
  864		    global_page_state(NR_WRITEBACK) <= dirty_thresh)
  865			break;
  866		congestion_wait(BLK_RW_ASYNC, HZ/10);
 867
 868		/*
 869		 * The caller might hold locks which can prevent IO completion
 870		 * or progress in the filesystem.  So we cannot just sit here
 871		 * waiting for IO to complete.
 872		 */
 873		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
 874			break;
  875	}
 876}
 877
 878/*
 879 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 880 */
 881int dirty_writeback_centisecs_handler(ctl_table *table, int write,
 882	void __user *buffer, size_t *length, loff_t *ppos)
 883{
 884	proc_dointvec(table, write, buffer, length, ppos);
 885	bdi_arm_supers_timer();
 886	return 0;
 887}
 888
 889#ifdef CONFIG_BLOCK
 890void laptop_mode_timer_fn(unsigned long data)
 891{
 892	struct request_queue *q = (struct request_queue *)data;
 893	int nr_pages = global_page_state(NR_FILE_DIRTY) +
 894		global_page_state(NR_UNSTABLE_NFS);
 895
 896	/*
 897	 * We want to write everything out, not just down to the dirty
 898	 * threshold
 899	 */
 900	if (bdi_has_dirty_io(&q->backing_dev_info))
 901		bdi_start_writeback(&q->backing_dev_info, nr_pages);
 902}
 903
 904/*
 905 * We've spun up the disk and we're in laptop mode: schedule writeback
 906 * of all dirty data a few seconds from now.  If the flush is already scheduled
 907 * then push it back - the user is still using the disk.
 908 */
 909void laptop_io_completion(struct backing_dev_info *info)
 910{
 911	mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
 912}
 913
 914/*
 915 * We're in laptop mode and we've just synced. The sync's writes will have
 916 * caused another writeback to be scheduled by laptop_io_completion.
 917 * Nothing needs to be written back anymore, so we unschedule the writeback.
 918 */
 919void laptop_sync_completion(void)
 920{
 921	struct backing_dev_info *bdi;
 922
 923	rcu_read_lock();
 924
 925	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
 926		del_timer(&bdi->laptop_mode_wb_timer);
 927
 928	rcu_read_unlock();
 929}
 930#endif
 931
 932/*
 933 * If ratelimit_pages is too high then we can get into dirty-data overload
 934 * if a large number of processes all perform writes at the same time.
 935 * If it is too low then SMP machines will call the (expensive)
 936 * get_writeback_state too often.
 937 *
 938 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 939 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 940 * thresholds before writeback cuts in.
 941 *
 942 * But the limit should not be set too high.  Because it also controls the
 943 * amount of memory which the balance_dirty_pages() caller has to write back.
 944 * If this is too large then the caller will block on the IO queue all the
 945 * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 946 * will write six megabyte chunks, max.
 947 */
 948
 949void writeback_set_ratelimit(void)
 950{
 951	ratelimit_pages = vm_total_pages / (num_online_cpus() * 32);
 952	if (ratelimit_pages < 16)
 953		ratelimit_pages = 16;
 954	if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
 955		ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
 956}
 957
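/*
 * Rough worked example (illustrative numbers, not part of the original
 * source): with vm_total_pages = 1,048,576 (4GB of 4KB pages) and 4 online
 * CPUs, the first line gives 1,048,576 / 128 = 8,192 pages, but
 * 8,192 * PAGE_CACHE_SIZE = 32MB exceeds the 4MB cap, so ratelimit_pages
 * ends up at 1,024 pages.  Each CPU then re-enters balance_dirty_pages()
 * after roughly 1,024 newly dirtied pages.
 */
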
 958static int __cpuinit
 959ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
 960{
 961	writeback_set_ratelimit();
 962	return NOTIFY_DONE;
 963}
 964
 965static struct notifier_block __cpuinitdata ratelimit_nb = {
 966	.notifier_call	= ratelimit_handler,
 967	.next		= NULL,
 968};
 969
 970/*
 971 * Called early on to tune the page writeback dirty limits.
 972 *
 973 * We used to scale dirty pages according to how total memory
 974 * related to pages that could be allocated for buffers (by
  975 * comparing nr_free_buffer_pages() to vm_total_pages).
 976 *
 977 * However, that was when we used "dirty_ratio" to scale with
 978 * all memory, and we don't do that any more. "dirty_ratio"
 979 * is now applied to total non-HIGHPAGE memory (by subtracting
 980 * totalhigh_pages from vm_total_pages), and as such we can't
 981 * get into the old insane situation any more where we had
 982 * large amounts of dirty pages compared to a small amount of
 983 * non-HIGHMEM memory.
 984 *
 985 * But we might still want to scale the dirty_ratio by how
 986 * much memory the box has..
 987 */
 988void __init page_writeback_init(void)
 989{
 990	int shift;
 991
 992	writeback_set_ratelimit();
 993	register_cpu_notifier(&ratelimit_nb);
 994
 995	shift = calc_period_shift();
 996	prop_descriptor_init(&vm_completions, shift);
 997	prop_descriptor_init(&vm_dirties, shift);
 998}
 999
1000/**
1001 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
1002 * @mapping: address space structure to write
1003 * @start: starting page index
1004 * @end: ending page index (inclusive)
1005 *
1006 * This function scans the page range from @start to @end (inclusive) and tags
1007 * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
1008 * that write_cache_pages (or whoever calls this function) will then use
1009 * TOWRITE tag to identify pages eligible for writeback.  This mechanism is
1010 * used to avoid livelocking of writeback by a process steadily creating new
1011 * dirty pages in the file (thus it is important for this function to be quick
1012 * so that it can tag pages faster than a dirtying process can create them).
1013 */
1014/*
1015 * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency.
1016 */
1017void tag_pages_for_writeback(struct address_space *mapping,
1018			     pgoff_t start, pgoff_t end)
1019{
1020#define WRITEBACK_TAG_BATCH 4096
1021	unsigned long tagged;
1022
1023	do {
1024		spin_lock_irq(&mapping->tree_lock);
1025		tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
1026				&start, end, WRITEBACK_TAG_BATCH,
1027				PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
1028		spin_unlock_irq(&mapping->tree_lock);
1029		WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
1030		cond_resched();
1031		/* We check 'start' to handle wrapping when end == ~0UL */
1032	} while (tagged >= WRITEBACK_TAG_BATCH && start);
1033}
1034EXPORT_SYMBOL(tag_pages_for_writeback);
1035
1036/**
1037 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
1038 * @mapping: address space structure to write
1039 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
1040 * @writepage: function called for each page
1041 * @data: data passed to writepage function
1042 *
1043 * If a page is already under I/O, write_cache_pages() skips it, even
1044 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
1045 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
1046 * and msync() need to guarantee that all the data which was dirty at the time
1047 * the call was made get new I/O started against them.  If wbc->sync_mode is
1048 * WB_SYNC_ALL then we were called for data integrity and we must wait for
1049 * existing IO to complete.
1050 *
1051 * To avoid livelocks (when other process dirties new pages), we first tag
1052 * pages which should be written back with TOWRITE tag and only then start
1053 * writing them. For data-integrity sync we have to be careful so that we do
1054 * not miss some pages (e.g., because some other process has cleared TOWRITE
1055 * tag we set). The rule we follow is that TOWRITE tag can be cleared only
1056 * by the process clearing the DIRTY tag (and submitting the page for IO).
1057 */
1058int write_cache_pages(struct address_space *mapping,
1059		      struct writeback_control *wbc, writepage_t writepage,
1060		      void *data)
1061{
1062	int ret = 0;
1063	int done = 0;
1064	struct pagevec pvec;
1065	int nr_pages;
1066	pgoff_t uninitialized_var(writeback_index);
1067	pgoff_t index;
1068	pgoff_t end;		/* Inclusive */
1069	pgoff_t done_index;
1070	int cycled;
1071	int range_whole = 0;
1072	int tag;
1073
1074	pagevec_init(&pvec, 0);
1075	if (wbc->range_cyclic) {
1076		writeback_index = mapping->writeback_index; /* prev offset */
1077		index = writeback_index;
1078		if (index == 0)
1079			cycled = 1;
1080		else
1081			cycled = 0;
1082		end = -1;
1083	} else {
1084		index = wbc->range_start >> PAGE_CACHE_SHIFT;
1085		end = wbc->range_end >> PAGE_CACHE_SHIFT;
1086		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1087			range_whole = 1;
1088		cycled = 1; /* ignore range_cyclic tests */
1089	}
1090	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1091		tag = PAGECACHE_TAG_TOWRITE;
1092	else
1093		tag = PAGECACHE_TAG_DIRTY;
1094retry:
1095	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1096		tag_pages_for_writeback(mapping, index, end);
1097	done_index = index;
1098	while (!done && (index <= end)) {
1099		int i;
1100
1101		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
1102			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1103		if (nr_pages == 0)
1104			break;
1105
1106		for (i = 0; i < nr_pages; i++) {
1107			struct page *page = pvec.pages[i];
1108
1109			/*
1110			 * At this point, the page may be truncated or
1111			 * invalidated (changing page->mapping to NULL), or
1112			 * even swizzled back from swapper_space to tmpfs file
1113			 * mapping. However, page->index will not change
1114			 * because we have a reference on the page.
1115			 */
1116			if (page->index > end) {
1117				/*
1118				 * can't be range_cyclic (1st pass) because
1119				 * end == -1 in that case.
1120				 */
1121				done = 1;
1122				break;
1123			}
1124
1125			done_index = page->index;
1126
1127			lock_page(page);
1128
1129			/*
1130			 * Page truncated or invalidated. We can freely skip it
1131			 * then, even for data integrity operations: the page
1132			 * has disappeared concurrently, so there could be no
 1133			 * real expectation of this data integrity operation
1134			 * even if there is now a new, dirty page at the same
1135			 * pagecache address.
1136			 */
1137			if (unlikely(page->mapping != mapping)) {
1138continue_unlock:
1139				unlock_page(page);
1140				continue;
1141			}
1142
1143			if (!PageDirty(page)) {
1144				/* someone wrote it for us */
1145				goto continue_unlock;
1146			}
1147
1148			if (PageWriteback(page)) {
1149				if (wbc->sync_mode != WB_SYNC_NONE)
1150					wait_on_page_writeback(page);
1151				else
1152					goto continue_unlock;
1153			}
1154
1155			BUG_ON(PageWriteback(page));
1156			if (!clear_page_dirty_for_io(page))
1157				goto continue_unlock;
1158
1159			trace_wbc_writepage(wbc, mapping->backing_dev_info);
1160			ret = (*writepage)(page, wbc, data);
1161			if (unlikely(ret)) {
1162				if (ret == AOP_WRITEPAGE_ACTIVATE) {
1163					unlock_page(page);
1164					ret = 0;
1165				} else {
1166					/*
1167					 * done_index is set past this page,
1168					 * so media errors will not choke
1169					 * background writeout for the entire
1170					 * file. This has consequences for
1171					 * range_cyclic semantics (ie. it may
1172					 * not be suitable for data integrity
1173					 * writeout).
1174					 */
1175					done_index = page->index + 1;
1176					done = 1;
1177					break;
1178				}
1179			}
1180
1181			/*
1182			 * We stop writing back only if we are not doing
1183			 * integrity sync. In case of integrity sync we have to
1184			 * keep going until we have written all the pages
1185			 * we tagged for writeback prior to entering this loop.
1186			 */
1187			if (--wbc->nr_to_write <= 0 &&
1188			    wbc->sync_mode == WB_SYNC_NONE) {
1189				done = 1;
1190				break;
1191			}
1192		}
1193		pagevec_release(&pvec);
1194		cond_resched();
1195	}
1196	if (!cycled && !done) {
1197		/*
1198		 * range_cyclic:
1199		 * We hit the last page and there is more work to be done: wrap
1200		 * back to the start of the file
1201		 */
1202		cycled = 1;
1203		index = 0;
1204		end = writeback_index - 1;
1205		goto retry;
1206	}
1207	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1208		mapping->writeback_index = done_index;
1209
1210	return ret;
1211}
1212EXPORT_SYMBOL(write_cache_pages);
1213
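/*
 * Illustration only, not part of the original source: a hypothetical
 * filesystem ->writepages implementation built on write_cache_pages(),
 * mirroring __writepage() below.  The names foo_writepage()/foo_writepages()
 * are made up.
 */
#if 0
static int foo_writepage(struct page *page, struct writeback_control *wbc,
			 void *data)
{
	struct address_space *mapping = data;

	/* hand the locked, dirty page to the filesystem's ->writepage */
	return mapping->a_ops->writepage(page, wbc);
}

static int foo_writepages(struct address_space *mapping,
			  struct writeback_control *wbc)
{
	return write_cache_pages(mapping, wbc, foo_writepage, mapping);
}
#endif
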
1214/*
1215 * Function used by generic_writepages to call the real writepage
1216 * function and set the mapping flags on error
1217 */
1218static int __writepage(struct page *page, struct writeback_control *wbc,
1219		       void *data)
1220{
1221	struct address_space *mapping = data;
1222	int ret = mapping->a_ops->writepage(page, wbc);
1223	mapping_set_error(mapping, ret);
1224	return ret;
1225}
1226
1227/**
1228 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
1229 * @mapping: address space structure to write
1230 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
1231 *
1232 * This is a library function, which implements the writepages()
1233 * address_space_operation.
1234 */
1235int generic_writepages(struct address_space *mapping,
1236		       struct writeback_control *wbc)
1237{
1238	struct blk_plug plug;
1239	int ret;
1240
1241	/* deal with chardevs and other special file */
1242	if (!mapping->a_ops->writepage)
1243		return 0;
1244
1245	blk_start_plug(&plug);
1246	ret = write_cache_pages(mapping, wbc, __writepage, mapping);
1247	blk_finish_plug(&plug);
1248	return ret;
1249}
1250
1251EXPORT_SYMBOL(generic_writepages);
1252
1253int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
1254{
1255	int ret;
1256
1257	if (wbc->nr_to_write <= 0)
1258		return 0;
1259	if (mapping->a_ops->writepages)
1260		ret = mapping->a_ops->writepages(mapping, wbc);
1261	else
1262		ret = generic_writepages(mapping, wbc);
1263	return ret;
1264}
1265
1266/**
1267 * write_one_page - write out a single page and optionally wait on I/O
1268 * @page: the page to write
1269 * @wait: if true, wait on writeout
1270 *
1271 * The page must be locked by the caller and will be unlocked upon return.
1272 *
1273 * write_one_page() returns a negative error code if I/O failed.
1274 */
1275int write_one_page(struct page *page, int wait)
1276{
1277	struct address_space *mapping = page->mapping;
1278	int ret = 0;
1279	struct writeback_control wbc = {
1280		.sync_mode = WB_SYNC_ALL,
1281		.nr_to_write = 1,
1282	};
1283
1284	BUG_ON(!PageLocked(page));
1285
1286	if (wait)
1287		wait_on_page_writeback(page);
1288
1289	if (clear_page_dirty_for_io(page)) {
1290		page_cache_get(page);
1291		ret = mapping->a_ops->writepage(page, &wbc);
1292		if (ret == 0 && wait) {
1293			wait_on_page_writeback(page);
1294			if (PageError(page))
1295				ret = -EIO;
1296		}
1297		page_cache_release(page);
1298	} else {
1299		unlock_page(page);
1300	}
1301	return ret;
1302}
1303EXPORT_SYMBOL(write_one_page);
1304
1305/*
 1306 * For address_spaces which use neither buffers nor writeback.
1307 */
1308int __set_page_dirty_no_writeback(struct page *page)
1309{
1310	if (!PageDirty(page))
1311		return !TestSetPageDirty(page);
1312	return 0;
1313}
1314
1315/*
1316 * Helper function for set_page_dirty family.
1317 * NOTE: This relies on being atomic wrt interrupts.
1318 */
1319void account_page_dirtied(struct page *page, struct address_space *mapping)
1320{
1321	if (mapping_cap_account_dirty(mapping)) {
1322		__inc_zone_page_state(page, NR_FILE_DIRTY);
1323		__inc_zone_page_state(page, NR_DIRTIED);
1324		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
1325		task_dirty_inc(current);
1326		task_io_account_write(PAGE_CACHE_SIZE);
1327	}
1328}
1329EXPORT_SYMBOL(account_page_dirtied);
1330
1331/*
1332 * Helper function for set_page_writeback family.
1333 * NOTE: Unlike account_page_dirtied this does not rely on being atomic
1334 * wrt interrupts.
1335 */
1336void account_page_writeback(struct page *page)
1337{
1338	inc_zone_page_state(page, NR_WRITEBACK);
1339}
1340EXPORT_SYMBOL(account_page_writeback);
1341
1342/*
1343 * For address_spaces which do not use buffers.  Just tag the page as dirty in
1344 * its radix tree.
1345 *
1346 * This is also used when a single buffer is being dirtied: we want to set the
1347 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
1348 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
1349 *
1350 * Most callers have locked the page, which pins the address_space in memory.
1351 * But zap_pte_range() does not lock the page, however in that case the
1352 * mapping is pinned by the vma's ->vm_file reference.
1353 *
1354 * We take care to handle the case where the page was truncated from the
1355 * mapping by re-checking page_mapping() inside tree_lock.
1356 */
1357int __set_page_dirty_nobuffers(struct page *page)
1358{
1359	if (!TestSetPageDirty(page)) {
1360		struct address_space *mapping = page_mapping(page);
1361		struct address_space *mapping2;
1362
1363		if (!mapping)
1364			return 1;
1365
1366		spin_lock_irq(&mapping->tree_lock);
1367		mapping2 = page_mapping(page);
1368		if (mapping2) { /* Race with truncate? */
1369			BUG_ON(mapping2 != mapping);
1370			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
1371			account_page_dirtied(page, mapping);
1372			radix_tree_tag_set(&mapping->page_tree,
1373				page_index(page), PAGECACHE_TAG_DIRTY);
1374		}
1375		spin_unlock_irq(&mapping->tree_lock);
1376		if (mapping->host) {
1377			/* !PageAnon && !swapper_space */
1378			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1379		}
1380		return 1;
1381	}
1382	return 0;
1383}
1384EXPORT_SYMBOL(__set_page_dirty_nobuffers);
1385
1386/*
1387 * When a writepage implementation decides that it doesn't want to write this
1388 * page for some reason, it should redirty the locked page via
1389 * redirty_page_for_writepage() and it should then unlock the page and return 0
1390 */
1391int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
1392{
1393	wbc->pages_skipped++;
1394	return __set_page_dirty_nobuffers(page);
1395}
1396EXPORT_SYMBOL(redirty_page_for_writepage);
1397
1398/*
1399 * Dirty a page.
1400 *
1401 * For pages with a mapping this should be done under the page lock
 1402 * for the benefit of asynchronous memory errors, which prefer a consistent
 1403 * dirty state. This rule can be broken in some special cases,
 1404 * but it is better not to.
1405 *
1406 * If the mapping doesn't provide a set_page_dirty a_op, then
1407 * just fall through and assume that it wants buffer_heads.
1408 */
1409int set_page_dirty(struct page *page)
1410{
1411	struct address_space *mapping = page_mapping(page);
1412
1413	if (likely(mapping)) {
1414		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
1415		/*
 1416		 * readahead/lru_deactivate_page could leave PG_readahead/PG_reclaim
 1417		 * set due to a race with end_page_writeback.
 1418		 * For readahead, if the page is written the flag is reset, so there
 1419		 * is no problem.
 1420		 * For lru_deactivate_page, if the page is redirtied the flag is
 1421		 * reset, so no problem either; but if the page is later used by
 1422		 * readahead, the stale flag will confuse readahead and make it
 1423		 * restart its size ramp-up, which is only a minor problem.
1424		 */
1425		ClearPageReclaim(page);
1426#ifdef CONFIG_BLOCK
1427		if (!spd)
1428			spd = __set_page_dirty_buffers;
1429#endif
1430		return (*spd)(page);
1431	}
1432	if (!PageDirty(page)) {
1433		if (!TestSetPageDirty(page))
1434			return 1;
1435	}
1436	return 0;
1437}
1438EXPORT_SYMBOL(set_page_dirty);
1439
1440/*
1441 * set_page_dirty() is racy if the caller has no reference against
1442 * page->mapping->host, and if the page is unlocked.  This is because another
1443 * CPU could truncate the page off the mapping and then free the mapping.
1444 *
1445 * Usually, the page _is_ locked, or the caller is a user-space process which
1446 * holds a reference on the inode by having an open file.
1447 *
1448 * In other cases, the page should be locked before running set_page_dirty().
1449 */
1450int set_page_dirty_lock(struct page *page)
1451{
1452	int ret;
1453
1454	lock_page(page);
1455	ret = set_page_dirty(page);
1456	unlock_page(page);
1457	return ret;
1458}
1459EXPORT_SYMBOL(set_page_dirty_lock);
1460
1461/*
1462 * Clear a page's dirty flag, while caring for dirty memory accounting.
1463 * Returns true if the page was previously dirty.
1464 *
1465 * This is for preparing to put the page under writeout.  We leave the page
1466 * tagged as dirty in the radix tree so that a concurrent write-for-sync
1467 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
1468 * implementation will run either set_page_writeback() or set_page_dirty(),
1469 * at which stage we bring the page's dirty flag and radix-tree dirty tag
1470 * back into sync.
1471 *
1472 * This incoherency between the page's dirty flag and radix-tree tag is
1473 * unfortunate, but it only exists while the page is locked.
1474 */
1475int clear_page_dirty_for_io(struct page *page)
1476{
1477	struct address_space *mapping = page_mapping(page);
1478
1479	BUG_ON(!PageLocked(page));
1480
1481	if (mapping && mapping_cap_account_dirty(mapping)) {
1482		/*
1483		 * Yes, Virginia, this is indeed insane.
1484		 *
1485		 * We use this sequence to make sure that
1486		 *  (a) we account for dirty stats properly
1487		 *  (b) we tell the low-level filesystem to
1488		 *      mark the whole page dirty if it was
1489		 *      dirty in a pagetable. Only to then
1490		 *  (c) clean the page again and return 1 to
1491		 *      cause the writeback.
1492		 *
1493		 * This way we avoid all nasty races with the
1494		 * dirty bit in multiple places and clearing
1495		 * them concurrently from different threads.
1496		 *
1497		 * Note! Normally the "set_page_dirty(page)"
1498		 * has no effect on the actual dirty bit - since
1499		 * that will already usually be set. But we
1500		 * need the side effects, and it can help us
1501		 * avoid races.
1502		 *
1503		 * We basically use the page "master dirty bit"
1504		 * as a serialization point for all the different
1505		 * threads doing their things.
1506		 */
1507		if (page_mkclean(page))
1508			set_page_dirty(page);
1509		/*
1510		 * We carefully synchronise fault handlers against
1511		 * installing a dirty pte and marking the page dirty
1512		 * at this point. We do this by having them hold the
1513		 * page lock at some point after installing their
1514		 * pte, but before marking the page dirty.
1515		 * Pages are always locked coming in here, so we get
1516		 * the desired exclusion. See mm/memory.c:do_wp_page()
1517		 * for more comments.
1518		 */
1519		if (TestClearPageDirty(page)) {
1520			dec_zone_page_state(page, NR_FILE_DIRTY);
1521			dec_bdi_stat(mapping->backing_dev_info,
1522					BDI_RECLAIMABLE);
1523			return 1;
1524		}
1525		return 0;
1526	}
1527	return TestClearPageDirty(page);
1528}
1529EXPORT_SYMBOL(clear_page_dirty_for_io);
1530
1531int test_clear_page_writeback(struct page *page)
1532{
1533	struct address_space *mapping = page_mapping(page);
1534	int ret;
1535
1536	if (mapping) {
1537		struct backing_dev_info *bdi = mapping->backing_dev_info;
1538		unsigned long flags;
1539
1540		spin_lock_irqsave(&mapping->tree_lock, flags);
1541		ret = TestClearPageWriteback(page);
1542		if (ret) {
1543			radix_tree_tag_clear(&mapping->page_tree,
1544						page_index(page),
1545						PAGECACHE_TAG_WRITEBACK);
1546			if (bdi_cap_account_writeback(bdi)) {
1547				__dec_bdi_stat(bdi, BDI_WRITEBACK);
1548				__bdi_writeout_inc(bdi);
1549			}
1550		}
1551		spin_unlock_irqrestore(&mapping->tree_lock, flags);
1552	} else {
1553		ret = TestClearPageWriteback(page);
1554	}
1555	if (ret) {
1556		dec_zone_page_state(page, NR_WRITEBACK);
1557		inc_zone_page_state(page, NR_WRITTEN);
1558	}
1559	return ret;
1560}
1561
1562int test_set_page_writeback(struct page *page)
1563{
1564	struct address_space *mapping = page_mapping(page);
1565	int ret;
1566
1567	if (mapping) {
1568		struct backing_dev_info *bdi = mapping->backing_dev_info;
1569		unsigned long flags;
1570
1571		spin_lock_irqsave(&mapping->tree_lock, flags);
1572		ret = TestSetPageWriteback(page);
1573		if (!ret) {
1574			radix_tree_tag_set(&mapping->page_tree,
1575						page_index(page),
1576						PAGECACHE_TAG_WRITEBACK);
1577			if (bdi_cap_account_writeback(bdi))
1578				__inc_bdi_stat(bdi, BDI_WRITEBACK);
1579		}
1580		if (!PageDirty(page))
1581			radix_tree_tag_clear(&mapping->page_tree,
1582						page_index(page),
1583						PAGECACHE_TAG_DIRTY);
1584		radix_tree_tag_clear(&mapping->page_tree,
1585				     page_index(page),
1586				     PAGECACHE_TAG_TOWRITE);
1587		spin_unlock_irqrestore(&mapping->tree_lock, flags);
1588	} else {
1589		ret = TestSetPageWriteback(page);
1590	}
1591	if (!ret)
1592		account_page_writeback(page);
1593	return ret;
1594
1595}
1596EXPORT_SYMBOL(test_set_page_writeback);
1597
1598/*
1599 * Return true if any of the pages in the mapping are marked with the
1600 * passed tag.
1601 */
1602int mapping_tagged(struct address_space *mapping, int tag)
1603{
1604	return radix_tree_tagged(&mapping->page_tree, tag);
1605}
1606EXPORT_SYMBOL(mapping_tagged);