Linux v3.1: mm/page-writeback.c
   1/*
   2 * mm/page-writeback.c
   3 *
   4 * Copyright (C) 2002, Linus Torvalds.
   5 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
   6 *
   7 * Contains functions related to writing back dirty pages at the
   8 * address_space level.
   9 *
  10 * 10Apr2002	Andrew Morton
  11 *		Initial version
  12 */
  13
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/spinlock.h>
  17#include <linux/fs.h>
  18#include <linux/mm.h>
  19#include <linux/swap.h>
  20#include <linux/slab.h>
  21#include <linux/pagemap.h>
  22#include <linux/writeback.h>
  23#include <linux/init.h>
  24#include <linux/backing-dev.h>
  25#include <linux/task_io_accounting_ops.h>
  26#include <linux/blkdev.h>
  27#include <linux/mpage.h>
  28#include <linux/rmap.h>
  29#include <linux/percpu.h>
  30#include <linux/notifier.h>
  31#include <linux/smp.h>
  32#include <linux/sysctl.h>
  33#include <linux/cpu.h>
  34#include <linux/syscalls.h>
  35#include <linux/buffer_head.h>
  36#include <linux/pagevec.h>
  37#include <trace/events/writeback.h>
  38
  39/*
  40 * Sleep at most 200ms at a time in balance_dirty_pages().
  41 */
  42#define MAX_PAUSE		max(HZ/5, 1)
  43
  44/*
  45 * Estimate write bandwidth at 200ms intervals.
  46 */
  47#define BANDWIDTH_INTERVAL	max(HZ/5, 1)
  48
  49/*
  50 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
  51 * will look to see if it needs to force writeback or throttling.
  52 */
  53static long ratelimit_pages = 32;
  54
  55/*
  56 * When balance_dirty_pages decides that the caller needs to perform some
  57 * non-background writeback, this is how many pages it will attempt to write.
  58 * It should be somewhat larger than dirtied pages to ensure that reasonably
  59 * large amounts of I/O are submitted.
  60 */
  61static inline long sync_writeback_pages(unsigned long dirtied)
  62{
  63	if (dirtied < ratelimit_pages)
  64		dirtied = ratelimit_pages;
  65
  66	return dirtied + dirtied / 2;
  67}
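
A worked illustration of the heuristic above, as a minimal userspace sketch
(not kernel code); the 32-page floor is only the initial ratelimit_pages,
which writeback_set_ratelimit() retunes at boot:

#include <stdio.h>

static long ratelimit_pages_example = 32;	/* assumed boot default */

static long sync_writeback_pages_example(unsigned long dirtied)
{
	if (dirtied < (unsigned long)ratelimit_pages_example)
		dirtied = ratelimit_pages_example;
	return dirtied + dirtied / 2;	/* write back 1.5x what was dirtied */
}

int main(void)
{
	printf("%ld\n", sync_writeback_pages_example(8));	/* 48: floored to 32 */
	printf("%ld\n", sync_writeback_pages_example(600));	/* 900 */
	return 0;
}
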
  68
  69/* The following parameters are exported via /proc/sys/vm */
  70
  71/*
  72 * Start background writeback (via writeback threads) at this percentage
  73 */
  74int dirty_background_ratio = 10;
  75
  76/*
  77 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
  78 * dirty_background_ratio * the amount of dirtyable memory
  79 */
  80unsigned long dirty_background_bytes;
  81
  82/*
  83 * free highmem will not be subtracted from the total free memory
  84 * for calculating free ratios if vm_highmem_is_dirtyable is true
  85 */
  86int vm_highmem_is_dirtyable;
  87
  88/*
  89 * The generator of dirty data starts writeback at this percentage
  90 */
  91int vm_dirty_ratio = 20;
  92
  93/*
  94 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
  95 * vm_dirty_ratio * the amount of dirtyable memory
  96 */
  97unsigned long vm_dirty_bytes;
  98
  99/*
 100 * The interval between `kupdate'-style writebacks
 101 */
 102unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
 103
 104/*
 105 * The longest time for which data is allowed to remain dirty
 106 */
 107unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
 108
 109/*
 110 * Flag that makes the machine dump writes/reads and block dirtyings.
 111 */
 112int block_dump;
 113
 114/*
 115 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 116 * a full sync is triggered after this time elapses without any disk activity.
 117 */
 118int laptop_mode;
 119
 120EXPORT_SYMBOL(laptop_mode);
 121
 122/* End of sysctl-exported parameters */
 123
 124unsigned long global_dirty_limit;
 125
 126/*
 127 * Scale the writeback cache size proportional to the relative writeout speeds.
 128 *
 129 * We do this by keeping a floating proportion between BDIs, based on page
 130 * writeback completions [end_page_writeback()]. Those devices that write out
 131 * pages fastest will get the larger share, while the slower will get a smaller
 132 * share.
 133 *
 134 * We use page writeout completions because we are interested in getting rid of
 135 * dirty pages. Having them written out is the primary goal.
 136 *
 137 * We introduce a concept of time, a period over which we measure these events,
 138 * because demand can/will vary over time. The length of this period itself is
 139 * measured in page writeback completions.
 140 *
 141 */
 142static struct prop_descriptor vm_completions;
 143static struct prop_descriptor vm_dirties;
 144
 145/*
 146 * couple the period to the dirty_ratio:
 147 *
 148 *   period/2 ~ roundup_pow_of_two(dirty limit)
 149 */
 150static int calc_period_shift(void)
 151{
 152	unsigned long dirty_total;
 153
 154	if (vm_dirty_bytes)
 155		dirty_total = vm_dirty_bytes / PAGE_SIZE;
 156	else
 157		dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) /
 158				100;
 159	return 2 + ilog2(dirty_total - 1);
 160}
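
The shift computed above sizes the proportion period so that period/2 is the
power of two rounded up from the dirty limit. A userspace sketch of the same
arithmetic, with dirty_total = 200000 pages chosen purely as an example:

#include <stdio.h>

static int ilog2_ul(unsigned long v)	/* floor(log2(v)), v > 0 */
{
	int r = -1;

	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned long dirty_total = 200000;	/* pages; assumed example */
	int shift = 2 + ilog2_ul(dirty_total - 1);

	/* period/2 = 2^(shift-1) = 262144 = roundup_pow_of_two(200000) */
	printf("shift=%d period/2=%lu\n", shift, 1UL << (shift - 1));
	return 0;
}
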
 161
 162/*
 163 * update the period when the dirty threshold changes.
 164 */
 165static void update_completion_period(void)
 166{
 167	int shift = calc_period_shift();
 168	prop_change_shift(&vm_completions, shift);
 169	prop_change_shift(&vm_dirties, shift);
 170}
 171
 172int dirty_background_ratio_handler(struct ctl_table *table, int write,
 173		void __user *buffer, size_t *lenp,
 174		loff_t *ppos)
 175{
 176	int ret;
 177
 178	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 179	if (ret == 0 && write)
 180		dirty_background_bytes = 0;
 181	return ret;
 182}
 183
 184int dirty_background_bytes_handler(struct ctl_table *table, int write,
 185		void __user *buffer, size_t *lenp,
 186		loff_t *ppos)
 187{
 188	int ret;
 189
 190	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 191	if (ret == 0 && write)
 192		dirty_background_ratio = 0;
 193	return ret;
 194}
 195
 196int dirty_ratio_handler(struct ctl_table *table, int write,
 197		void __user *buffer, size_t *lenp,
 198		loff_t *ppos)
 199{
 200	int old_ratio = vm_dirty_ratio;
 201	int ret;
 202
 203	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 204	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
 205		update_completion_period();
 206		vm_dirty_bytes = 0;
 207	}
 208	return ret;
 209}
 210
 211
 212int dirty_bytes_handler(struct ctl_table *table, int write,
 213		void __user *buffer, size_t *lenp,
 214		loff_t *ppos)
 215{
 216	unsigned long old_bytes = vm_dirty_bytes;
 217	int ret;
 218
 219	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 220	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
 221		update_completion_period();
 222		vm_dirty_ratio = 0;
 223	}
 224	return ret;
 225}
 226
 227/*
 228 * Increment the BDI's writeout completion count and the global writeout
 229 * completion count. Called from test_clear_page_writeback().
 230 */
 231static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
 232{
 233	__inc_bdi_stat(bdi, BDI_WRITTEN);
 234	__prop_inc_percpu_max(&vm_completions, &bdi->completions,
 235			      bdi->max_prop_frac);
 236}
 237
 238void bdi_writeout_inc(struct backing_dev_info *bdi)
 239{
 240	unsigned long flags;
 241
 242	local_irq_save(flags);
 243	__bdi_writeout_inc(bdi);
 244	local_irq_restore(flags);
 245}
 246EXPORT_SYMBOL_GPL(bdi_writeout_inc);
 247
 248void task_dirty_inc(struct task_struct *tsk)
 249{
 250	prop_inc_single(&vm_dirties, &tsk->dirties);
 251}
 252
 253/*
 254 * Obtain an accurate fraction of the BDI's portion.
 255 */
 256static void bdi_writeout_fraction(struct backing_dev_info *bdi,
 257		long *numerator, long *denominator)
 258{
 259	prop_fraction_percpu(&vm_completions, &bdi->completions,
 260				numerator, denominator);
 261}
 262
 263static inline void task_dirties_fraction(struct task_struct *tsk,
 264		long *numerator, long *denominator)
 265{
 266	prop_fraction_single(&vm_dirties, &tsk->dirties,
 267				numerator, denominator);
 268}
 269
 270/*
 271 * task_dirty_limit - scale down dirty throttling threshold for one task
 272 *
 273 * task specific dirty limit:
 274 *
 275 *   dirty -= (dirty/8) * p_{t}
 276 *
 277 * To protect light/slow dirtying tasks from heavier/fast ones, we start
 278 * throttling individual tasks before reaching the bdi dirty limit.
 279 * Relatively low thresholds will be allocated to heavy dirtiers. So when
 280 * dirty pages grow large, heavy dirtiers will be throttled first, which will
 281 * effectively curb the growth of dirty pages. Light dirtiers with high enough
 282 * dirty threshold may never get throttled.
 283 */
 284#define TASK_LIMIT_FRACTION 8
 285static unsigned long task_dirty_limit(struct task_struct *tsk,
 286				       unsigned long bdi_dirty)
 287{
 288	long numerator, denominator;
 289	unsigned long dirty = bdi_dirty;
 290	u64 inv = dirty / TASK_LIMIT_FRACTION;
 291
 292	task_dirties_fraction(tsk, &numerator, &denominator);
 293	inv *= numerator;
 294	do_div(inv, denominator);
 295
 296	dirty -= inv;
 297
 298	return max(dirty, bdi_dirty/2);
 299}
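
A userspace sketch of the scaling above; the 1/2 dirtying fraction is an
assumed example, where the kernel would obtain it from task_dirties_fraction():

#include <stdio.h>

int main(void)
{
	unsigned long bdi_dirty = 8000;			/* pages, assumed */
	unsigned long num = 1, den = 2;			/* task did 1/2 of dirtying */
	unsigned long inv = bdi_dirty / 8 * num / den;	/* TASK_LIMIT_FRACTION = 8 */

	printf("%lu\n", bdi_dirty - inv);	/* 7500; a p~1 task floors at 7000 */
	return 0;
}
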
 300
 301/* Minimum limit for any task */
 302static unsigned long task_min_dirty_limit(unsigned long bdi_dirty)
 303{
 304	return bdi_dirty - bdi_dirty / TASK_LIMIT_FRACTION;
 305}
 306
  307/*
  308 * Sum of the minimum dirty ratios claimed by all BDIs (kept below 100).
  309 */
 310static unsigned int bdi_min_ratio;
 311
 312int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
 313{
 314	int ret = 0;
 315
 316	spin_lock_bh(&bdi_lock);
 317	if (min_ratio > bdi->max_ratio) {
 318		ret = -EINVAL;
 319	} else {
 320		min_ratio -= bdi->min_ratio;
 321		if (bdi_min_ratio + min_ratio < 100) {
 322			bdi_min_ratio += min_ratio;
 323			bdi->min_ratio += min_ratio;
 324		} else {
 325			ret = -EINVAL;
 326		}
 327	}
 328	spin_unlock_bh(&bdi_lock);
 329
 330	return ret;
 331}
 332
 333int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
 334{
 335	int ret = 0;
 336
 337	if (max_ratio > 100)
 338		return -EINVAL;
 339
 340	spin_lock_bh(&bdi_lock);
 341	if (bdi->min_ratio > max_ratio) {
 342		ret = -EINVAL;
 343	} else {
 344		bdi->max_ratio = max_ratio;
 345		bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
 346	}
 347	spin_unlock_bh(&bdi_lock);
 348
 349	return ret;
 350}
 351EXPORT_SYMBOL(bdi_set_max_ratio);
 352
 353/*
 354 * Work out the current dirty-memory clamping and background writeout
 355 * thresholds.
 356 *
 357 * The main aim here is to lower them aggressively if there is a lot of mapped
 358 * memory around.  To avoid stressing page reclaim with lots of unreclaimable
 359 * pages.  It is better to clamp down on writers than to start swapping, and
 360 * performing lots of scanning.
 361 *
 362 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 363 *
 364 * We don't permit the clamping level to fall below 5% - that is getting rather
 365 * excessive.
 366 *
 367 * We make sure that the background writeout level is below the adjusted
 368 * clamping level.
 369 */
 370
 371static unsigned long highmem_dirtyable_memory(unsigned long total)
 372{
 373#ifdef CONFIG_HIGHMEM
 374	int node;
 375	unsigned long x = 0;
 376
 377	for_each_node_state(node, N_HIGH_MEMORY) {
 378		struct zone *z =
 379			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
 380
 381		x += zone_page_state(z, NR_FREE_PAGES) +
 382		     zone_reclaimable_pages(z);
 383	}
 384	/*
  385	 * Make sure that the number of highmem pages is never larger
  386	 * than the total amount of dirtyable memory. This can only
 387	 * occur in very strange VM situations but we want to make sure
 388	 * that this does not occur.
 389	 */
 390	return min(x, total);
 391#else
 392	return 0;
 393#endif
 394}
 395
 396/**
 397 * determine_dirtyable_memory - amount of memory that may be used
 398 *
  399 * Returns the number of pages that can currently be freed and used
 400 * by the kernel for direct mappings.
 401 */
 402unsigned long determine_dirtyable_memory(void)
 403{
 404	unsigned long x;
 405
 406	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
 407
 408	if (!vm_highmem_is_dirtyable)
 409		x -= highmem_dirtyable_memory(x);
 410
 411	return x + 1;	/* Ensure that we never return 0 */
 412}
 413
 414static unsigned long hard_dirty_limit(unsigned long thresh)
 415{
 416	return max(thresh, global_dirty_limit);
 417}
 418
 419/*
 420 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 421 *
 422 * Calculate the dirty thresholds based on sysctl parameters
 423 * - vm.dirty_background_ratio  or  vm.dirty_background_bytes
 424 * - vm.dirty_ratio             or  vm.dirty_bytes
 425 * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
 426 * real-time tasks.
 427 */
 428void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 429{
 430	unsigned long background;
 431	unsigned long dirty;
 432	unsigned long uninitialized_var(available_memory);
 433	struct task_struct *tsk;
 434
 435	if (!vm_dirty_bytes || !dirty_background_bytes)
 436		available_memory = determine_dirtyable_memory();
 437
 438	if (vm_dirty_bytes)
 439		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
 440	else
 441		dirty = (vm_dirty_ratio * available_memory) / 100;
 442
 443	if (dirty_background_bytes)
 444		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
 445	else
 446		background = (dirty_background_ratio * available_memory) / 100;
 447
 448	if (background >= dirty)
 449		background = dirty / 2;
 450	tsk = current;
 451	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
 452		background += background / 4;
 453		dirty += dirty / 4;
 454	}
 455	*pbackground = background;
 456	*pdirty = dirty;
 457	trace_global_dirty_state(background, dirty);
 458}
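
For concreteness, a userspace sketch of the ratio paths above, assuming 1 GiB
of dirtyable memory (262144 pages of 4 KiB) and the default 20/10 ratios:

#include <stdio.h>

int main(void)
{
	unsigned long available_memory = 262144;		  /* pages, assumed */
	unsigned long dirty = 20 * available_memory / 100;	  /* vm_dirty_ratio */
	unsigned long background = 10 * available_memory / 100;  /* dirty_background_ratio */

	if (background >= dirty)
		background = dirty / 2;
	/* 26214 and 52428 pages; PF_LESS_THROTTLE/rt tasks get +25% on both */
	printf("background=%lu dirty=%lu\n", background, dirty);
	return 0;
}
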
 459
 460/**
 461 * bdi_dirty_limit - @bdi's share of dirty throttling threshold
 462 * @bdi: the backing_dev_info to query
 463 * @dirty: global dirty limit in pages
 464 *
 465 * Returns @bdi's dirty limit in pages. The term "dirty" in the context of
 466 * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
  467 * The "limit" in the name is not taken as a hard limit in
  468 * balance_dirty_pages().
 469 *
 470 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
 471 * - starving fast devices
 472 * - piling up dirty pages (that will take long time to sync) on slow devices
 473 *
 474 * The bdi's share of dirty limit will be adapting to its throughput and
 475 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 476 */
 477unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
 478{
 479	u64 bdi_dirty;
 480	long numerator, denominator;
 481
 482	/*
 483	 * Calculate this BDI's share of the dirty ratio.
 484	 */
 485	bdi_writeout_fraction(bdi, &numerator, &denominator);
 486
 487	bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
 488	bdi_dirty *= numerator;
 489	do_div(bdi_dirty, denominator);
 490
 491	bdi_dirty += (dirty * bdi->min_ratio) / 100;
 492	if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
 493		bdi_dirty = dirty * bdi->max_ratio / 100;
 494
 495	return bdi_dirty;
 496}
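
A userspace sketch of the share computation above, assuming a device that
completed 3/5 of recent writeout and no min/max ratios configured:

#include <stdio.h>

int main(void)
{
	unsigned long dirty = 52428;	/* global limit in pages, assumed */
	long num = 3, den = 5;		/* from bdi_writeout_fraction() */
	unsigned long long bdi_dirty = (unsigned long long)dirty * (100 - 0) / 100;

	bdi_dirty = bdi_dirty * num / den;
	printf("%llu\n", bdi_dirty);	/* 31456 pages for this bdi */
	return 0;
}
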
 497
 498static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
 499				       unsigned long elapsed,
 500				       unsigned long written)
 501{
 502	const unsigned long period = roundup_pow_of_two(3 * HZ);
 503	unsigned long avg = bdi->avg_write_bandwidth;
 504	unsigned long old = bdi->write_bandwidth;
 505	u64 bw;
 506
 507	/*
 508	 * bw = written * HZ / elapsed
 509	 *
 510	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
 511	 * write_bandwidth = ---------------------------------------------------
 512	 *                                          period
 513	 */
 514	bw = written - bdi->written_stamp;
 515	bw *= HZ;
 516	if (unlikely(elapsed > period)) {
 517		do_div(bw, elapsed);
 518		avg = bw;
 519		goto out;
 520	}
 521	bw += (u64)bdi->write_bandwidth * (period - elapsed);
 522	bw >>= ilog2(period);
 523
 524	/*
 525	 * one more level of smoothing, for filtering out sudden spikes
 526	 */
 527	if (avg > old && old >= (unsigned long)bw)
 528		avg -= (avg - old) >> 3;
 529
 530	if (avg < old && old <= (unsigned long)bw)
 531		avg += (old - avg) >> 3;
 532
 533out:
 534	bdi->write_bandwidth = bw;
 535	bdi->avg_write_bandwidth = avg;
 536}
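
A userspace sketch of one smoothing step above (the second-stage avg filter is
omitted), assuming HZ = 1000, so period = roundup_pow_of_two(3 * HZ) = 4096,
and a device steadily writing 5120 pages per 200 ms:

#include <stdio.h>

int main(void)
{
	const unsigned long hz = 1000, period = 4096, elapsed = 200;
	unsigned long old_bw = 25600;		/* pages/s, ~100 MB/s at 4 KiB */
	unsigned long long bw = 5120ULL * hz;	/* pages written * HZ */

	bw += (unsigned long long)old_bw * (period - elapsed);
	bw >>= 12;				/* ilog2(period) */
	printf("%llu\n", bw);	/* 25600: steady input leaves the estimate unchanged */
	return 0;
}
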
 537
 538/*
 539 * The global dirtyable memory and dirty threshold could be suddenly knocked
 540 * down by a large amount (eg. on the startup of KVM in a swapless system).
 541 * This may throw the system into deep dirty exceeded state and throttle
 542 * heavy/light dirtiers alike. To retain good responsiveness, maintain
 543 * global_dirty_limit for tracking slowly down to the knocked down dirty
 544 * threshold.
 545 */
 546static void update_dirty_limit(unsigned long thresh, unsigned long dirty)
 547{
 548	unsigned long limit = global_dirty_limit;
 549
 550	/*
 551	 * Follow up in one step.
 552	 */
 553	if (limit < thresh) {
 554		limit = thresh;
 555		goto update;
 556	}
 557
 558	/*
 559	 * Follow down slowly. Use the higher one as the target, because thresh
 560	 * may drop below dirty. This is exactly the reason to introduce
 561	 * global_dirty_limit which is guaranteed to lie above the dirty pages.
 562	 */
 563	thresh = max(thresh, dirty);
 564	if (limit > thresh) {
 565		limit -= (limit - thresh) >> 5;
 566		goto update;
 567	}
 568	return;
 569update:
 570	global_dirty_limit = limit;
 571}
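
A userspace sketch of the "follow down slowly" step: each BANDWIDTH_INTERVAL
update closes 1/32 of the gap between the tracked limit and an assumed new,
lower threshold:

#include <stdio.h>

int main(void)
{
	unsigned long limit = 100000, thresh = 60000;	/* pages, assumed */

	limit -= (limit - thresh) >> 5;
	printf("%lu\n", limit);		/* 98750 after one 200ms update */
	return 0;
}
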
 572
 573static void global_update_bandwidth(unsigned long thresh,
 574				    unsigned long dirty,
 575				    unsigned long now)
 576{
 577	static DEFINE_SPINLOCK(dirty_lock);
 578	static unsigned long update_time;
 579
 580	/*
 581	 * check locklessly first to optimize away locking for the most time
 582	 */
 583	if (time_before(now, update_time + BANDWIDTH_INTERVAL))
 584		return;
 585
 586	spin_lock(&dirty_lock);
 587	if (time_after_eq(now, update_time + BANDWIDTH_INTERVAL)) {
 588		update_dirty_limit(thresh, dirty);
 589		update_time = now;
 590	}
 591	spin_unlock(&dirty_lock);
 592}
 593
 594void __bdi_update_bandwidth(struct backing_dev_info *bdi,
 595			    unsigned long thresh,
 596			    unsigned long dirty,
 597			    unsigned long bdi_thresh,
 598			    unsigned long bdi_dirty,
 599			    unsigned long start_time)
 600{
 601	unsigned long now = jiffies;
 602	unsigned long elapsed = now - bdi->bw_time_stamp;
 603	unsigned long written;
 604
 605	/*
 606	 * rate-limit, only update once every 200ms.
 607	 */
 608	if (elapsed < BANDWIDTH_INTERVAL)
 609		return;
 610
 611	written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]);
 612
 613	/*
 614	 * Skip quiet periods when disk bandwidth is under-utilized.
 615	 * (at least 1s idle time between two flusher runs)
 616	 */
 617	if (elapsed > HZ && time_before(bdi->bw_time_stamp, start_time))
 618		goto snapshot;
 619
 620	if (thresh)
 621		global_update_bandwidth(thresh, dirty, now);
 622
 623	bdi_update_write_bandwidth(bdi, elapsed, written);
 624
 625snapshot:
 626	bdi->written_stamp = written;
 627	bdi->bw_time_stamp = now;
 628}
 629
 630static void bdi_update_bandwidth(struct backing_dev_info *bdi,
 631				 unsigned long thresh,
 632				 unsigned long dirty,
 633				 unsigned long bdi_thresh,
 634				 unsigned long bdi_dirty,
 635				 unsigned long start_time)
 636{
 637	if (time_is_after_eq_jiffies(bdi->bw_time_stamp + BANDWIDTH_INTERVAL))
 638		return;
 639	spin_lock(&bdi->wb.list_lock);
 640	__bdi_update_bandwidth(bdi, thresh, dirty, bdi_thresh, bdi_dirty,
 641			       start_time);
 642	spin_unlock(&bdi->wb.list_lock);
 643}
 644
 645/*
 646 * balance_dirty_pages() must be called by processes which are generating dirty
 647 * data.  It looks at the number of dirty pages in the machine and will force
 648 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 649 * If we're over `background_thresh' then the writeback threads are woken to
 650 * perform some writeout.
 651 */
 652static void balance_dirty_pages(struct address_space *mapping,
 653				unsigned long write_chunk)
 654{
 655	unsigned long nr_reclaimable, bdi_nr_reclaimable;
 656	unsigned long nr_dirty;  /* = file_dirty + writeback + unstable_nfs */
 657	unsigned long bdi_dirty;
 658	unsigned long background_thresh;
 659	unsigned long dirty_thresh;
 660	unsigned long bdi_thresh;
 661	unsigned long task_bdi_thresh;
 662	unsigned long min_task_bdi_thresh;
 663	unsigned long pages_written = 0;
 664	unsigned long pause = 1;
 665	bool dirty_exceeded = false;
 666	bool clear_dirty_exceeded = true;
 667	struct backing_dev_info *bdi = mapping->backing_dev_info;
 668	unsigned long start_time = jiffies;
 669
 670	for (;;) {
 671		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
 672					global_page_state(NR_UNSTABLE_NFS);
 673		nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);
 674
 675		global_dirty_limits(&background_thresh, &dirty_thresh);
 676
 677		/*
 678		 * Throttle it only when the background writeback cannot
 679		 * catch-up. This avoids (excessively) small writeouts
 680		 * when the bdi limits are ramping up.
 681		 */
 682		if (nr_dirty <= (background_thresh + dirty_thresh) / 2)
 683			break;
 684
 685		bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
 686		min_task_bdi_thresh = task_min_dirty_limit(bdi_thresh);
 687		task_bdi_thresh = task_dirty_limit(current, bdi_thresh);
 688
 689		/*
 690		 * In order to avoid the stacked BDI deadlock we need
 691		 * to ensure we accurately count the 'dirty' pages when
 692		 * the threshold is low.
 693		 *
 694		 * Otherwise it would be possible to get thresh+n pages
 695		 * reported dirty, even though there are thresh-m pages
 696		 * actually dirty; with m+n sitting in the percpu
 697		 * deltas.
 698		 */
 699		if (task_bdi_thresh < 2 * bdi_stat_error(bdi)) {
 700			bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
 701			bdi_dirty = bdi_nr_reclaimable +
 702				    bdi_stat_sum(bdi, BDI_WRITEBACK);
 703		} else {
 704			bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
 705			bdi_dirty = bdi_nr_reclaimable +
 706				    bdi_stat(bdi, BDI_WRITEBACK);
 707		}
 708
 709		/*
  710		 * The bdi thresh is a "soft" limit derived from the global
  711		 * "hard" limit. The former helps to prevent a heavy-IO bdi or
  712		 * process from holding back light ones; the latter is the
  713		 * last-resort safeguard.
 714		 */
 715		dirty_exceeded = (bdi_dirty > task_bdi_thresh) ||
 716				  (nr_dirty > dirty_thresh);
 717		clear_dirty_exceeded = (bdi_dirty <= min_task_bdi_thresh) &&
 718					(nr_dirty <= dirty_thresh);
 719
 720		if (!dirty_exceeded)
 721			break;
 722
 723		if (!bdi->dirty_exceeded)
 724			bdi->dirty_exceeded = 1;
 725
 726		bdi_update_bandwidth(bdi, dirty_thresh, nr_dirty,
 727				     bdi_thresh, bdi_dirty, start_time);
 728
 729		/* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
 730		 * Unstable writes are a feature of certain networked
 731		 * filesystems (i.e. NFS) in which data may have been
 732		 * written to the server's write cache, but has not yet
 733		 * been flushed to permanent storage.
 734		 * Only move pages to writeback if this bdi is over its
 735		 * threshold otherwise wait until the disk writes catch
 736		 * up.
 737		 */
 738		trace_balance_dirty_start(bdi);
 739		if (bdi_nr_reclaimable > task_bdi_thresh) {
 740			pages_written += writeback_inodes_wb(&bdi->wb,
 741							     write_chunk);
 742			trace_balance_dirty_written(bdi, pages_written);
 743			if (pages_written >= write_chunk)
 744				break;		/* We've done our duty */
 745		}
 746		__set_current_state(TASK_UNINTERRUPTIBLE);
 747		io_schedule_timeout(pause);
 748		trace_balance_dirty_wait(bdi);
 749
 750		dirty_thresh = hard_dirty_limit(dirty_thresh);
 751		/*
 752		 * max-pause area. If dirty exceeded but still within this
 753		 * area, no need to sleep for more than 200ms: (a) 8 pages per
 754		 * 200ms is typically more than enough to curb heavy dirtiers;
 755		 * (b) the pause time limit makes the dirtiers more responsive.
 756		 */
 757		if (nr_dirty < dirty_thresh &&
 758		    bdi_dirty < (task_bdi_thresh + bdi_thresh) / 2 &&
 759		    time_after(jiffies, start_time + MAX_PAUSE))
 760			break;
 761
 762		/*
 763		 * Increase the delay for each loop, up to our previous
 764		 * default of taking a 100ms nap.
 765		 */
 766		pause <<= 1;
 767		if (pause > HZ / 10)
 768			pause = HZ / 10;
 769	}
 770
 771	/* Clear dirty_exceeded flag only when no task can exceed the limit */
 772	if (clear_dirty_exceeded && bdi->dirty_exceeded)
 773		bdi->dirty_exceeded = 0;
 774
 775	if (writeback_in_progress(bdi))
 776		return;
 777
 778	/*
 779	 * In laptop mode, we wait until hitting the higher threshold before
 780	 * starting background writeout, and then write out all the way down
 781	 * to the lower threshold.  So slow writers cause minimal disk activity.
 782	 *
 783	 * In normal mode, we start background writeout at the lower
 784	 * background_thresh, to keep the amount of dirty memory low.
 785	 */
 786	if ((laptop_mode && pages_written) ||
 787	    (!laptop_mode && (nr_reclaimable > background_thresh)))
 788		bdi_start_background_writeback(bdi);
 789}
 790
 791void set_page_dirty_balance(struct page *page, int page_mkwrite)
 792{
 793	if (set_page_dirty(page) || page_mkwrite) {
 794		struct address_space *mapping = page_mapping(page);
 795
 796		if (mapping)
 797			balance_dirty_pages_ratelimited(mapping);
 798	}
 799}
 800
 801static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;
 802
 803/**
 804 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 805 * @mapping: address_space which was dirtied
 806 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 807 *
 808 * Processes which are dirtying memory should call in here once for each page
 809 * which was newly dirtied.  The function will periodically check the system's
 810 * dirty state and will initiate writeback if needed.
 811 *
 812 * On really big machines, get_writeback_state is expensive, so try to avoid
 813 * calling it too often (ratelimiting).  But once we're over the dirty memory
 814 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 815 * from overshooting the limit by (ratelimit_pages) each.
 816 */
 817void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 818					unsigned long nr_pages_dirtied)
 819{
 820	struct backing_dev_info *bdi = mapping->backing_dev_info;
 821	unsigned long ratelimit;
 822	unsigned long *p;
 823
 824	if (!bdi_cap_account_dirty(bdi))
 825		return;
 826
 827	ratelimit = ratelimit_pages;
 828	if (mapping->backing_dev_info->dirty_exceeded)
 829		ratelimit = 8;
 830
 831	/*
 832	 * Check the rate limiting. Also, we do not want to throttle real-time
 833	 * tasks in balance_dirty_pages(). Period.
 834	 */
 835	preempt_disable();
 836	p =  &__get_cpu_var(bdp_ratelimits);
 837	*p += nr_pages_dirtied;
 838	if (unlikely(*p >= ratelimit)) {
 839		ratelimit = sync_writeback_pages(*p);
 840		*p = 0;
 841		preempt_enable();
 842		balance_dirty_pages(mapping, ratelimit);
 843		return;
 844	}
 845	preempt_enable();
 846}
 847EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
 848
 849void throttle_vm_writeout(gfp_t gfp_mask)
 850{
 851	unsigned long background_thresh;
 852	unsigned long dirty_thresh;
 853
 854        for ( ; ; ) {
 855		global_dirty_limits(&background_thresh, &dirty_thresh);
 856
 857                /*
 858                 * Boost the allowable dirty threshold a bit for page
 859                 * allocators so they don't get DoS'ed by heavy writers
 860                 */
 861                dirty_thresh += dirty_thresh / 10;      /* wheeee... */
 862
 863                if (global_page_state(NR_UNSTABLE_NFS) +
 864			global_page_state(NR_WRITEBACK) <= dirty_thresh)
 865                        	break;
 866                congestion_wait(BLK_RW_ASYNC, HZ/10);
 867
 868		/*
 869		 * The caller might hold locks which can prevent IO completion
 870		 * or progress in the filesystem.  So we cannot just sit here
 871		 * waiting for IO to complete.
 872		 */
 873		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
 874			break;
 875        }
 876}
 877
 878/*
 879 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 880 */
 881int dirty_writeback_centisecs_handler(ctl_table *table, int write,
 882	void __user *buffer, size_t *length, loff_t *ppos)
 883{
 884	proc_dointvec(table, write, buffer, length, ppos);
 885	bdi_arm_supers_timer();
 886	return 0;
 887}
 888
 889#ifdef CONFIG_BLOCK
 890void laptop_mode_timer_fn(unsigned long data)
 891{
 892	struct request_queue *q = (struct request_queue *)data;
 893	int nr_pages = global_page_state(NR_FILE_DIRTY) +
 894		global_page_state(NR_UNSTABLE_NFS);
 895
 896	/*
 897	 * We want to write everything out, not just down to the dirty
 898	 * threshold
 899	 */
 900	if (bdi_has_dirty_io(&q->backing_dev_info))
 901		bdi_start_writeback(&q->backing_dev_info, nr_pages);
 902}
 903
 904/*
 905 * We've spun up the disk and we're in laptop mode: schedule writeback
 906 * of all dirty data a few seconds from now.  If the flush is already scheduled
 907 * then push it back - the user is still using the disk.
 908 */
 909void laptop_io_completion(struct backing_dev_info *info)
 910{
 911	mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
 912}
 913
 914/*
 915 * We're in laptop mode and we've just synced. The sync's writes will have
 916 * caused another writeback to be scheduled by laptop_io_completion.
 917 * Nothing needs to be written back anymore, so we unschedule the writeback.
 918 */
 919void laptop_sync_completion(void)
 920{
 921	struct backing_dev_info *bdi;
 922
 923	rcu_read_lock();
 924
 925	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
 926		del_timer(&bdi->laptop_mode_wb_timer);
 927
 928	rcu_read_unlock();
 929}
 930#endif
 931
 932/*
 933 * If ratelimit_pages is too high then we can get into dirty-data overload
 934 * if a large number of processes all perform writes at the same time.
 935 * If it is too low then SMP machines will call the (expensive)
 936 * get_writeback_state too often.
 937 *
 938 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 939 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 940 * thresholds before writeback cuts in.
 941 *
 942 * But the limit should not be set too high.  Because it also controls the
 943 * amount of memory which the balance_dirty_pages() caller has to write back.
 944 * If this is too large then the caller will block on the IO queue all the
 945 * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 946 * will write six megabyte chunks, max.
 947 */
 948
 949void writeback_set_ratelimit(void)
 950{
 951	ratelimit_pages = vm_total_pages / (num_online_cpus() * 32);
 952	if (ratelimit_pages < 16)
 953		ratelimit_pages = 16;
 954	if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
 955		ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
 956}
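
A userspace sketch of this sizing for an assumed 4 GiB machine (4 KiB pages)
with 4 online CPUs, where the 4 MB cap wins:

#include <stdio.h>

int main(void)
{
	unsigned long vm_total_pages = 1048576, cpus = 4, page_size = 4096;
	long ratelimit = vm_total_pages / (cpus * 32);	/* 8192 pages */

	if (ratelimit < 16)
		ratelimit = 16;
	if (ratelimit * (long)page_size > 4096 * 1024)	/* > 4 MB, so cap */
		ratelimit = (4096 * 1024) / page_size;
	printf("%ld\n", ratelimit);	/* 1024 pages */
	return 0;
}
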
 957
 958static int __cpuinit
 959ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
 960{
 961	writeback_set_ratelimit();
 962	return NOTIFY_DONE;
 963}
 964
 965static struct notifier_block __cpuinitdata ratelimit_nb = {
 966	.notifier_call	= ratelimit_handler,
 967	.next		= NULL,
 968};
 969
 970/*
 971 * Called early on to tune the page writeback dirty limits.
 972 *
 973 * We used to scale dirty pages according to how total memory
 974 * related to pages that could be allocated for buffers (by
  975 * comparing nr_free_buffer_pages() to vm_total_pages).
 976 *
 977 * However, that was when we used "dirty_ratio" to scale with
 978 * all memory, and we don't do that any more. "dirty_ratio"
 979 * is now applied to total non-HIGHPAGE memory (by subtracting
 980 * totalhigh_pages from vm_total_pages), and as such we can't
 981 * get into the old insane situation any more where we had
 982 * large amounts of dirty pages compared to a small amount of
 983 * non-HIGHMEM memory.
 984 *
 985 * But we might still want to scale the dirty_ratio by how
 986 * much memory the box has..
 987 */
 988void __init page_writeback_init(void)
 989{
 990	int shift;
 991
 992	writeback_set_ratelimit();
 993	register_cpu_notifier(&ratelimit_nb);
 994
 995	shift = calc_period_shift();
 996	prop_descriptor_init(&vm_completions, shift);
 997	prop_descriptor_init(&vm_dirties, shift);
 998}
 999
1000/**
1001 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
1002 * @mapping: address space structure to write
1003 * @start: starting page index
1004 * @end: ending page index (inclusive)
1005 *
1006 * This function scans the page range from @start to @end (inclusive) and tags
1007 * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
1008 * that write_cache_pages (or whoever calls this function) will then use
1009 * TOWRITE tag to identify pages eligible for writeback.  This mechanism is
1010 * used to avoid livelocking of writeback by a process steadily creating new
1011 * dirty pages in the file (thus it is important for this function to be quick
1012 * so that it can tag pages faster than a dirtying process can create them).
1013 */
1014/*
1015 * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency.
1016 */
1017void tag_pages_for_writeback(struct address_space *mapping,
1018			     pgoff_t start, pgoff_t end)
1019{
1020#define WRITEBACK_TAG_BATCH 4096
1021	unsigned long tagged;
1022
1023	do {
1024		spin_lock_irq(&mapping->tree_lock);
1025		tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
1026				&start, end, WRITEBACK_TAG_BATCH,
1027				PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
1028		spin_unlock_irq(&mapping->tree_lock);
1029		WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
1030		cond_resched();
1031		/* We check 'start' to handle wrapping when end == ~0UL */
1032	} while (tagged >= WRITEBACK_TAG_BATCH && start);
1033}
1034EXPORT_SYMBOL(tag_pages_for_writeback);
1035
1036/**
1037 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
1038 * @mapping: address space structure to write
1039 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
1040 * @writepage: function called for each page
1041 * @data: data passed to writepage function
1042 *
1043 * If a page is already under I/O, write_cache_pages() skips it, even
1044 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
1045 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
1046 * and msync() need to guarantee that all the data which was dirty at the time
1047 * the call was made get new I/O started against them.  If wbc->sync_mode is
1048 * WB_SYNC_ALL then we were called for data integrity and we must wait for
1049 * existing IO to complete.
1050 *
1051 * To avoid livelocks (when other process dirties new pages), we first tag
1052 * pages which should be written back with TOWRITE tag and only then start
1053 * writing them. For data-integrity sync we have to be careful so that we do
1054 * not miss some pages (e.g., because some other process has cleared TOWRITE
1055 * tag we set). The rule we follow is that TOWRITE tag can be cleared only
1056 * by the process clearing the DIRTY tag (and submitting the page for IO).
1057 */
1058int write_cache_pages(struct address_space *mapping,
1059		      struct writeback_control *wbc, writepage_t writepage,
1060		      void *data)
1061{
1062	int ret = 0;
1063	int done = 0;
1064	struct pagevec pvec;
1065	int nr_pages;
1066	pgoff_t uninitialized_var(writeback_index);
1067	pgoff_t index;
1068	pgoff_t end;		/* Inclusive */
1069	pgoff_t done_index;
1070	int cycled;
1071	int range_whole = 0;
1072	int tag;
1073
1074	pagevec_init(&pvec, 0);
1075	if (wbc->range_cyclic) {
1076		writeback_index = mapping->writeback_index; /* prev offset */
1077		index = writeback_index;
1078		if (index == 0)
1079			cycled = 1;
1080		else
1081			cycled = 0;
1082		end = -1;
1083	} else {
1084		index = wbc->range_start >> PAGE_CACHE_SHIFT;
1085		end = wbc->range_end >> PAGE_CACHE_SHIFT;
1086		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1087			range_whole = 1;
1088		cycled = 1; /* ignore range_cyclic tests */
1089	}
1090	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1091		tag = PAGECACHE_TAG_TOWRITE;
1092	else
1093		tag = PAGECACHE_TAG_DIRTY;
1094retry:
1095	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1096		tag_pages_for_writeback(mapping, index, end);
1097	done_index = index;
1098	while (!done && (index <= end)) {
1099		int i;
1100
1101		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
1102			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1103		if (nr_pages == 0)
1104			break;
1105
1106		for (i = 0; i < nr_pages; i++) {
1107			struct page *page = pvec.pages[i];
1108
1109			/*
1110			 * At this point, the page may be truncated or
1111			 * invalidated (changing page->mapping to NULL), or
1112			 * even swizzled back from swapper_space to tmpfs file
1113			 * mapping. However, page->index will not change
1114			 * because we have a reference on the page.
1115			 */
1116			if (page->index > end) {
1117				/*
1118				 * can't be range_cyclic (1st pass) because
1119				 * end == -1 in that case.
1120				 */
1121				done = 1;
1122				break;
1123			}
1124
1125			done_index = page->index;
1126
1127			lock_page(page);
1128
1129			/*
1130			 * Page truncated or invalidated. We can freely skip it
1131			 * then, even for data integrity operations: the page
1132			 * has disappeared concurrently, so there could be no
1133			 * real expectation of this data integrity operation
1134			 * even if there is now a new, dirty page at the same
1135			 * pagecache address.
1136			 */
1137			if (unlikely(page->mapping != mapping)) {
1138continue_unlock:
1139				unlock_page(page);
1140				continue;
1141			}
1142
1143			if (!PageDirty(page)) {
1144				/* someone wrote it for us */
1145				goto continue_unlock;
1146			}
1147
1148			if (PageWriteback(page)) {
1149				if (wbc->sync_mode != WB_SYNC_NONE)
1150					wait_on_page_writeback(page);
1151				else
1152					goto continue_unlock;
1153			}
1154
1155			BUG_ON(PageWriteback(page));
1156			if (!clear_page_dirty_for_io(page))
1157				goto continue_unlock;
1158
1159			trace_wbc_writepage(wbc, mapping->backing_dev_info);
1160			ret = (*writepage)(page, wbc, data);
1161			if (unlikely(ret)) {
1162				if (ret == AOP_WRITEPAGE_ACTIVATE) {
1163					unlock_page(page);
1164					ret = 0;
1165				} else {
1166					/*
1167					 * done_index is set past this page,
1168					 * so media errors will not choke
1169					 * background writeout for the entire
1170					 * file. This has consequences for
1171					 * range_cyclic semantics (ie. it may
1172					 * not be suitable for data integrity
1173					 * writeout).
1174					 */
1175					done_index = page->index + 1;
1176					done = 1;
1177					break;
1178				}
1179			}
1180
1181			/*
1182			 * We stop writing back only if we are not doing
1183			 * integrity sync. In case of integrity sync we have to
1184			 * keep going until we have written all the pages
1185			 * we tagged for writeback prior to entering this loop.
1186			 */
1187			if (--wbc->nr_to_write <= 0 &&
1188			    wbc->sync_mode == WB_SYNC_NONE) {
1189				done = 1;
1190				break;
1191			}
1192		}
1193		pagevec_release(&pvec);
1194		cond_resched();
1195	}
1196	if (!cycled && !done) {
1197		/*
1198		 * range_cyclic:
1199		 * We hit the last page and there is more work to be done: wrap
1200		 * back to the start of the file
1201		 */
1202		cycled = 1;
1203		index = 0;
1204		end = writeback_index - 1;
1205		goto retry;
1206	}
1207	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1208		mapping->writeback_index = done_index;
1209
1210	return ret;
1211}
1212EXPORT_SYMBOL(write_cache_pages);
1213
1214/*
1215 * Function used by generic_writepages to call the real writepage
1216 * function and set the mapping flags on error
1217 */
1218static int __writepage(struct page *page, struct writeback_control *wbc,
1219		       void *data)
1220{
1221	struct address_space *mapping = data;
1222	int ret = mapping->a_ops->writepage(page, wbc);
1223	mapping_set_error(mapping, ret);
1224	return ret;
1225}
1226
1227/**
1228 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
1229 * @mapping: address space structure to write
1230 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
1231 *
1232 * This is a library function, which implements the writepages()
1233 * address_space_operation.
1234 */
1235int generic_writepages(struct address_space *mapping,
1236		       struct writeback_control *wbc)
1237{
1238	struct blk_plug plug;
1239	int ret;
1240
1241	/* deal with chardevs and other special file */
1242	if (!mapping->a_ops->writepage)
1243		return 0;
1244
1245	blk_start_plug(&plug);
1246	ret = write_cache_pages(mapping, wbc, __writepage, mapping);
1247	blk_finish_plug(&plug);
1248	return ret;
1249}
1250
1251EXPORT_SYMBOL(generic_writepages);
1252
1253int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
1254{
1255	int ret;
1256
1257	if (wbc->nr_to_write <= 0)
1258		return 0;
1259	if (mapping->a_ops->writepages)
1260		ret = mapping->a_ops->writepages(mapping, wbc);
1261	else
1262		ret = generic_writepages(mapping, wbc);
1263	return ret;
1264}
1265
1266/**
1267 * write_one_page - write out a single page and optionally wait on I/O
1268 * @page: the page to write
1269 * @wait: if true, wait on writeout
1270 *
1271 * The page must be locked by the caller and will be unlocked upon return.
1272 *
1273 * write_one_page() returns a negative error code if I/O failed.
1274 */
1275int write_one_page(struct page *page, int wait)
1276{
1277	struct address_space *mapping = page->mapping;
1278	int ret = 0;
1279	struct writeback_control wbc = {
1280		.sync_mode = WB_SYNC_ALL,
1281		.nr_to_write = 1,
1282	};
1283
1284	BUG_ON(!PageLocked(page));
1285
1286	if (wait)
1287		wait_on_page_writeback(page);
1288
1289	if (clear_page_dirty_for_io(page)) {
1290		page_cache_get(page);
1291		ret = mapping->a_ops->writepage(page, &wbc);
1292		if (ret == 0 && wait) {
1293			wait_on_page_writeback(page);
1294			if (PageError(page))
1295				ret = -EIO;
1296		}
1297		page_cache_release(page);
1298	} else {
1299		unlock_page(page);
1300	}
1301	return ret;
1302}
1303EXPORT_SYMBOL(write_one_page);
1304
1305/*
1306 * For address_spaces which do not use buffers nor write back.
1307 */
1308int __set_page_dirty_no_writeback(struct page *page)
1309{
1310	if (!PageDirty(page))
1311		return !TestSetPageDirty(page);
1312	return 0;
1313}
1314
1315/*
1316 * Helper function for set_page_dirty family.
1317 * NOTE: This relies on being atomic wrt interrupts.
1318 */
1319void account_page_dirtied(struct page *page, struct address_space *mapping)
1320{
1321	if (mapping_cap_account_dirty(mapping)) {
1322		__inc_zone_page_state(page, NR_FILE_DIRTY);
1323		__inc_zone_page_state(page, NR_DIRTIED);
1324		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
1325		task_dirty_inc(current);
1326		task_io_account_write(PAGE_CACHE_SIZE);
1327	}
1328}
1329EXPORT_SYMBOL(account_page_dirtied);
1330
1331/*
1332 * Helper function for set_page_writeback family.
1333 * NOTE: Unlike account_page_dirtied this does not rely on being atomic
1334 * wrt interrupts.
1335 */
1336void account_page_writeback(struct page *page)
1337{
1338	inc_zone_page_state(page, NR_WRITEBACK);
1339}
1340EXPORT_SYMBOL(account_page_writeback);
1341
1342/*
1343 * For address_spaces which do not use buffers.  Just tag the page as dirty in
1344 * its radix tree.
1345 *
1346 * This is also used when a single buffer is being dirtied: we want to set the
1347 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
1348 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
1349 *
1350 * Most callers have locked the page, which pins the address_space in memory.
1351 * But zap_pte_range() does not lock the page, however in that case the
1352 * mapping is pinned by the vma's ->vm_file reference.
1353 *
1354 * We take care to handle the case where the page was truncated from the
1355 * mapping by re-checking page_mapping() inside tree_lock.
1356 */
1357int __set_page_dirty_nobuffers(struct page *page)
1358{
1359	if (!TestSetPageDirty(page)) {
1360		struct address_space *mapping = page_mapping(page);
1361		struct address_space *mapping2;
1362
1363		if (!mapping)
1364			return 1;
1365
1366		spin_lock_irq(&mapping->tree_lock);
1367		mapping2 = page_mapping(page);
1368		if (mapping2) { /* Race with truncate? */
1369			BUG_ON(mapping2 != mapping);
1370			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
1371			account_page_dirtied(page, mapping);
1372			radix_tree_tag_set(&mapping->page_tree,
1373				page_index(page), PAGECACHE_TAG_DIRTY);
1374		}
1375		spin_unlock_irq(&mapping->tree_lock);
1376		if (mapping->host) {
1377			/* !PageAnon && !swapper_space */
1378			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1379		}
1380		return 1;
1381	}
1382	return 0;
1383}
1384EXPORT_SYMBOL(__set_page_dirty_nobuffers);
1385
1386/*
1387 * When a writepage implementation decides that it doesn't want to write this
1388 * page for some reason, it should redirty the locked page via
1389 * redirty_page_for_writepage() and it should then unlock the page and return 0
1390 */
1391int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
1392{
1393	wbc->pages_skipped++;
1394	return __set_page_dirty_nobuffers(page);
1395}
1396EXPORT_SYMBOL(redirty_page_for_writepage);
1397
1398/*
1399 * Dirty a page.
1400 *
1401 * For pages with a mapping this should be done under the page lock
1402 * for the benefit of asynchronous memory errors who prefer a consistent
1403 * dirty state. This rule can be broken in some special cases,
1404 * but should be better not to.
1405 *
1406 * If the mapping doesn't provide a set_page_dirty a_op, then
1407 * just fall through and assume that it wants buffer_heads.
1408 */
1409int set_page_dirty(struct page *page)
1410{
1411	struct address_space *mapping = page_mapping(page);
1412
1413	if (likely(mapping)) {
1414		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
1415		/*
1416		 * readahead/lru_deactivate_page could remain
1417		 * PG_readahead/PG_reclaim due to race with end_page_writeback
1418		 * About readahead, if the page is written, the flags would be
1419		 * reset. So no problem.
1420		 * About lru_deactivate_page, if the page is redirtied, the flag
1421		 * will be reset. So no problem. But if the page is used by
1422		 * readahead it will confuse readahead and make it restart the
1423		 * size-rampup process. But it's a trivial problem.
1424		 */
1425		ClearPageReclaim(page);
1426#ifdef CONFIG_BLOCK
1427		if (!spd)
1428			spd = __set_page_dirty_buffers;
1429#endif
1430		return (*spd)(page);
1431	}
1432	if (!PageDirty(page)) {
1433		if (!TestSetPageDirty(page))
1434			return 1;
1435	}
1436	return 0;
1437}
1438EXPORT_SYMBOL(set_page_dirty);
1439
1440/*
1441 * set_page_dirty() is racy if the caller has no reference against
1442 * page->mapping->host, and if the page is unlocked.  This is because another
1443 * CPU could truncate the page off the mapping and then free the mapping.
1444 *
1445 * Usually, the page _is_ locked, or the caller is a user-space process which
1446 * holds a reference on the inode by having an open file.
1447 *
1448 * In other cases, the page should be locked before running set_page_dirty().
1449 */
1450int set_page_dirty_lock(struct page *page)
1451{
1452	int ret;
1453
1454	lock_page(page);
1455	ret = set_page_dirty(page);
1456	unlock_page(page);
1457	return ret;
1458}
1459EXPORT_SYMBOL(set_page_dirty_lock);
1460
1461/*
1462 * Clear a page's dirty flag, while caring for dirty memory accounting.
1463 * Returns true if the page was previously dirty.
1464 *
1465 * This is for preparing to put the page under writeout.  We leave the page
1466 * tagged as dirty in the radix tree so that a concurrent write-for-sync
1467 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
1468 * implementation will run either set_page_writeback() or set_page_dirty(),
1469 * at which stage we bring the page's dirty flag and radix-tree dirty tag
1470 * back into sync.
1471 *
1472 * This incoherency between the page's dirty flag and radix-tree tag is
1473 * unfortunate, but it only exists while the page is locked.
1474 */
1475int clear_page_dirty_for_io(struct page *page)
1476{
1477	struct address_space *mapping = page_mapping(page);
1478
1479	BUG_ON(!PageLocked(page));
1480
1481	if (mapping && mapping_cap_account_dirty(mapping)) {
1482		/*
1483		 * Yes, Virginia, this is indeed insane.
1484		 *
1485		 * We use this sequence to make sure that
1486		 *  (a) we account for dirty stats properly
1487		 *  (b) we tell the low-level filesystem to
1488		 *      mark the whole page dirty if it was
1489		 *      dirty in a pagetable. Only to then
1490		 *  (c) clean the page again and return 1 to
1491		 *      cause the writeback.
1492		 *
1493		 * This way we avoid all nasty races with the
1494		 * dirty bit in multiple places and clearing
1495		 * them concurrently from different threads.
1496		 *
1497		 * Note! Normally the "set_page_dirty(page)"
1498		 * has no effect on the actual dirty bit - since
1499		 * that will already usually be set. But we
1500		 * need the side effects, and it can help us
1501		 * avoid races.
1502		 *
1503		 * We basically use the page "master dirty bit"
1504		 * as a serialization point for all the different
1505		 * threads doing their things.
1506		 */
1507		if (page_mkclean(page))
1508			set_page_dirty(page);
1509		/*
1510		 * We carefully synchronise fault handlers against
1511		 * installing a dirty pte and marking the page dirty
1512		 * at this point. We do this by having them hold the
1513		 * page lock at some point after installing their
1514		 * pte, but before marking the page dirty.
1515		 * Pages are always locked coming in here, so we get
1516		 * the desired exclusion. See mm/memory.c:do_wp_page()
1517		 * for more comments.
1518		 */
1519		if (TestClearPageDirty(page)) {
1520			dec_zone_page_state(page, NR_FILE_DIRTY);
1521			dec_bdi_stat(mapping->backing_dev_info,
1522					BDI_RECLAIMABLE);
1523			return 1;
1524		}
1525		return 0;
1526	}
1527	return TestClearPageDirty(page);
1528}
1529EXPORT_SYMBOL(clear_page_dirty_for_io);
1530
1531int test_clear_page_writeback(struct page *page)
1532{
1533	struct address_space *mapping = page_mapping(page);
1534	int ret;
1535
1536	if (mapping) {
1537		struct backing_dev_info *bdi = mapping->backing_dev_info;
1538		unsigned long flags;
1539
1540		spin_lock_irqsave(&mapping->tree_lock, flags);
1541		ret = TestClearPageWriteback(page);
1542		if (ret) {
1543			radix_tree_tag_clear(&mapping->page_tree,
1544						page_index(page),
1545						PAGECACHE_TAG_WRITEBACK);
1546			if (bdi_cap_account_writeback(bdi)) {
1547				__dec_bdi_stat(bdi, BDI_WRITEBACK);
1548				__bdi_writeout_inc(bdi);
1549			}
1550		}
1551		spin_unlock_irqrestore(&mapping->tree_lock, flags);
1552	} else {
1553		ret = TestClearPageWriteback(page);
1554	}
1555	if (ret) {
1556		dec_zone_page_state(page, NR_WRITEBACK);
1557		inc_zone_page_state(page, NR_WRITTEN);
1558	}
1559	return ret;
1560}
1561
1562int test_set_page_writeback(struct page *page)
1563{
1564	struct address_space *mapping = page_mapping(page);
1565	int ret;
1566
1567	if (mapping) {
1568		struct backing_dev_info *bdi = mapping->backing_dev_info;
1569		unsigned long flags;
1570
1571		spin_lock_irqsave(&mapping->tree_lock, flags);
1572		ret = TestSetPageWriteback(page);
1573		if (!ret) {
1574			radix_tree_tag_set(&mapping->page_tree,
1575						page_index(page),
1576						PAGECACHE_TAG_WRITEBACK);
1577			if (bdi_cap_account_writeback(bdi))
1578				__inc_bdi_stat(bdi, BDI_WRITEBACK);
1579		}
1580		if (!PageDirty(page))
1581			radix_tree_tag_clear(&mapping->page_tree,
1582						page_index(page),
1583						PAGECACHE_TAG_DIRTY);
1584		radix_tree_tag_clear(&mapping->page_tree,
1585				     page_index(page),
1586				     PAGECACHE_TAG_TOWRITE);
1587		spin_unlock_irqrestore(&mapping->tree_lock, flags);
1588	} else {
1589		ret = TestSetPageWriteback(page);
1590	}
1591	if (!ret)
1592		account_page_writeback(page);
1593	return ret;
1594
1595}
1596EXPORT_SYMBOL(test_set_page_writeback);
1597
1598/*
1599 * Return true if any of the pages in the mapping are marked with the
1600 * passed tag.
1601 */
1602int mapping_tagged(struct address_space *mapping, int tag)
1603{
1604	return radix_tree_tagged(&mapping->page_tree, tag);
1605}
1606EXPORT_SYMBOL(mapping_tagged);
v3.15
   1/*
   2 * mm/page-writeback.c
   3 *
   4 * Copyright (C) 2002, Linus Torvalds.
   5 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
   6 *
   7 * Contains functions related to writing back dirty pages at the
   8 * address_space level.
   9 *
  10 * 10Apr2002	Andrew Morton
  11 *		Initial version
  12 */
  13
  14#include <linux/kernel.h>
  15#include <linux/export.h>
  16#include <linux/spinlock.h>
  17#include <linux/fs.h>
  18#include <linux/mm.h>
  19#include <linux/swap.h>
  20#include <linux/slab.h>
  21#include <linux/pagemap.h>
  22#include <linux/writeback.h>
  23#include <linux/init.h>
  24#include <linux/backing-dev.h>
  25#include <linux/task_io_accounting_ops.h>
  26#include <linux/blkdev.h>
  27#include <linux/mpage.h>
  28#include <linux/rmap.h>
  29#include <linux/percpu.h>
  30#include <linux/notifier.h>
  31#include <linux/smp.h>
  32#include <linux/sysctl.h>
  33#include <linux/cpu.h>
  34#include <linux/syscalls.h>
  35#include <linux/buffer_head.h> /* __set_page_dirty_buffers */
  36#include <linux/pagevec.h>
  37#include <linux/timer.h>
  38#include <linux/sched/rt.h>
  39#include <linux/mm_inline.h>
  40#include <trace/events/writeback.h>
  41
  42#include "internal.h"
  43
  44/*
  45 * Sleep at most 200ms at a time in balance_dirty_pages().
  46 */
  47#define MAX_PAUSE		max(HZ/5, 1)
  48
  49/*
  50 * Try to keep balance_dirty_pages() call intervals higher than this many pages
  51 * by raising pause time to max_pause when falls below it.
  52 */
  53#define DIRTY_POLL_THRESH	(128 >> (PAGE_SHIFT - 10))
  54
  55/*
  56 * Estimate write bandwidth at 200ms intervals.
  57 */
  58#define BANDWIDTH_INTERVAL	max(HZ/5, 1)
  59
  60#define RATELIMIT_CALC_SHIFT	10
  61
  62/*
  63 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
  64 * will look to see if it needs to force writeback or throttling.
  65 */
  66static long ratelimit_pages = 32;
  67
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  68/* The following parameters are exported via /proc/sys/vm */
  69
  70/*
  71 * Start background writeback (via writeback threads) at this percentage
  72 */
  73int dirty_background_ratio = 10;
  74
  75/*
  76 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
  77 * dirty_background_ratio * the amount of dirtyable memory
  78 */
  79unsigned long dirty_background_bytes;
  80
  81/*
  82 * free highmem will not be subtracted from the total free memory
  83 * for calculating free ratios if vm_highmem_is_dirtyable is true
  84 */
  85int vm_highmem_is_dirtyable;
  86
  87/*
  88 * The generator of dirty data starts writeback at this percentage
  89 */
  90int vm_dirty_ratio = 20;
  91
  92/*
  93 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
  94 * vm_dirty_ratio * the amount of dirtyable memory
  95 */
  96unsigned long vm_dirty_bytes;
  97
  98/*
  99 * The interval between `kupdate'-style writebacks
 100 */
 101unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
 102
 103EXPORT_SYMBOL_GPL(dirty_writeback_interval);
 104
 105/*
 106 * The longest time for which data is allowed to remain dirty
 107 */
 108unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
 109
 110/*
 111 * Flag that makes the machine dump writes/reads and block dirtyings.
 112 */
 113int block_dump;
 114
 115/*
 116 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 117 * a full sync is triggered after this time elapses without any disk activity.
 118 */
 119int laptop_mode;
 120
 121EXPORT_SYMBOL(laptop_mode);
 122
 123/* End of sysctl-exported parameters */
 124
 125unsigned long global_dirty_limit;
 126
 127/*
 128 * Scale the writeback cache size proportionally to the relative writeout speeds.
 129 *
 130 * We do this by keeping a floating proportion between BDIs, based on page
 131 * writeback completions [end_page_writeback()]. Those devices that write out
 132 * pages fastest will get the larger share, while the slower will get a smaller
 133 * share.
 134 *
 135 * We use page writeout completions because we are interested in getting rid of
 136 * dirty pages. Having them written out is the primary goal.
 137 *
 138 * We introduce a concept of time, a period over which we measure these events,
 139 * because demand can/will vary over time. The length of this period itself is
 140 * measured in page writeback completions.
 141 *
 142 */
 143static struct fprop_global writeout_completions;
 144
 145static void writeout_period(unsigned long t);
 146/* Timer for aging of writeout_completions */
 147static struct timer_list writeout_period_timer =
 148		TIMER_DEFERRED_INITIALIZER(writeout_period, 0, 0);
 149static unsigned long writeout_period_time = 0;
 150
 151/*
 152 * Length of period for aging writeout fractions of bdis. This is an
 153 * arbitrarily chosen number. The longer the period, the more slowly the
 154 * fractions will reflect changes in the current writeout rate.
 155 */
 156#define VM_COMPLETIONS_PERIOD_LEN (3*HZ)
 157
 158/*
 159 * Work out the current dirty-memory clamping and background writeout
 160 * thresholds.
 161 *
 162 * The main aim here is to lower them aggressively if there is a lot of mapped
 163 * memory around, to avoid stressing page reclaim with lots of unreclaimable
 164 * pages.  It is better to clamp down on writers than to start swapping and
 165 * performing lots of scanning.
 166 *
 167 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 168 *
 169 * We don't permit the clamping level to fall below 5% - that is getting rather
 170 * excessive.
 171 *
 172 * We make sure that the background writeout level is below the adjusted
 173 * clamping level.
 174 */
 175
 176/*
 177 * In a memory zone, there is a certain amount of pages we consider
 178 * available for the page cache, which is essentially the number of
 179 * free and reclaimable pages, minus some zone reserves to protect
 180 * lowmem and the ability to uphold the zone's watermarks without
 181 * requiring writeback.
 182 *
 183 * This number of dirtyable pages is the base value to which the
 184 * user-configurable dirty ratio is applied to give the effective
 185 * number of pages that are allowed to be actually dirtied, either
 186 * per individual zone, or globally by using the sum over all zones.
 187 *
 188 * Because the user is allowed to specify the dirty limit globally as
 189 * an absolute number of bytes, calculating the per-zone dirty limit can
 190 * require translating the configured limit into a percentage of
 191 * global dirtyable memory first.
 192 */
 193
 194/**
 195 * zone_dirtyable_memory - number of dirtyable pages in a zone
 196 * @zone: the zone
 197 *
 198 * Returns the zone's number of pages potentially available for dirty
 199 * page cache.  This is the base value for the per-zone dirty limits.
 200 */
 201static unsigned long zone_dirtyable_memory(struct zone *zone)
 202{
 203	unsigned long nr_pages;
 204
 205	nr_pages = zone_page_state(zone, NR_FREE_PAGES);
 206	nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
 207
 208	nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
 209	nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);
 210
 211	return nr_pages;
 212}
 213
 214static unsigned long highmem_dirtyable_memory(unsigned long total)
 215{
 216#ifdef CONFIG_HIGHMEM
 217	int node;
 218	unsigned long x = 0;
 219
 220	for_each_node_state(node, N_HIGH_MEMORY) {
 221		struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
 222
 223		x += zone_dirtyable_memory(z);
 224	}
 225	/*
 226	 * Unreclaimable memory (kernel memory or anonymous memory
 227	 * without swap) can bring down the dirtyable pages below
 228	 * the zone's dirty balance reserve and the above calculation
 229	 * will underflow.  However we still want to add in nodes
 230	 * which are below threshold (negative values) to get a more
 231	 * accurate calculation but make sure that the total never
 232	 * underflows.
 233	 */
 234	if ((long)x < 0)
 235		x = 0;
 236
 237	/*
 238	 * Make sure that the number of highmem pages is never larger
 239 * than the total amount of dirtyable memory. This can only
 240 * occur in very strange VM situations, but we want to guard
 241 * against it anyway.
 242	 */
 243	return min(x, total);
 244#else
 245	return 0;
 246#endif
 247}
 248
 249/**
 250 * global_dirtyable_memory - number of globally dirtyable pages
 251 *
 252 * Returns the global number of pages potentially available for dirty
 253 * page cache.  This is the base value for the global dirty limits.
 254 */
 255static unsigned long global_dirtyable_memory(void)
 256{
 257	unsigned long x;
 258
 259	x = global_page_state(NR_FREE_PAGES);
 260	x -= min(x, dirty_balance_reserve);
 261
 262	x += global_page_state(NR_INACTIVE_FILE);
 263	x += global_page_state(NR_ACTIVE_FILE);
 264
 265	if (!vm_highmem_is_dirtyable)
 266		x -= highmem_dirtyable_memory(x);
 267
 268	return x + 1;	/* Ensure that we never return 0 */
 269}
 270
 271/*
 272 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 273 *
 274 * Calculate the dirty thresholds based on sysctl parameters
 275 * - vm.dirty_background_ratio  or  vm.dirty_background_bytes
 276 * - vm.dirty_ratio             or  vm.dirty_bytes
 277 * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (i.e. nfsd) and
 278 * real-time tasks.
 279 */
 280void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 281{
 282	unsigned long background;
 283	unsigned long dirty;
 284	unsigned long uninitialized_var(available_memory);
 285	struct task_struct *tsk;
 286
 287	if (!vm_dirty_bytes || !dirty_background_bytes)
 288		available_memory = global_dirtyable_memory();
 289
 290	if (vm_dirty_bytes)
 291		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
 292	else
 293		dirty = (vm_dirty_ratio * available_memory) / 100;
 294
 295	if (dirty_background_bytes)
 296		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
 297	else
 298		background = (dirty_background_ratio * available_memory) / 100;
 299
 300	if (background >= dirty)
 301		background = dirty / 2;
 302	tsk = current;
 303	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
 304		background += background / 4;
 305		dirty += dirty / 4;
 306	}
 307	*pbackground = background;
 308	*pdirty = dirty;
 309	trace_global_dirty_state(background, dirty);
 310}
 311
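/*
 * Editor's illustration (not part of the kernel source): a minimal
 * userspace sketch of the threshold arithmetic in global_dirty_limits()
 * above, assuming the default vm.dirty_ratio=20 and
 * vm.dirty_background_ratio=10 and a hypothetical 1,000,000 pages of
 * dirtyable memory. All names here are invented for the example.
 */
#include <stdio.h>

int main(void)
{
	unsigned long available_memory = 1000000;		 /* assumed */
	unsigned long dirty = 20 * available_memory / 100;	 /* 200000 */
	unsigned long background = 10 * available_memory / 100; /* 100000 */

	/* same clamp as global_dirty_limits() applies */
	if (background >= dirty)
		background = dirty / 2;

	printf("background=%lu dirty=%lu pages\n", background, dirty);
	return 0;
}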
 312/**
 313 * zone_dirty_limit - maximum number of dirty pages allowed in a zone
 314 * @zone: the zone
 315 *
 316 * Returns the maximum number of dirty pages allowed in a zone, based
 317 * on the zone's dirtyable memory.
 318 */
 319static unsigned long zone_dirty_limit(struct zone *zone)
 320{
 321	unsigned long zone_memory = zone_dirtyable_memory(zone);
 322	struct task_struct *tsk = current;
 323	unsigned long dirty;
 324
 325	if (vm_dirty_bytes)
 326		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
 327			zone_memory / global_dirtyable_memory();
 328	else
 329		dirty = vm_dirty_ratio * zone_memory / 100;
 330
 331	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk))
 332		dirty += dirty / 4;
 333
 334	return dirty;
 335}
 336
 337/**
 338 * zone_dirty_ok - tells whether a zone is within its dirty limits
 339 * @zone: the zone to check
 340 *
 341 * Returns %true when the dirty pages in @zone are within the zone's
 342 * dirty limit, %false if the limit is exceeded.
 343 */
 344bool zone_dirty_ok(struct zone *zone)
 345{
 346	unsigned long limit = zone_dirty_limit(zone);
 347
 348	return zone_page_state(zone, NR_FILE_DIRTY) +
 349	       zone_page_state(zone, NR_UNSTABLE_NFS) +
 350	       zone_page_state(zone, NR_WRITEBACK) <= limit;
 351}
 352
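/*
 * Editor's illustration: the per-zone cap computed by zone_dirty_limit()
 * with invented numbers. With vm_dirty_ratio = 20 and a zone holding
 * 250000 dirtyable pages, the vm_dirty_bytes=0 branch reduces to 20% of
 * the zone's own dirtyable memory:
 */
#include <stdio.h>

int main(void)
{
	unsigned long zone_memory = 250000;	/* assumed zone_dirtyable_memory() */
	unsigned long ratio = 20;		/* assumed vm_dirty_ratio */
	unsigned long limit = ratio * zone_memory / 100;

	printf("zone dirty limit = %lu pages\n", limit);	/* 50000 */
	return 0;
}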
 353int dirty_background_ratio_handler(struct ctl_table *table, int write,
 354		void __user *buffer, size_t *lenp,
 355		loff_t *ppos)
 356{
 357	int ret;
 358
 359	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 360	if (ret == 0 && write)
 361		dirty_background_bytes = 0;
 362	return ret;
 363}
 364
 365int dirty_background_bytes_handler(struct ctl_table *table, int write,
 366		void __user *buffer, size_t *lenp,
 367		loff_t *ppos)
 368{
 369	int ret;
 370
 371	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 372	if (ret == 0 && write)
 373		dirty_background_ratio = 0;
 374	return ret;
 375}
 376
 377int dirty_ratio_handler(struct ctl_table *table, int write,
 378		void __user *buffer, size_t *lenp,
 379		loff_t *ppos)
 380{
 381	int old_ratio = vm_dirty_ratio;
 382	int ret;
 383
 384	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 385	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
 386		writeback_set_ratelimit();
 387		vm_dirty_bytes = 0;
 388	}
 389	return ret;
 390}
 391
 392int dirty_bytes_handler(struct ctl_table *table, int write,
 393		void __user *buffer, size_t *lenp,
 394		loff_t *ppos)
 395{
 396	unsigned long old_bytes = vm_dirty_bytes;
 397	int ret;
 398
 399	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 400	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
 401		writeback_set_ratelimit();
 402		vm_dirty_ratio = 0;
 403	}
 404	return ret;
 405}
 406
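/*
 * Editor's note (illustration only): the four sysctl handlers above keep
 * vm.dirty_ratio/vm.dirty_bytes (and the background pair) mutually
 * exclusive; writing one zeroes the other. A small userspace program can
 * observe that invariant through the usual procfs files:
 */
#include <stdio.h>

static void show(const char *path)
{
	char buf[32];
	FILE *f = fopen(path, "r");

	if (f && fgets(buf, sizeof(buf), f))
		printf("%s = %s", path, buf);
	if (f)
		fclose(f);
}

int main(void)
{
	/* after `echo 500000000 > /proc/sys/vm/dirty_bytes`, dirty_ratio
	 * reads back as 0, and vice versa */
	show("/proc/sys/vm/dirty_ratio");
	show("/proc/sys/vm/dirty_bytes");
	return 0;
}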
 407static unsigned long wp_next_time(unsigned long cur_time)
 408{
 409	cur_time += VM_COMPLETIONS_PERIOD_LEN;
 410	/* 0 has a special meaning... */
 411	if (!cur_time)
 412		return 1;
 413	return cur_time;
 414}
 415
 416/*
 417 * Increment the BDI's writeout completion count and the global writeout
 418 * completion count. Called from test_clear_page_writeback().
 419 */
 420static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
 421{
 422	__inc_bdi_stat(bdi, BDI_WRITTEN);
 423	__fprop_inc_percpu_max(&writeout_completions, &bdi->completions,
 424			       bdi->max_prop_frac);
 425	/* First event after period switching was turned off? */
 426	if (!unlikely(writeout_period_time)) {
 427		/*
 428		 * We can race with other __bdi_writeout_inc calls here but
 429		 * it does not cause any harm since the resulting time when
 430		 * timer will fire and what is in writeout_period_time will be
 431		 * roughly the same.
 432		 */
 433		writeout_period_time = wp_next_time(jiffies);
 434		mod_timer(&writeout_period_timer, writeout_period_time);
 435	}
 436}
 437
 438void bdi_writeout_inc(struct backing_dev_info *bdi)
 439{
 440	unsigned long flags;
 441
 442	local_irq_save(flags);
 443	__bdi_writeout_inc(bdi);
 444	local_irq_restore(flags);
 445}
 446EXPORT_SYMBOL_GPL(bdi_writeout_inc);
 447
 448/*
 449 * Obtain an accurate fraction of the BDI's portion.
 450 */
 451static void bdi_writeout_fraction(struct backing_dev_info *bdi,
 452		long *numerator, long *denominator)
 453{
 454	fprop_fraction_percpu(&writeout_completions, &bdi->completions,
 455				numerator, denominator);
 456}
 457
 458/*
 459 * On an idle system, we can be called long after we were scheduled because we
 460 * use deferred timers, so account for missed periods.
 461 */
 462static void writeout_period(unsigned long t)
 463{
 464	int miss_periods = (jiffies - writeout_period_time) /
 465						 VM_COMPLETIONS_PERIOD_LEN;
 466
 467	if (fprop_new_period(&writeout_completions, miss_periods + 1)) {
 468		writeout_period_time = wp_next_time(writeout_period_time +
 469				miss_periods * VM_COMPLETIONS_PERIOD_LEN);
 470		mod_timer(&writeout_period_timer, writeout_period_time);
 471	} else {
 472		/*
 473		 * Aging has zeroed all fractions. Stop wasting CPU on period
 474		 * updates.
 475		 */
 476		writeout_period_time = 0;
 477	}
 478}
 479
 480/*
 481 * bdi_min_ratio keeps the sum of the minimum dirty shares of all
 482 * registered backing devices, which, for obvious reasons, cannot
 483 * exceed 100%.
 484 */
 485static unsigned int bdi_min_ratio;
 486
 487int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
 488{
 489	int ret = 0;
 490
 491	spin_lock_bh(&bdi_lock);
 492	if (min_ratio > bdi->max_ratio) {
 493		ret = -EINVAL;
 494	} else {
 495		min_ratio -= bdi->min_ratio;
 496		if (bdi_min_ratio + min_ratio < 100) {
 497			bdi_min_ratio += min_ratio;
 498			bdi->min_ratio += min_ratio;
 499		} else {
 500			ret = -EINVAL;
 501		}
 502	}
 503	spin_unlock_bh(&bdi_lock);
 504
 505	return ret;
 506}
 507
 508int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
 509{
 510	int ret = 0;
 511
 512	if (max_ratio > 100)
 513		return -EINVAL;
 514
 515	spin_lock_bh(&bdi_lock);
 516	if (bdi->min_ratio > max_ratio) {
 517		ret = -EINVAL;
 518	} else {
 519		bdi->max_ratio = max_ratio;
 520		bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100;
 521	}
 522	spin_unlock_bh(&bdi_lock);
 523
 524	return ret;
 525}
 526EXPORT_SYMBOL(bdi_set_max_ratio);
 527
 528static unsigned long dirty_freerun_ceiling(unsigned long thresh,
 529					   unsigned long bg_thresh)
 530{
 531	return (thresh + bg_thresh) / 2;
 532}
 533
 534static unsigned long hard_dirty_limit(unsigned long thresh)
 535{
 536	return max(thresh, global_dirty_limit);
 537}
 538
 539/**
 540 * bdi_dirty_limit - @bdi's share of dirty throttling threshold
 541 * @bdi: the backing_dev_info to query
 542 * @dirty: global dirty limit in pages
 543 *
 544 * Returns @bdi's dirty limit in pages. The term "dirty" in the context of
 545 * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
 546 *
 547 * Note that balance_dirty_pages() will only seriously take it as a hard limit
 548 * when sleeping max_pause per page is not enough to keep the dirty pages under
 549 * control. For example, when the device is completely stalled due to some error
 550 * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
 551 * In other, normal situations it acts more gently by throttling the tasks
 552 * more (rather than completely blocking them) when the bdi dirty pages go high.
 553 *
 554 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
 555 * - starving fast devices
 556 * - piling up dirty pages (that will take long time to sync) on slow devices
 557 *
 558 * The bdi's share of dirty limit will be adapting to its throughput and
 559 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 560 */
 561unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
 562{
 563	u64 bdi_dirty;
 564	long numerator, denominator;
 565
 566	/*
 567	 * Calculate this BDI's share of the dirty ratio.
 568	 */
 569	bdi_writeout_fraction(bdi, &numerator, &denominator);
 570
 571	bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
 572	bdi_dirty *= numerator;
 573	do_div(bdi_dirty, denominator);
 574
 575	bdi_dirty += (dirty * bdi->min_ratio) / 100;
 576	if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
 577		bdi_dirty = dirty * bdi->max_ratio / 100;
 578
 579	return bdi_dirty;
 580}
 581
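/*
 * Editor's illustration (not kernel code): the share computation in
 * bdi_dirty_limit() with made-up numbers. Suppose the global limit is
 * 100000 pages, the sum of all minimum shares (bdi_min_ratio) is 10%,
 * this bdi's min_ratio is 5%, its max_ratio is 60%, and the fprop
 * bookkeeping says it performed 3 of every 4 recent writeout
 * completions (numerator=3, denominator=4):
 */
#include <stdio.h>

int main(void)
{
	unsigned long long dirty = 100000;
	unsigned int bdi_min_ratio = 10, min_ratio = 5, max_ratio = 60;
	unsigned long long numerator = 3, denominator = 4, bdi_dirty;

	bdi_dirty = dirty * (100 - bdi_min_ratio) / 100;  /* 90000 */
	bdi_dirty = bdi_dirty * numerator / denominator;  /* 67500 */
	bdi_dirty += dirty * min_ratio / 100;		  /* 72500 */
	if (bdi_dirty > dirty * max_ratio / 100)	  /* capped at 60000 */
		bdi_dirty = dirty * max_ratio / 100;

	printf("bdi share = %llu pages\n", bdi_dirty);
	return 0;
}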
 582/*
 583 *                           setpoint - dirty 3
 584 *        f(dirty) := 1.0 + (----------------)
 585 *                           limit - setpoint
 586 *
 587 * it's a 3rd order polynomial that subjects to
 588 *
 589 * (1) f(freerun)  = 2.0 => rampup dirty_ratelimit reasonably fast
 590 * (2) f(setpoint) = 1.0 => the balance point
 591 * (3) f(limit)    = 0   => the hard limit
 592 * (4) df/dx      <= 0	 => negative feedback control
 593 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
 594 *     => fast response on large errors; small oscillation near setpoint
 595 */
 596static long long pos_ratio_polynom(unsigned long setpoint,
 597					  unsigned long dirty,
 598					  unsigned long limit)
 599{
 600	long long pos_ratio;
 601	long x;
 602
 603	x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
 604		    limit - setpoint + 1);
 605	pos_ratio = x;
 606	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
 607	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
 608	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;
 609
 610	return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT);
 611}
 612
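/*
 * Editor's illustration: checking the three boundary conditions of
 * pos_ratio_polynom() in userspace, with invented numbers freerun=4000
 * and limit=8000 (so setpoint=6000). Results are in units of
 * 1 << RATELIMIT_CALC_SHIFT, i.e. 1024 means a pos_ratio of 1.0.
 */
#include <stdio.h>

#define SHIFT 10	/* stands in for RATELIMIT_CALC_SHIFT */

static long long pos_ratio(long setpoint, long dirty, long limit)
{
	long long x = ((long long)(setpoint - dirty) << SHIFT) /
			(limit - setpoint + 1);
	long long p = x;

	p = p * x >> SHIFT;
	p = p * x >> SHIFT;		/* now p ~= x^3 */
	p += 1 << SHIFT;
	if (p < 0)
		p = 0;			/* same clamp as the kernel code */
	if (p > 2 << SHIFT)
		p = 2 << SHIFT;
	return p;
}

int main(void)
{
	printf("f(freerun)  = %lld\n", pos_ratio(6000, 4000, 8000)); /* ~2045 */
	printf("f(setpoint) = %lld\n", pos_ratio(6000, 6000, 8000)); /* 1024 */
	printf("f(limit)    = %lld\n", pos_ratio(6000, 8000, 8000)); /* ~3 */
	return 0;
}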
 613/*
 614 * Dirty position control.
 615 *
 616 * (o) global/bdi setpoints
 617 *
 618 * We want the dirty pages be balanced around the global/bdi setpoints.
 619 * When the number of dirty pages is higher/lower than the setpoint, the
 620 * dirty position control ratio (and hence task dirty ratelimit) will be
 621 * decreased/increased to bring the dirty pages back to the setpoint.
 622 *
 623 *     pos_ratio = 1 << RATELIMIT_CALC_SHIFT
 624 *
 625 *     if (dirty < setpoint) scale up   pos_ratio
 626 *     if (dirty > setpoint) scale down pos_ratio
 627 *
 628 *     if (bdi_dirty < bdi_setpoint) scale up   pos_ratio
 629 *     if (bdi_dirty > bdi_setpoint) scale down pos_ratio
 630 *
 631 *     task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
 632 *
 633 * (o) global control line
 634 *
 635 *     ^ pos_ratio
 636 *     |
 637 *     |            |<===== global dirty control scope ======>|
 638 * 2.0 .............*
 639 *     |            .*
 640 *     |            . *
 641 *     |            .   *
 642 *     |            .     *
 643 *     |            .        *
 644 *     |            .            *
 645 * 1.0 ................................*
 646 *     |            .                  .     *
 647 *     |            .                  .          *
 648 *     |            .                  .              *
 649 *     |            .                  .                 *
 650 *     |            .                  .                    *
 651 *   0 +------------.------------------.----------------------*------------->
 652 *           freerun^          setpoint^                 limit^   dirty pages
 653 *
 654 * (o) bdi control line
 655 *
 656 *     ^ pos_ratio
 657 *     |
 658 *     |            *
 659 *     |              *
 660 *     |                *
 661 *     |                  *
 662 *     |                    * |<=========== span ============>|
 663 * 1.0 .......................*
 664 *     |                      . *
 665 *     |                      .   *
 666 *     |                      .     *
 667 *     |                      .       *
 668 *     |                      .         *
 669 *     |                      .           *
 670 *     |                      .             *
 671 *     |                      .               *
 672 *     |                      .                 *
 673 *     |                      .                   *
 674 *     |                      .                     *
 675 * 1/4 ...............................................* * * * * * * * * * * *
 676 *     |                      .                         .
 677 *     |                      .                           .
 678 *     |                      .                             .
 679 *   0 +----------------------.-------------------------------.------------->
 680 *                bdi_setpoint^                    x_intercept^
 681 *
 682 * The bdi control line won't drop below pos_ratio=1/4, so that bdi_dirty can
 683 * be smoothly throttled down to normal if it starts high in situations like
 684 * - start writing to a slow SD card and a fast disk at the same time. The SD
 685 *   card's bdi_dirty may rush to many times higher than bdi_setpoint.
 686 * - the bdi dirty thresh drops quickly due to change of JBOD workload
 687 */
 688static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
 689					unsigned long thresh,
 690					unsigned long bg_thresh,
 691					unsigned long dirty,
 692					unsigned long bdi_thresh,
 693					unsigned long bdi_dirty)
 694{
 695	unsigned long write_bw = bdi->avg_write_bandwidth;
 696	unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh);
 697	unsigned long limit = hard_dirty_limit(thresh);
 698	unsigned long x_intercept;
 699	unsigned long setpoint;		/* dirty pages' target balance point */
 700	unsigned long bdi_setpoint;
 701	unsigned long span;
 702	long long pos_ratio;		/* for scaling up/down the rate limit */
 703	long x;
 704
 705	if (unlikely(dirty >= limit))
 706		return 0;
 707
 708	/*
 709	 * global setpoint
 710	 *
 711	 * See comment for pos_ratio_polynom().
 712	 */
 713	setpoint = (freerun + limit) / 2;
 714	pos_ratio = pos_ratio_polynom(setpoint, dirty, limit);
 715
 716	/*
 717	 * The strictlimit feature is a tool preventing mistrusted filesystems
 718	 * from growing a large number of dirty pages before throttling. For
 719	 * such filesystems balance_dirty_pages always checks bdi counters
 720	 * against bdi limits, even if the global "nr_dirty" is under "freerun".
 721	 * This is especially important for fuse which sets bdi->max_ratio to
 722	 * 1% by default. Without strictlimit feature, fuse writeback may
 723	 * consume an arbitrary amount of RAM because it is accounted in
 724	 * NR_WRITEBACK_TEMP which is not involved in calculating "nr_dirty".
 725	 *
 726	 * Here, in bdi_position_ratio(), we calculate pos_ratio based on
 727	 * two values: bdi_dirty and bdi_thresh. Let's consider an example:
 728	 * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global
 729	 * limits are set by default to 10% and 20% (background and throttle).
 730	 * Then bdi_thresh is 1% of 20% of 16GB. This amounts to ~8K pages.
 731	 * bdi_dirty_limit(bdi, bg_thresh) is about ~4K pages. bdi_setpoint is
 732	 * about ~6K pages (as the average of background and throttle bdi
 733	 * limits). The 3rd order polynomial will provide positive feedback if
 734	 * bdi_dirty is under bdi_setpoint and vice versa.
 735	 *
 736	 * Note that we cannot use global counters in these calculations
 737	 * because we want to throttle process writing to a strictlimit BDI
 738	 * much earlier than global "freerun" is reached (~23MB vs. ~2.3GB
 739	 * in the example above).
 740	 */
 741	if (unlikely(bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
 742		long long bdi_pos_ratio;
 743		unsigned long bdi_bg_thresh;
 744
 745		if (bdi_dirty < 8)
 746			return min_t(long long, pos_ratio * 2,
 747				     2 << RATELIMIT_CALC_SHIFT);
 748
 749		if (bdi_dirty >= bdi_thresh)
 750			return 0;
 751
 752		bdi_bg_thresh = div_u64((u64)bdi_thresh * bg_thresh, thresh);
 753		bdi_setpoint = dirty_freerun_ceiling(bdi_thresh,
 754						     bdi_bg_thresh);
 755
 756		if (bdi_setpoint == 0 || bdi_setpoint == bdi_thresh)
 757			return 0;
 758
 759		bdi_pos_ratio = pos_ratio_polynom(bdi_setpoint, bdi_dirty,
 760						  bdi_thresh);
 761
 762		/*
 763		 * Typically, for strictlimit case, bdi_setpoint << setpoint
 764		 * and pos_ratio >> bdi_pos_ratio. In other words, the global
 765		 * state ("dirty") is not the limiting factor and we have to
 766		 * make the decision based on bdi counters. But there is an
 767		 * important case when global pos_ratio should get precedence:
 768		 * global limits are exceeded (e.g. due to activities on other
 769		 * BDIs) while given strictlimit BDI is below limit.
 770		 *
 771		 * "pos_ratio * bdi_pos_ratio" would work for the case above,
 772		 * but it would look too non-natural for the case of all
 773		 * activity in the system coming from a single strictlimit BDI
 774		 * with bdi->max_ratio == 100%.
 775		 *
 776		 * Note that min() below somewhat changes the dynamics of the
 777		 * control system. Normally, pos_ratio value can be well over 3
 778		 * (when globally we are at freerun and bdi is well below bdi
 779		 * setpoint). Now the maximum pos_ratio in the same situation
 780		 * is 2. We might want to tweak this if we observe the control
 781		 * system is too slow to adapt.
 782		 */
 783		return min(pos_ratio, bdi_pos_ratio);
 784	}
 785
 786	/*
 787	 * We have computed basic pos_ratio above based on global situation. If
 788	 * the bdi is over/under its share of dirty pages, we want to scale
 789	 * pos_ratio further down/up. That is done by the following mechanism.
 790	 */
 791
 792	/*
 793	 * bdi setpoint
 794	 *
 795	 *        f(bdi_dirty) := 1.0 + k * (bdi_dirty - bdi_setpoint)
 796	 *
 797	 *                        x_intercept - bdi_dirty
 798	 *                     := --------------------------
 799	 *                        x_intercept - bdi_setpoint
 800	 *
 801	 * The main bdi control line is a linear function that subjects to
 802	 *
 803	 * (1) f(bdi_setpoint) = 1.0
 804	 * (2) k = - 1 / (8 * write_bw)  (in single bdi case)
 805	 *     or equally: x_intercept = bdi_setpoint + 8 * write_bw
 806	 *
 807	 * For single bdi case, the dirty pages are observed to fluctuate
 808	 * regularly within range
 809	 *        [bdi_setpoint - write_bw/2, bdi_setpoint + write_bw/2]
 810	 * for various filesystems, where (2) can yield in a reasonable 12.5%
 811	 * fluctuation range for pos_ratio.
 812	 *
 813	 * For JBOD case, bdi_thresh (not bdi_dirty!) could fluctuate up to its
 814	 * own size, so move the slope over accordingly and choose a slope that
 815	 * yields 100% pos_ratio fluctuation on suddenly doubled bdi_thresh.
 816	 */
 817	if (unlikely(bdi_thresh > thresh))
 818		bdi_thresh = thresh;
 819	/*
 820	 * It's very possible that bdi_thresh is close to 0 not because the
 821	 * device is slow, but because it has remained inactive for a long time.
 822	 * Honour such devices with a reasonably good (hopefully IO efficient)
 823	 * threshold, so that occasional writes won't be blocked and active
 824	 * writes can ramp up the threshold quickly.
 825	 */
 826	bdi_thresh = max(bdi_thresh, (limit - dirty) / 8);
 827	/*
 828	 * scale global setpoint to bdi's:
 829	 *	bdi_setpoint = setpoint * bdi_thresh / thresh
 830	 */
 831	x = div_u64((u64)bdi_thresh << 16, thresh + 1);
 832	bdi_setpoint = setpoint * (u64)x >> 16;
 833	/*
 834	 * Use span=(8*write_bw) in single bdi case as indicated by
 835	 * (thresh - bdi_thresh ~= 0) and transit to bdi_thresh in JBOD case.
 836	 *
 837	 *        bdi_thresh                    thresh - bdi_thresh
 838	 * span = ---------- * (8 * write_bw) + ------------------- * bdi_thresh
 839	 *          thresh                            thresh
 840	 */
 841	span = (thresh - bdi_thresh + 8 * write_bw) * (u64)x >> 16;
 842	x_intercept = bdi_setpoint + span;
 843
 844	if (bdi_dirty < x_intercept - span / 4) {
 845		pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty),
 846				    x_intercept - bdi_setpoint + 1);
 847	} else
 848		pos_ratio /= 4;
 849
 850	/*
 851	 * bdi reserve area, safeguard against dirty pool underrun and disk idle
 852	 * It may push the desired control point of global dirty pages higher
 853	 * than setpoint.
 854	 */
 855	x_intercept = bdi_thresh / 2;
 856	if (bdi_dirty < x_intercept) {
 857		if (bdi_dirty > x_intercept / 8)
 858			pos_ratio = div_u64(pos_ratio * x_intercept, bdi_dirty);
 859		else
 860			pos_ratio *= 8;
 861	}
 862
 863	return pos_ratio;
 864}
 865
 866static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
 867				       unsigned long elapsed,
 868				       unsigned long written)
 869{
 870	const unsigned long period = roundup_pow_of_two(3 * HZ);
 871	unsigned long avg = bdi->avg_write_bandwidth;
 872	unsigned long old = bdi->write_bandwidth;
 873	u64 bw;
 874
 875	/*
 876	 * bw = written * HZ / elapsed
 877	 *
 878	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
 879	 * write_bandwidth = ---------------------------------------------------
 880	 *                                          period
 881	 */
 882	bw = written - bdi->written_stamp;
 883	bw *= HZ;
 884	if (unlikely(elapsed > period)) {
 885		do_div(bw, elapsed);
 886		avg = bw;
 887		goto out;
 888	}
 889	bw += (u64)bdi->write_bandwidth * (period - elapsed);
 890	bw >>= ilog2(period);
 891
 892	/*
 893	 * one more level of smoothing, for filtering out sudden spikes
 894	 */
 895	if (avg > old && old >= (unsigned long)bw)
 896		avg -= (avg - old) >> 3;
 897
 898	if (avg < old && old <= (unsigned long)bw)
 899		avg += (old - avg) >> 3;
 900
 901out:
 902	bdi->write_bandwidth = bw;
 903	bdi->avg_write_bandwidth = avg;
 904}
 905
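/*
 * Editor's illustration: one round of the running average in
 * bdi_update_write_bandwidth() with invented numbers. At an assumed
 * HZ=250, period = roundup_pow_of_two(3*HZ) = 1024 jiffies; suppose
 * 2000 pages were written during elapsed=250 jiffies (one second) and
 * the previous estimate was 1500 pages/s.
 */
#include <stdio.h>

int main(void)
{
	const unsigned long period = 1024, hz = 250;	/* assumed */
	unsigned long elapsed = 250, written = 2000;
	unsigned long long old = 1500, bw;

	bw = (unsigned long long)written * hz;	/* 500000: ~2000 pages/s */
	bw += old * (period - elapsed);		/* blend with old estimate */
	bw >>= 10;				/* ilog2(1024) */

	printf("write_bandwidth = %llu pages/s\n", bw);	/* ~1622 */
	return 0;
}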
 906/*
 907 * The global dirtyable memory and dirty threshold could be suddenly knocked
 908 * down by a large amount (eg. on the startup of KVM in a swapless system).
 909 * This may throw the system into deep dirty exceeded state and throttle
 910 * heavy/light dirtiers alike. To retain good responsiveness, maintain
 911 * global_dirty_limit, which tracks down slowly to the knocked-down dirty
 912 * threshold.
 913 */
 914static void update_dirty_limit(unsigned long thresh, unsigned long dirty)
 915{
 916	unsigned long limit = global_dirty_limit;
 917
 918	/*
 919	 * Follow up in one step.
 920	 */
 921	if (limit < thresh) {
 922		limit = thresh;
 923		goto update;
 924	}
 925
 926	/*
 927	 * Follow down slowly. Use the higher one as the target, because thresh
 928	 * may drop below dirty. This is exactly the reason to introduce
 929	 * global_dirty_limit which is guaranteed to lie above the dirty pages.
 930	 */
 931	thresh = max(thresh, dirty);
 932	if (limit > thresh) {
 933		limit -= (limit - thresh) >> 5;
 934		goto update;
 935	}
 936	return;
 937update:
 938	global_dirty_limit = limit;
 939}
 940
 941static void global_update_bandwidth(unsigned long thresh,
 942				    unsigned long dirty,
 943				    unsigned long now)
 944{
 945	static DEFINE_SPINLOCK(dirty_lock);
 946	static unsigned long update_time;
 947
 948	/*
 949	 * check locklessly first to optimize away locking most of the time
 950	 */
 951	if (time_before(now, update_time + BANDWIDTH_INTERVAL))
 952		return;
 953
 954	spin_lock(&dirty_lock);
 955	if (time_after_eq(now, update_time + BANDWIDTH_INTERVAL)) {
 956		update_dirty_limit(thresh, dirty);
 957		update_time = now;
 958	}
 959	spin_unlock(&dirty_lock);
 960}
 961
 962/*
 963 * Maintain bdi->dirty_ratelimit, the base dirty throttle rate.
 964 *
 965 * Normal bdi tasks will be curbed at or below it in the long term.
 966 * Obviously it should be around (write_bw / N) when there are N dd tasks.
 967 */
 968static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi,
 969				       unsigned long thresh,
 970				       unsigned long bg_thresh,
 971				       unsigned long dirty,
 972				       unsigned long bdi_thresh,
 973				       unsigned long bdi_dirty,
 974				       unsigned long dirtied,
 975				       unsigned long elapsed)
 976{
 977	unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh);
 978	unsigned long limit = hard_dirty_limit(thresh);
 979	unsigned long setpoint = (freerun + limit) / 2;
 980	unsigned long write_bw = bdi->avg_write_bandwidth;
 981	unsigned long dirty_ratelimit = bdi->dirty_ratelimit;
 982	unsigned long dirty_rate;
 983	unsigned long task_ratelimit;
 984	unsigned long balanced_dirty_ratelimit;
 985	unsigned long pos_ratio;
 986	unsigned long step;
 987	unsigned long x;
 988
 989	/*
 990	 * The dirty rate will match the writeout rate in the long term, except
 991	 * when dirty pages are truncated by userspace or re-dirtied by FS.
 992	 */
 993	dirty_rate = (dirtied - bdi->dirtied_stamp) * HZ / elapsed;
 994
 995	pos_ratio = bdi_position_ratio(bdi, thresh, bg_thresh, dirty,
 996				       bdi_thresh, bdi_dirty);
 997	/*
 998	 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
 999	 */
1000	task_ratelimit = (u64)dirty_ratelimit *
1001					pos_ratio >> RATELIMIT_CALC_SHIFT;
1002	task_ratelimit++; /* it helps ramp up dirty_ratelimit from tiny values */
1003
1004	/*
1005	 * A linear estimation of the "balanced" throttle rate. The theory is,
1006	 * if there are N dd tasks, each throttled at task_ratelimit, the bdi's
1007	 * dirty_rate will be measured to be (N * task_ratelimit). So the below
1008	 * formula will yield the balanced rate limit (write_bw / N).
1009	 *
1010	 * Note that the expanded form is not a pure rate feedback:
1011	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate)		     (1)
1012	 * but also takes pos_ratio into account:
1013	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio  (2)
1014	 *
1015	 * (1) is not realistic because pos_ratio also takes part in balancing
1016	 * the dirty rate.  Consider the state
1017	 *	pos_ratio = 0.5						     (3)
1018	 *	rate = 2 * (write_bw / N)				     (4)
1019	 * If (1) is used, it will get stuck in that state! Because each dd will
1020	 * be throttled at
1021	 *	task_ratelimit = pos_ratio * rate = (write_bw / N)	     (5)
1022	 * yielding
1023	 *	dirty_rate = N * task_ratelimit = write_bw		     (6)
1024	 * put (6) into (1) we get
1025	 *	rate_(i+1) = rate_(i)					     (7)
1026	 *
1027	 * So we end up using (2) to always keep
1028	 *	rate_(i+1) ~= (write_bw / N)				     (8)
1029	 * regardless of the value of pos_ratio. As long as (8) is satisfied,
1030	 * pos_ratio is able to drive itself to 1.0, which is not only where
1031	 * the dirty count meets the setpoint, but also where the slope of
1032	 * pos_ratio is flattest and hence task_ratelimit fluctuates least.
1033	 */
1034	balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
1035					   dirty_rate | 1);
1036	/*
1037	 * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
1038	 */
1039	if (unlikely(balanced_dirty_ratelimit > write_bw))
1040		balanced_dirty_ratelimit = write_bw;
1041
1042	/*
1043	 * We could safely do this and return immediately:
1044	 *
1045	 *	bdi->dirty_ratelimit = balanced_dirty_ratelimit;
1046	 *
1047	 * However to get a more stable dirty_ratelimit, the below elaborated
1048	 * code makes use of task_ratelimit to filter out singular points and
1049	 * limit the step size.
1050	 *
1051	 * The below code essentially only uses the relative value of
1052	 *
1053	 *	task_ratelimit - dirty_ratelimit
1054	 *	= (pos_ratio - 1) * dirty_ratelimit
1055	 *
1056	 * which reflects the direction and size of dirty position error.
1057	 */
1058
1059	/*
1060	 * dirty_ratelimit will follow balanced_dirty_ratelimit iff
1061	 * task_ratelimit is on the same side of dirty_ratelimit, too.
1062	 * For example, when
1063	 * - dirty_ratelimit > balanced_dirty_ratelimit
1064	 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
1065	 * lowering dirty_ratelimit will help meet both the position and rate
1066	 * control targets. Otherwise, don't update dirty_ratelimit if it will
1067	 * only help meet the rate target. After all, what the users ultimately
1068	 * feel and care are stable dirty rate and small position error.
1069	 *
1070	 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
1071	 * and filter out the singular points of balanced_dirty_ratelimit, which
1072	 * keeps jumping around randomly and can even leap far away at times
1073	 * due to the small 200ms estimation period of dirty_rate (we want to
1074	 * keep that period small to reduce time lags).
1075	 */
1076	step = 0;
1077
1078	/*
1079	 * For strictlimit case, calculations above were based on bdi counters
1080	 * and limits (starting from pos_ratio = bdi_position_ratio() and up to
1081	 * balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate).
1082	 * Hence, to calculate "step" properly, we have to use bdi_dirty as
1083	 * "dirty" and bdi_setpoint as "setpoint".
1084	 *
1085	 * We ramp up dirty_ratelimit forcibly if bdi_dirty is low because
1086	 * it's possible that bdi_thresh is close to zero due to inactivity
1087	 * of backing device (see the implementation of bdi_dirty_limit()).
1088	 */
1089	if (unlikely(bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
1090		dirty = bdi_dirty;
1091		if (bdi_dirty < 8)
1092			setpoint = bdi_dirty + 1;
1093		else
1094			setpoint = (bdi_thresh +
1095				    bdi_dirty_limit(bdi, bg_thresh)) / 2;
1096	}
1097
1098	if (dirty < setpoint) {
1099		x = min(bdi->balanced_dirty_ratelimit,
1100			 min(balanced_dirty_ratelimit, task_ratelimit));
1101		if (dirty_ratelimit < x)
1102			step = x - dirty_ratelimit;
1103	} else {
1104		x = max(bdi->balanced_dirty_ratelimit,
1105			 max(balanced_dirty_ratelimit, task_ratelimit));
1106		if (dirty_ratelimit > x)
1107			step = dirty_ratelimit - x;
1108	}
1109
1110	/*
1111	 * Don't pursue 100% rate matching. It's impossible since the balanced
1112	 * rate itself is constantly fluctuating. So decrease the tracking speed
1113	 * when it gets close to the target. Helps eliminate pointless tremors.
1114	 */
1115	step >>= dirty_ratelimit / (2 * step + 1);
1116	/*
1117	 * Limit the tracking speed to avoid overshooting.
1118	 */
1119	step = (step + 7) / 8;
1120
1121	if (dirty_ratelimit < balanced_dirty_ratelimit)
1122		dirty_ratelimit += step;
1123	else
1124		dirty_ratelimit -= step;
1125
1126	bdi->dirty_ratelimit = max(dirty_ratelimit, 1UL);
1127	bdi->balanced_dirty_ratelimit = balanced_dirty_ratelimit;
1128
1129	trace_bdi_dirty_ratelimit(bdi, dirty_rate, task_ratelimit);
1130}
1131
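/*
 * Editor's illustration of the "balanced" rate formula above, with
 * invented numbers: avg write bandwidth is 100 (in arbitrary pages/s
 * units) and four dd tasks are each currently throttled at
 * task_ratelimit = 50, so the measured dirty_rate is 4 * 50 = 200.
 * The formula settles each task at write_bw / N = 25.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long task_ratelimit = 50, write_bw = 100;
	unsigned long long dirty_rate = 200;	/* N=4 tasks at 50 each */
	unsigned long long balanced;

	balanced = task_ratelimit * write_bw / dirty_rate;
	printf("balanced_dirty_ratelimit = %llu\n", balanced);	/* 25 */
	return 0;
}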
1132void __bdi_update_bandwidth(struct backing_dev_info *bdi,
1133			    unsigned long thresh,
1134			    unsigned long bg_thresh,
1135			    unsigned long dirty,
1136			    unsigned long bdi_thresh,
1137			    unsigned long bdi_dirty,
1138			    unsigned long start_time)
1139{
1140	unsigned long now = jiffies;
1141	unsigned long elapsed = now - bdi->bw_time_stamp;
1142	unsigned long dirtied;
1143	unsigned long written;
1144
1145	/*
1146	 * rate-limit, only update once every 200ms.
1147	 */
1148	if (elapsed < BANDWIDTH_INTERVAL)
1149		return;
1150
1151	dirtied = percpu_counter_read(&bdi->bdi_stat[BDI_DIRTIED]);
1152	written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]);
1153
1154	/*
1155	 * Skip quiet periods when disk bandwidth is under-utilized.
1156	 * (at least 1s idle time between two flusher runs)
1157	 */
1158	if (elapsed > HZ && time_before(bdi->bw_time_stamp, start_time))
1159		goto snapshot;
1160
1161	if (thresh) {
1162		global_update_bandwidth(thresh, dirty, now);
1163		bdi_update_dirty_ratelimit(bdi, thresh, bg_thresh, dirty,
1164					   bdi_thresh, bdi_dirty,
1165					   dirtied, elapsed);
1166	}
1167	bdi_update_write_bandwidth(bdi, elapsed, written);
1168
1169snapshot:
1170	bdi->dirtied_stamp = dirtied;
1171	bdi->written_stamp = written;
1172	bdi->bw_time_stamp = now;
1173}
1174
1175static void bdi_update_bandwidth(struct backing_dev_info *bdi,
1176				 unsigned long thresh,
1177				 unsigned long bg_thresh,
1178				 unsigned long dirty,
1179				 unsigned long bdi_thresh,
1180				 unsigned long bdi_dirty,
1181				 unsigned long start_time)
1182{
1183	if (time_is_after_eq_jiffies(bdi->bw_time_stamp + BANDWIDTH_INTERVAL))
1184		return;
1185	spin_lock(&bdi->wb.list_lock);
1186	__bdi_update_bandwidth(bdi, thresh, bg_thresh, dirty,
1187			       bdi_thresh, bdi_dirty, start_time);
1188	spin_unlock(&bdi->wb.list_lock);
1189}
1190
1191/*
1192 * After a task dirtied this many pages, balance_dirty_pages_ratelimited()
1193 * will look to see if it needs to start dirty throttling.
1194 *
1195 * If dirty_poll_interval is too low, big NUMA machines will call the expensive
1196 * global_page_state() too often. So scale it near-sqrt to the safety margin
1197 * (the number of pages we may dirty without exceeding the dirty limits).
1198 */
1199static unsigned long dirty_poll_interval(unsigned long dirty,
1200					 unsigned long thresh)
1201{
1202	if (thresh > dirty)
1203		return 1UL << (ilog2(thresh - dirty) >> 1);
1204
1205	return 1;
1206}
1207
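/*
 * Editor's illustration: the near-sqrt scaling of dirty_poll_interval()
 * for a few sample safety margins (thresh - dirty). A margin of 1024
 * pages gives 1 << (10 >> 1) = 32 pages between polls; 65536 gives 256.
 */
#include <stdio.h>

static unsigned long ilog2_ul(unsigned long x)
{
	unsigned long r = 0;

	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned long margins[] = { 16, 1024, 65536 };
	int i;

	for (i = 0; i < 3; i++)
		printf("margin %6lu -> poll every %lu pages\n", margins[i],
		       1UL << (ilog2_ul(margins[i]) >> 1));
	return 0;
}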
1208static unsigned long bdi_max_pause(struct backing_dev_info *bdi,
1209				   unsigned long bdi_dirty)
1210{
1211	unsigned long bw = bdi->avg_write_bandwidth;
1212	unsigned long t;
1213
1214	/*
1215	 * Limit pause time for small memory systems. If we sleep for too long,
1216	 * a small pool of dirty/writeback pages may go empty and the disk may
1217	 * go idle.
1218	 *
1219	 * 8 serves as the safety ratio.
1220	 */
1221	t = bdi_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
1222	t++;
1223
1224	return min_t(unsigned long, t, MAX_PAUSE);
1225}
1226
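/*
 * Editor's illustration: the pause cap in bdi_max_pause() with invented
 * numbers. With bdi_dirty = 400 pages, avg bandwidth bw = 1000 pages/s
 * and an assumed HZ=250, roundup_pow_of_two(1 + HZ/8) = 32 and the
 * safety ratio of 8 yields t = 400 / (1 + 1000/32) + 1 = 13 jiffies
 * (~52ms), comfortably under the 200ms MAX_PAUSE.
 */
#include <stdio.h>

int main(void)
{
	const unsigned long hz = 250, max_pause = 250 / 5;	/* assumed */
	unsigned long bdi_dirty = 400, bw = 1000;
	unsigned long t;

	t = bdi_dirty / (1 + bw / 32) + 1;
	if (t > max_pause)
		t = max_pause;
	printf("max pause = %lu jiffies at HZ=%lu\n", t, hz);	/* 13 */
	return 0;
}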
1227static long bdi_min_pause(struct backing_dev_info *bdi,
1228			  long max_pause,
1229			  unsigned long task_ratelimit,
1230			  unsigned long dirty_ratelimit,
1231			  int *nr_dirtied_pause)
1232{
1233	long hi = ilog2(bdi->avg_write_bandwidth);
1234	long lo = ilog2(bdi->dirty_ratelimit);
1235	long t;		/* target pause */
1236	long pause;	/* estimated next pause */
1237	int pages;	/* target nr_dirtied_pause */
1238
1239	/* target for 10ms pause on 1-dd case */
1240	t = max(1, HZ / 100);
1241
1242	/*
1243	 * Scale up pause time for concurrent dirtiers in order to reduce CPU
1244	 * overheads.
1245	 *
1246	 * (N * 10ms) on 2^N concurrent tasks.
1247	 */
1248	if (hi > lo)
1249		t += (hi - lo) * (10 * HZ) / 1024;
1250
1251	/*
1252	 * This is a bit convoluted. We try to base the next nr_dirtied_pause
1253	 * on the much more stable dirty_ratelimit. However the next pause time
1254	 * will be computed based on task_ratelimit and the two rate limits may
1255	 * depart considerably at times. Especially if task_ratelimit goes
1256	 * below dirty_ratelimit/2 and the target pause is max_pause, the next
1257	 * pause time will be max_pause*2 _trimmed down_ to max_pause.  As a
1258	 * result task_ratelimit won't be executed faithfully, which could
1259	 * eventually bring down dirty_ratelimit.
1260	 *
1261	 * We apply two rules to fix it up:
1262	 * 1) try to estimate the next pause time and if necessary, use a lower
1263	 *    nr_dirtied_pause so as not to exceed max_pause. When this happens,
1264	 *    nr_dirtied_pause will be "dancing" with task_ratelimit.
1265	 * 2) limit the target pause time to max_pause/2, so that the normal
1266	 *    small fluctuations of task_ratelimit won't trigger rule (1) and
1267	 *    nr_dirtied_pause will remain as stable as dirty_ratelimit.
1268	 */
1269	t = min(t, 1 + max_pause / 2);
1270	pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
1271
1272	/*
1273	 * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
1274	 * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
1275	 * When the 16 consecutive reads are often interrupted by some dirty
1276	 * throttling pause during the async writes, cfq will go into idling
1277	 * (deadline is fine). So push nr_dirtied_pause as high as possible
1278	 * until it reaches DIRTY_POLL_THRESH=32 pages.
1279	 */
1280	if (pages < DIRTY_POLL_THRESH) {
1281		t = max_pause;
1282		pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
1283		if (pages > DIRTY_POLL_THRESH) {
1284			pages = DIRTY_POLL_THRESH;
1285			t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
1286		}
1287	}
1288
1289	pause = HZ * pages / (task_ratelimit + 1);
1290	if (pause > max_pause) {
1291		t = max_pause;
1292		pages = task_ratelimit * t / roundup_pow_of_two(HZ);
1293	}
1294
1295	*nr_dirtied_pause = pages;
1296	/*
1297	 * The minimal pause time will normally be half the target pause time.
1298	 */
1299	return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
1300}
1301
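/*
 * Editor's illustration: the concurrency scaling at the top of
 * bdi_min_pause(), with invented numbers. avg_write_bandwidth = 8192
 * and dirty_ratelimit = 512 differ by a factor of 2^4, suggesting
 * roughly 16 concurrent dirtiers, so the base target pause of
 * HZ/100 = 2 jiffies (at an assumed HZ=250) grows to about 11 jiffies
 * (~44ms).
 */
#include <stdio.h>

static long ilog2_l(unsigned long x)
{
	long r = 0;

	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	const long hz = 250;			/* assumed */
	long hi = ilog2_l(8192);		/* 13 */
	long lo = ilog2_l(512);			/* 9 */
	long t = hz / 100 > 1 ? hz / 100 : 1;	/* base target */

	if (hi > lo)
		t += (hi - lo) * (10 * hz) / 1024;
	printf("target pause = %ld jiffies\n", t);	/* 11 */
	return 0;
}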
1302static inline void bdi_dirty_limits(struct backing_dev_info *bdi,
1303				    unsigned long dirty_thresh,
1304				    unsigned long background_thresh,
1305				    unsigned long *bdi_dirty,
1306				    unsigned long *bdi_thresh,
1307				    unsigned long *bdi_bg_thresh)
1308{
1309	unsigned long bdi_reclaimable;
1310
1311	/*
1312	 * bdi_thresh is not treated as a hard limiting factor the way
1313	 * dirty_thresh is, for these reasons:
1314	 * - in JBOD setup, bdi_thresh can fluctuate a lot
1315	 * - in a system with HDD and USB key, the USB key may somehow
1316	 *   go into state (bdi_dirty >> bdi_thresh) either because
1317	 *   bdi_dirty starts high, or because bdi_thresh drops low.
1318	 *   In this case we don't want to hard throttle the USB key
1319	 *   dirtiers for 100 seconds until bdi_dirty drops under
1320	 *   bdi_thresh. Instead the auxiliary bdi control line in
1321	 *   bdi_position_ratio() will let the dirtier task progress
1322	 *   at some rate <= (write_bw / 2) for bringing down bdi_dirty.
1323	 */
1324	*bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
1325
1326	if (bdi_bg_thresh)
1327		*bdi_bg_thresh = div_u64((u64)*bdi_thresh *
1328					 background_thresh,
1329					 dirty_thresh);
1330
1331	/*
1332	 * In order to avoid the stacked BDI deadlock we need
1333	 * to ensure we accurately count the 'dirty' pages when
1334	 * the threshold is low.
1335	 *
1336	 * Otherwise it would be possible to get thresh+n pages
1337	 * reported dirty, even though there are thresh-m pages
1338	 * actually dirty; with m+n sitting in the percpu
1339	 * deltas.
1340	 */
1341	if (*bdi_thresh < 2 * bdi_stat_error(bdi)) {
1342		bdi_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
1343		*bdi_dirty = bdi_reclaimable +
1344			bdi_stat_sum(bdi, BDI_WRITEBACK);
1345	} else {
1346		bdi_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
1347		*bdi_dirty = bdi_reclaimable +
1348			bdi_stat(bdi, BDI_WRITEBACK);
1349	}
1350}
1351
1352/*
1353 * balance_dirty_pages() must be called by processes which are generating dirty
1354 * data.  It looks at the number of dirty pages in the machine and will force
1355 * the caller to wait once it crosses (background_thresh + dirty_thresh) / 2.
1356 * If we're over `background_thresh' then the writeback threads are woken to
1357 * perform some writeout.
1358 */
1359static void balance_dirty_pages(struct address_space *mapping,
1360				unsigned long pages_dirtied)
1361{
1362	unsigned long nr_reclaimable;	/* = file_dirty + unstable_nfs */
1363	unsigned long nr_dirty;  /* = file_dirty + writeback + unstable_nfs */
1364	unsigned long background_thresh;
1365	unsigned long dirty_thresh;
1366	long period;
1367	long pause;
1368	long max_pause;
1369	long min_pause;
1370	int nr_dirtied_pause;
1371	bool dirty_exceeded = false;
1372	unsigned long task_ratelimit;
1373	unsigned long dirty_ratelimit;
1374	unsigned long pos_ratio;
1375	struct backing_dev_info *bdi = mapping->backing_dev_info;
1376	bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
1377	unsigned long start_time = jiffies;
1378
1379	for (;;) {
1380		unsigned long now = jiffies;
1381		unsigned long uninitialized_var(bdi_thresh);
1382		unsigned long thresh;
1383		unsigned long uninitialized_var(bdi_dirty);
1384		unsigned long dirty;
1385		unsigned long bg_thresh;
1386
1387		/*
1388		 * Unstable writes are a feature of certain networked
1389		 * filesystems (i.e. NFS) in which data may have been
1390		 * written to the server's write cache, but has not yet
1391		 * been flushed to permanent storage.
1392		 */
1393		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
1394					global_page_state(NR_UNSTABLE_NFS);
1395		nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);
1396
1397		global_dirty_limits(&background_thresh, &dirty_thresh);
1398
1399		if (unlikely(strictlimit)) {
1400			bdi_dirty_limits(bdi, dirty_thresh, background_thresh,
1401					 &bdi_dirty, &bdi_thresh, &bg_thresh);
1402
1403			dirty = bdi_dirty;
1404			thresh = bdi_thresh;
1405		} else {
1406			dirty = nr_dirty;
1407			thresh = dirty_thresh;
1408			bg_thresh = background_thresh;
1409		}
1410
1411		/*
1412		 * Throttle it only when the background writeback cannot
1413		 * catch up. This avoids (excessively) small writeouts
1414		 * when the bdi limits are ramping up in case of !strictlimit.
1415		 *
1416		 * In strictlimit case make decision based on the bdi counters
1417		 * and limits. Small writeouts when the bdi limits are ramping
1418		 * up are the price we consciously pay for strictlimit-ing.
1419		 */
1420		if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh)) {
1421			current->dirty_paused_when = now;
1422			current->nr_dirtied = 0;
1423			current->nr_dirtied_pause =
1424				dirty_poll_interval(dirty, thresh);
1425			break;
1426		}
1427
1428		if (unlikely(!writeback_in_progress(bdi)))
1429			bdi_start_background_writeback(bdi);
1430
1431		if (!strictlimit)
1432			bdi_dirty_limits(bdi, dirty_thresh, background_thresh,
1433					 &bdi_dirty, &bdi_thresh, NULL);
1434
1435		dirty_exceeded = (bdi_dirty > bdi_thresh) &&
1436				 ((nr_dirty > dirty_thresh) || strictlimit);
1437		if (dirty_exceeded && !bdi->dirty_exceeded)
1438			bdi->dirty_exceeded = 1;
1439
1440		bdi_update_bandwidth(bdi, dirty_thresh, background_thresh,
1441				     nr_dirty, bdi_thresh, bdi_dirty,
1442				     start_time);
1443
1444		dirty_ratelimit = bdi->dirty_ratelimit;
1445		pos_ratio = bdi_position_ratio(bdi, dirty_thresh,
1446					       background_thresh, nr_dirty,
1447					       bdi_thresh, bdi_dirty);
1448		task_ratelimit = ((u64)dirty_ratelimit * pos_ratio) >>
1449							RATELIMIT_CALC_SHIFT;
1450		max_pause = bdi_max_pause(bdi, bdi_dirty);
1451		min_pause = bdi_min_pause(bdi, max_pause,
1452					  task_ratelimit, dirty_ratelimit,
1453					  &nr_dirtied_pause);
1454
1455		if (unlikely(task_ratelimit == 0)) {
1456			period = max_pause;
1457			pause = max_pause;
1458			goto pause;
1459		}
1460		period = HZ * pages_dirtied / task_ratelimit;
1461		pause = period;
1462		if (current->dirty_paused_when)
1463			pause -= now - current->dirty_paused_when;
1464		/*
1465		 * For less than 1s think time (ext3/4 may block the dirtier
1466		 * for up to 800ms from time to time on 1-HDD; so does xfs,
1467		 * however at much lower frequency), try to compensate for it in
1468		 * future periods by updating the virtual time; otherwise just
1469		 * do a reset, as it may be a light dirtier.
1470		 */
1471		if (pause < min_pause) {
1472			trace_balance_dirty_pages(bdi,
1473						  dirty_thresh,
1474						  background_thresh,
1475						  nr_dirty,
1476						  bdi_thresh,
1477						  bdi_dirty,
1478						  dirty_ratelimit,
1479						  task_ratelimit,
1480						  pages_dirtied,
1481						  period,
1482						  min(pause, 0L),
1483						  start_time);
1484			if (pause < -HZ) {
1485				current->dirty_paused_when = now;
1486				current->nr_dirtied = 0;
1487			} else if (period) {
1488				current->dirty_paused_when += period;
1489				current->nr_dirtied = 0;
1490			} else if (current->nr_dirtied_pause <= pages_dirtied)
1491				current->nr_dirtied_pause += pages_dirtied;
1492			break;
1493		}
1494		if (unlikely(pause > max_pause)) {
1495			/* for occasional dropped task_ratelimit */
1496			now += min(pause - max_pause, max_pause);
1497			pause = max_pause;
1498		}
1499
1500pause:
1501		trace_balance_dirty_pages(bdi,
1502					  dirty_thresh,
1503					  background_thresh,
1504					  nr_dirty,
1505					  bdi_thresh,
1506					  bdi_dirty,
1507					  dirty_ratelimit,
1508					  task_ratelimit,
1509					  pages_dirtied,
1510					  period,
1511					  pause,
1512					  start_time);
1513		__set_current_state(TASK_KILLABLE);
1514		io_schedule_timeout(pause);
1515
1516		current->dirty_paused_when = now + pause;
1517		current->nr_dirtied = 0;
1518		current->nr_dirtied_pause = nr_dirtied_pause;
1519
1520		/*
1521		 * This is typically equal to (nr_dirty < dirty_thresh) and can
1522		 * also keep "1000+ dd on a slow USB stick" under control.
1523		 */
1524		if (task_ratelimit)
1525			break;
1526
1527		/*
1528		 * In the case of an unresponsive NFS server whose dirty
1529		 * pages exceed dirty_thresh, give the other good bdi's a pipe
1530		 * to go through, so that tasks on them still remain responsive.
1531		 *
1532		 * In theory 1 page is enough to keep the consumer-producer
1533		 * pipe going: the flusher cleans 1 page => the task dirties 1
1534		 * more page. However bdi_dirty has accounting errors.  So use
1535		 * the larger and more IO friendly bdi_stat_error.
1536		 */
1537		if (bdi_dirty <= bdi_stat_error(bdi))
1538			break;
1539
1540		if (fatal_signal_pending(current))
1541			break;
1542	}
1543
1544	if (!dirty_exceeded && bdi->dirty_exceeded)
1545		bdi->dirty_exceeded = 0;
1546
1547	if (writeback_in_progress(bdi))
1548		return;
1549
1550	/*
1551	 * In laptop mode, we wait until hitting the higher threshold before
1552	 * starting background writeout, and then write out all the way down
1553	 * to the lower threshold.  So slow writers cause minimal disk activity.
1554	 *
1555	 * In normal mode, we start background writeout at the lower
1556	 * background_thresh, to keep the amount of dirty memory low.
1557	 */
1558	if (laptop_mode)
1559		return;
1560
1561	if (nr_reclaimable > background_thresh)
1562		bdi_start_background_writeback(bdi);
1563}
1564
1565void set_page_dirty_balance(struct page *page)
1566{
1567	if (set_page_dirty(page)) {
1568		struct address_space *mapping = page_mapping(page);
1569
1570		if (mapping)
1571			balance_dirty_pages_ratelimited(mapping);
1572	}
1573}
1574
1575static DEFINE_PER_CPU(int, bdp_ratelimits);
1576
1577/*
1578 * Normal tasks are throttled by
1579 *	loop {
1580 *		dirty tsk->nr_dirtied_pause pages;
1581 *		take a snap in balance_dirty_pages();
1582 *	}
1583 * However there is a worst case. If every task exits immediately after dirtying
1584 * (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never be
1585 * called to throttle the page dirties. The solution is to save the not yet
1586 * throttled page dirties in dirty_throttle_leaks on task exit and charge them
1587 * randomly into the running tasks. This works well for the above worst case,
1588 * as the new task will pick up and accumulate the old task's leaked dirty
1589 * count and eventually get throttled.
1590 */
1591DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
1592
1593/**
1594 * balance_dirty_pages_ratelimited - balance dirty memory state
1595 * @mapping: address_space which was dirtied
1596 *
1597 * Processes which are dirtying memory should call in here once for each page
1598 * which was newly dirtied.  The function will periodically check the system's
1599 * dirty state and will initiate writeback if needed.
1600 *
1601 * On really big machines, get_writeback_state is expensive, so try to avoid
1602 * calling it too often (ratelimiting).  But once we're over the dirty memory
1603 * limit we decrease the ratelimiting by a lot, to prevent individual processes
1604 * from overshooting the limit by (ratelimit_pages) each.
1605 */
1606void balance_dirty_pages_ratelimited(struct address_space *mapping)
1607{
1608	struct backing_dev_info *bdi = mapping->backing_dev_info;
1609	int ratelimit;
1610	int *p;
1611
1612	if (!bdi_cap_account_dirty(bdi))
1613		return;
1614
1615	ratelimit = current->nr_dirtied_pause;
1616	if (bdi->dirty_exceeded)
1617		ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));
1618
1619	preempt_disable();
1620	/*
1621	 * This prevents one CPU from accumulating too many dirtied pages without
1622	 * calling into balance_dirty_pages(), which can happen when there are
1623	 * 1000+ tasks that all start dirtying pages at exactly the same
1624	 * time and hence all honoured a too-large initial task->nr_dirtied_pause.
1625	 */
1626	p = &__get_cpu_var(bdp_ratelimits);
1627	if (unlikely(current->nr_dirtied >= ratelimit))
1628		*p = 0;
1629	else if (unlikely(*p >= ratelimit_pages)) {
1630		*p = 0;
1631		ratelimit = 0;
1632	}
1633	/*
1634	 * Pick up the dirtied pages left behind by exited tasks. This avoids lots
1635	 * of short-lived tasks (eg. gcc invocations in a kernel build) escaping
1636	 * the dirty throttling and livelocking other long-running dirtiers.
1637	 */
1638	p = &__get_cpu_var(dirty_throttle_leaks);
1639	if (*p > 0 && current->nr_dirtied < ratelimit) {
1640		unsigned long nr_pages_dirtied;
1641		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
1642		*p -= nr_pages_dirtied;
1643		current->nr_dirtied += nr_pages_dirtied;
1644	}
1645	preempt_enable();
1646
1647	if (unlikely(current->nr_dirtied >= ratelimit))
1648		balance_dirty_pages(mapping, current->nr_dirtied);
1649}
1650EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
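
/*
 * Illustrative sketch (not part of the original file): a buffered-write
 * loop calls balance_dirty_pages_ratelimited() once per newly dirtied
 * page, as the kerneldoc above requires.  example_copy_and_dirty_page()
 * is a hypothetical helper that copies user data into the page cache
 * and marks the page dirty.
 */
static void example_buffered_write(struct address_space *mapping,
				   pgoff_t first, unsigned long nr_pages)
{
	unsigned long i;

	for (i = 0; i < nr_pages; i++) {
		example_copy_and_dirty_page(mapping, first + i);
		/* Cheap in the common case; only occasionally throttles. */
		balance_dirty_pages_ratelimited(mapping);
	}
}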
1651
1652void throttle_vm_writeout(gfp_t gfp_mask)
1653{
1654	unsigned long background_thresh;
1655	unsigned long dirty_thresh;
1656
1657	for ( ; ; ) {
1658		global_dirty_limits(&background_thresh, &dirty_thresh);
1659		dirty_thresh = hard_dirty_limit(dirty_thresh);
1660
1661		/*
1662		 * Boost the allowable dirty threshold a bit for page
1663		 * allocators so they don't get DoS'ed by heavy writers
1664		 */
1665		dirty_thresh += dirty_thresh / 10;      /* wheeee... */
1666
1667		if (global_page_state(NR_UNSTABLE_NFS) +
1668			global_page_state(NR_WRITEBACK) <= dirty_thresh)
1669			break;
1670		congestion_wait(BLK_RW_ASYNC, HZ/10);
1671
1672		/*
1673		 * The caller might hold locks which can prevent IO completion
1674		 * or progress in the filesystem.  So we cannot just sit here
1675		 * waiting for IO to complete.
1676		 */
1677		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
1678			break;
1679	}
1680}
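
/*
 * Illustrative sketch (not part of the original file): direct reclaim
 * (compare mm/vmscan.c) calls throttle_vm_writeout() after shrinking a
 * zone, so page allocators cannot push pages into writeback faster than
 * the devices can retire them.  The function name is hypothetical.
 */
static void example_after_zone_shrink(gfp_t gfp_mask)
{
	throttle_vm_writeout(gfp_mask);
}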
1681
1682/*
1683 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
1684 */
1685int dirty_writeback_centisecs_handler(ctl_table *table, int write,
1686	void __user *buffer, size_t *length, loff_t *ppos)
1687{
1688	proc_dointvec(table, write, buffer, length, ppos);
1689	return 0;
1690}
1691
1692#ifdef CONFIG_BLOCK
1693void laptop_mode_timer_fn(unsigned long data)
1694{
1695	struct request_queue *q = (struct request_queue *)data;
1696	int nr_pages = global_page_state(NR_FILE_DIRTY) +
1697		global_page_state(NR_UNSTABLE_NFS);
1698
1699	/*
1700	 * We want to write everything out, not just down to the dirty
1701	 * threshold
1702	 */
1703	if (bdi_has_dirty_io(&q->backing_dev_info))
1704		bdi_start_writeback(&q->backing_dev_info, nr_pages,
1705					WB_REASON_LAPTOP_TIMER);
1706}
1707
1708/*
1709 * We've spun up the disk and we're in laptop mode: schedule writeback
1710 * of all dirty data a few seconds from now.  If the flush is already scheduled
1711 * then push it back - the user is still using the disk.
1712 */
1713void laptop_io_completion(struct backing_dev_info *info)
1714{
1715	mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
1716}
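
/*
 * Illustrative sketch (not part of the original file): block request
 * completion (compare block/blk-core.c) pokes the laptop-mode timer so
 * that, while the disk stays busy, the scheduled flush keeps being
 * pushed back.  The function name is hypothetical.
 */
static void example_request_completed(struct request_queue *q)
{
	if (laptop_mode)
		laptop_io_completion(&q->backing_dev_info);
}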
1717
1718/*
1719 * We're in laptop mode and we've just synced. The sync's writes will have
1720 * caused another writeback to be scheduled by laptop_io_completion.
1721 * Nothing needs to be written back anymore, so we unschedule the writeback.
1722 */
1723void laptop_sync_completion(void)
1724{
1725	struct backing_dev_info *bdi;
1726
1727	rcu_read_lock();
1728
1729	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
1730		del_timer(&bdi->laptop_mode_wb_timer);
1731
1732	rcu_read_unlock();
1733}
1734#endif
1735
1736/*
1737 * If ratelimit_pages is too high then we can get into dirty-data overload
1738 * if a large number of processes all perform writes at the same time.
1739 * If it is too low then SMP machines will call the (expensive)
1740 * get_writeback_state too often.
1741 *
1742 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
1743 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
1744 * thresholds.
1745 */
1746
1747void writeback_set_ratelimit(void)
1748{
1749	unsigned long background_thresh;
1750	unsigned long dirty_thresh;
1751	global_dirty_limits(&background_thresh, &dirty_thresh);
1752	global_dirty_limit = dirty_thresh;
1753	ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
1754	if (ratelimit_pages < 16)
1755		ratelimit_pages = 16;
1756}
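
/*
 * Worked example (illustrative): on a 4-CPU box with a dirty_thresh of
 * 16384 pages (64MB with 4KB pages), ratelimit_pages becomes
 * 16384 / (4 * 32) = 128.  Each CPU then revisits the global state
 * every 128 dirtied pages, so the worst-case overshoot is
 * 4 * 128 = 512 pages = dirty_thresh / 32, i.e. ~3%.
 */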
1757
1758static int
1759ratelimit_handler(struct notifier_block *self, unsigned long action,
1760		  void *hcpu)
1761{
1762
1763	switch (action & ~CPU_TASKS_FROZEN) {
1764	case CPU_ONLINE:
1765	case CPU_DEAD:
1766		writeback_set_ratelimit();
1767		return NOTIFY_OK;
1768	default:
1769		return NOTIFY_DONE;
1770	}
1771}
1772
1773static struct notifier_block ratelimit_nb = {
1774	.notifier_call	= ratelimit_handler,
1775	.next		= NULL,
1776};
1777
1778/*
1779 * Called early on to tune the page writeback dirty limits.
1780 *
1781 * We used to scale dirty pages according to how total memory
1782 * related to pages that could be allocated for buffers (by
1783 * comparing nr_free_buffer_pages() to vm_total_pages).
1784 *
1785 * However, that was when we used "dirty_ratio" to scale with
1786 * all memory, and we don't do that any more. "dirty_ratio"
1787 * is now applied to total non-HIGHMEM memory (by subtracting
1788 * totalhigh_pages from vm_total_pages), and as such we can't
1789 * get into the old insane situation any more where we had
1790 * large amounts of dirty pages compared to a small amount of
1791 * non-HIGHMEM memory.
1792 *
1793 * But we might still want to scale the dirty_ratio by how
1794 * much memory the box has..
1795 */
1796void __init page_writeback_init(void)
1797{
1798	writeback_set_ratelimit();
1799	register_cpu_notifier(&ratelimit_nb);
1800
1801	fprop_global_init(&writeout_completions);
1802}
1803
1804/**
1805 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
1806 * @mapping: address space structure to write
1807 * @start: starting page index
1808 * @end: ending page index (inclusive)
1809 *
1810 * This function scans the page range from @start to @end (inclusive) and tags
1811 * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
1812 * that write_cache_pages (or whoever calls this function) will then use
1813 * TOWRITE tag to identify pages eligible for writeback.  This mechanism is
1814 * used to avoid livelocking of writeback by a process steadily creating new
1815 * dirty pages in the file (thus it is important for this function to be quick
1816 * so that it can tag pages faster than a dirtying process can create them).
1817 */
1818/*
1819 * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency.
1820 */
1821void tag_pages_for_writeback(struct address_space *mapping,
1822			     pgoff_t start, pgoff_t end)
1823{
1824#define WRITEBACK_TAG_BATCH 4096
1825	unsigned long tagged;
1826
1827	do {
1828		spin_lock_irq(&mapping->tree_lock);
1829		tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
1830				&start, end, WRITEBACK_TAG_BATCH,
1831				PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
1832		spin_unlock_irq(&mapping->tree_lock);
1833		WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
1834		cond_resched();
1835		/* We check 'start' to handle wrapping when end == ~0UL */
1836	} while (tagged >= WRITEBACK_TAG_BATCH && start);
1837}
1838EXPORT_SYMBOL(tag_pages_for_writeback);
1839
1840/**
1841 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
1842 * @mapping: address space structure to write
1843 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
1844 * @writepage: function called for each page
1845 * @data: data passed to writepage function
1846 *
1847 * If a page is already under I/O, write_cache_pages() skips it, even
1848 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
1849 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
1850 * and msync() need to guarantee that all the data which was dirty at the
1851 * time the call was made gets new I/O started against it.  If wbc->sync_mode
1852 * WB_SYNC_ALL then we were called for data integrity and we must wait for
1853 * existing IO to complete.
1854 *
1855 * To avoid livelocks (when other process dirties new pages), we first tag
1856 * pages which should be written back with TOWRITE tag and only then start
1857 * writing them. For data-integrity sync we have to be careful so that we do
1858 * not miss some pages (e.g., because some other process has cleared TOWRITE
1859 * tag we set). The rule we follow is that TOWRITE tag can be cleared only
1860 * by the process clearing the DIRTY tag (and submitting the page for IO).
1861 */
1862int write_cache_pages(struct address_space *mapping,
1863		      struct writeback_control *wbc, writepage_t writepage,
1864		      void *data)
1865{
1866	int ret = 0;
1867	int done = 0;
1868	struct pagevec pvec;
1869	int nr_pages;
1870	pgoff_t uninitialized_var(writeback_index);
1871	pgoff_t index;
1872	pgoff_t end;		/* Inclusive */
1873	pgoff_t done_index;
1874	int cycled;
1875	int range_whole = 0;
1876	int tag;
1877
1878	pagevec_init(&pvec, 0);
1879	if (wbc->range_cyclic) {
1880		writeback_index = mapping->writeback_index; /* prev offset */
1881		index = writeback_index;
1882		if (index == 0)
1883			cycled = 1;
1884		else
1885			cycled = 0;
1886		end = -1;
1887	} else {
1888		index = wbc->range_start >> PAGE_CACHE_SHIFT;
1889		end = wbc->range_end >> PAGE_CACHE_SHIFT;
1890		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1891			range_whole = 1;
1892		cycled = 1; /* ignore range_cyclic tests */
1893	}
1894	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1895		tag = PAGECACHE_TAG_TOWRITE;
1896	else
1897		tag = PAGECACHE_TAG_DIRTY;
1898retry:
1899	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1900		tag_pages_for_writeback(mapping, index, end);
1901	done_index = index;
1902	while (!done && (index <= end)) {
1903		int i;
1904
1905		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
1906			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1907		if (nr_pages == 0)
1908			break;
1909
1910		for (i = 0; i < nr_pages; i++) {
1911			struct page *page = pvec.pages[i];
1912
1913			/*
1914			 * At this point, the page may be truncated or
1915			 * invalidated (changing page->mapping to NULL), or
1916			 * even swizzled back from swapper_space to tmpfs file
1917			 * mapping. However, page->index will not change
1918			 * because we have a reference on the page.
1919			 */
1920			if (page->index > end) {
1921				/*
1922				 * can't be range_cyclic (1st pass) because
1923				 * end == -1 in that case.
1924				 */
1925				done = 1;
1926				break;
1927			}
1928
1929			done_index = page->index;
1930
1931			lock_page(page);
1932
1933			/*
1934			 * Page truncated or invalidated. We can freely skip it
1935			 * then, even for data integrity operations: the page
1936			 * has disappeared concurrently, so there could be no
1937			 * real expectation of this data integrity operation
1938			 * even if there is now a new, dirty page at the same
1939			 * pagecache address.
1940			 */
1941			if (unlikely(page->mapping != mapping)) {
1942continue_unlock:
1943				unlock_page(page);
1944				continue;
1945			}
1946
1947			if (!PageDirty(page)) {
1948				/* someone wrote it for us */
1949				goto continue_unlock;
1950			}
1951
1952			if (PageWriteback(page)) {
1953				if (wbc->sync_mode != WB_SYNC_NONE)
1954					wait_on_page_writeback(page);
1955				else
1956					goto continue_unlock;
1957			}
1958
1959			BUG_ON(PageWriteback(page));
1960			if (!clear_page_dirty_for_io(page))
1961				goto continue_unlock;
1962
1963			trace_wbc_writepage(wbc, mapping->backing_dev_info);
1964			ret = (*writepage)(page, wbc, data);
1965			if (unlikely(ret)) {
1966				if (ret == AOP_WRITEPAGE_ACTIVATE) {
1967					unlock_page(page);
1968					ret = 0;
1969				} else {
1970					/*
1971					 * done_index is set past this page,
1972					 * so media errors will not choke
1973					 * background writeout for the entire
1974					 * file. This has consequences for
1975					 * range_cyclic semantics (i.e. it may
1976					 * not be suitable for data integrity
1977					 * writeout).
1978					 */
1979					done_index = page->index + 1;
1980					done = 1;
1981					break;
1982				}
1983			}
1984
1985			/*
1986			 * We stop writing back only if we are not doing
1987			 * integrity sync. In case of integrity sync we have to
1988			 * keep going until we have written all the pages
1989			 * we tagged for writeback prior to entering this loop.
1990			 */
1991			if (--wbc->nr_to_write <= 0 &&
1992			    wbc->sync_mode == WB_SYNC_NONE) {
1993				done = 1;
1994				break;
1995			}
1996		}
1997		pagevec_release(&pvec);
1998		cond_resched();
1999	}
2000	if (!cycled && !done) {
2001		/*
2002		 * range_cyclic:
2003		 * We hit the last page and there is more work to be done: wrap
2004		 * back to the start of the file
2005		 */
2006		cycled = 1;
2007		index = 0;
2008		end = writeback_index - 1;
2009		goto retry;
2010	}
2011	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2012		mapping->writeback_index = done_index;
2013
2014	return ret;
2015}
2016EXPORT_SYMBOL(write_cache_pages);
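
/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * ->writepages can pass private state through the @data cookie of
 * write_cache_pages().  All example_* names are hypothetical.
 */
struct example_wb_ctx {
	int pages_submitted;
};

static int example_writepage_cb(struct page *page,
				struct writeback_control *wbc, void *data)
{
	struct example_wb_ctx *ctx = data;

	ctx->pages_submitted++;
	return example_fs_write_one_page(page, wbc);
}

static int example_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	struct example_wb_ctx ctx = { .pages_submitted = 0 };

	return write_cache_pages(mapping, wbc, example_writepage_cb, &ctx);
}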
2017
2018/*
2019 * Function used by generic_writepages to call the real writepage
2020 * function and set the mapping flags on error
2021 */
2022static int __writepage(struct page *page, struct writeback_control *wbc,
2023		       void *data)
2024{
2025	struct address_space *mapping = data;
2026	int ret = mapping->a_ops->writepage(page, wbc);
2027	mapping_set_error(mapping, ret);
2028	return ret;
2029}
2030
2031/**
2032 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
2033 * @mapping: address space structure to write
2034 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2035 *
2036 * This is a library function, which implements the writepages()
2037 * address_space_operation.
2038 */
2039int generic_writepages(struct address_space *mapping,
2040		       struct writeback_control *wbc)
2041{
2042	struct blk_plug plug;
2043	int ret;
2044
2045	/* deal with chardevs and other special file */
2046	if (!mapping->a_ops->writepage)
2047		return 0;
2048
2049	blk_start_plug(&plug);
2050	ret = write_cache_pages(mapping, wbc, __writepage, mapping);
2051	blk_finish_plug(&plug);
2052	return ret;
2053}
2054
2055EXPORT_SYMBOL(generic_writepages);
2056
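/*
 * Illustrative sketch (not part of the original file): a simple
 * filesystem can leave ->writepages NULL and only provide ->writepage;
 * do_writepages() below then falls back to generic_writepages().
 * example_writepage and example_aops are hypothetical.
 */
static const struct address_space_operations example_aops = {
	.writepage	= example_writepage,
	/* no .writepages: do_writepages() uses generic_writepages() */
};
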
2057int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
2058{
2059	int ret;
2060
2061	if (wbc->nr_to_write <= 0)
2062		return 0;
2063	if (mapping->a_ops->writepages)
2064		ret = mapping->a_ops->writepages(mapping, wbc);
2065	else
2066		ret = generic_writepages(mapping, wbc);
2067	return ret;
2068}
2069
2070/**
2071 * write_one_page - write out a single page and optionally wait on I/O
2072 * @page: the page to write
2073 * @wait: if true, wait on writeout
2074 *
2075 * The page must be locked by the caller and will be unlocked upon return.
2076 *
2077 * write_one_page() returns a negative error code if I/O failed.
2078 */
2079int write_one_page(struct page *page, int wait)
2080{
2081	struct address_space *mapping = page->mapping;
2082	int ret = 0;
2083	struct writeback_control wbc = {
2084		.sync_mode = WB_SYNC_ALL,
2085		.nr_to_write = 1,
2086	};
2087
2088	BUG_ON(!PageLocked(page));
2089
2090	if (wait)
2091		wait_on_page_writeback(page);
2092
2093	if (clear_page_dirty_for_io(page)) {
2094		page_cache_get(page);
2095		ret = mapping->a_ops->writepage(page, &wbc);
2096		if (ret == 0 && wait) {
2097			wait_on_page_writeback(page);
2098			if (PageError(page))
2099				ret = -EIO;
2100		}
2101		page_cache_release(page);
2102	} else {
2103		unlock_page(page);
2104	}
2105	return ret;
2106}
2107EXPORT_SYMBOL(write_one_page);
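
/*
 * Illustrative sketch (not part of the original file): a directory
 * update path might force a single metadata page out synchronously.
 * The caller must hold the page lock; write_one_page() unlocks it.
 * example_dir_commit_page() is a hypothetical name.
 */
static int example_dir_commit_page(struct page *page)
{
	return write_one_page(page, 1);
}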
2108
2109/*
2110 * For address_spaces which do not use buffers nor write back.
2111 */
2112int __set_page_dirty_no_writeback(struct page *page)
2113{
2114	if (!PageDirty(page))
2115		return !TestSetPageDirty(page);
2116	return 0;
2117}
2118
2119/*
2120 * Helper function for set_page_dirty family.
2121 * NOTE: This relies on being atomic wrt interrupts.
2122 */
2123void account_page_dirtied(struct page *page, struct address_space *mapping)
2124{
2125	trace_writeback_dirty_page(page, mapping);
2126
2127	if (mapping_cap_account_dirty(mapping)) {
2128		__inc_zone_page_state(page, NR_FILE_DIRTY);
2129		__inc_zone_page_state(page, NR_DIRTIED);
2130		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
2131		__inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
2132		task_io_account_write(PAGE_CACHE_SIZE);
2133		current->nr_dirtied++;
2134		this_cpu_inc(bdp_ratelimits);
2135	}
2136}
2137EXPORT_SYMBOL(account_page_dirtied);
2138
2139/*
2140 * Helper function for set_page_writeback family.
2141 *
2142 * The caller must hold mem_cgroup_begin/end_update_page_stat() lock
2143 * while calling this function.
2144 * See test_set_page_writeback for example.
2145 *
2146 * NOTE: Unlike account_page_dirtied this does not rely on being atomic
2147 * wrt interrupts.
2148 */
2149void account_page_writeback(struct page *page)
2150{
2151	mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
2152	inc_zone_page_state(page, NR_WRITEBACK);
2153}
2154EXPORT_SYMBOL(account_page_writeback);
2155
2156/*
2157 * For address_spaces which do not use buffers.  Just tag the page as dirty in
2158 * its radix tree.
2159 *
2160 * This is also used when a single buffer is being dirtied: we want to set the
2161 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
2162 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
2163 *
2164 * Most callers have locked the page, which pins the address_space in memory.
2165 * But zap_pte_range() does not lock the page, however in that case the
2166 * mapping is pinned by the vma's ->vm_file reference.
2167 *
2168 * We take care to handle the case where the page was truncated from the
2169 * mapping by re-checking page_mapping() inside tree_lock.
2170 */
2171int __set_page_dirty_nobuffers(struct page *page)
2172{
2173	if (!TestSetPageDirty(page)) {
2174		struct address_space *mapping = page_mapping(page);
2175		struct address_space *mapping2;
2176		unsigned long flags;
2177
2178		if (!mapping)
2179			return 1;
2180
2181		spin_lock_irqsave(&mapping->tree_lock, flags);
2182		mapping2 = page_mapping(page);
2183		if (mapping2) { /* Race with truncate? */
2184			BUG_ON(mapping2 != mapping);
2185			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
2186			account_page_dirtied(page, mapping);
2187			radix_tree_tag_set(&mapping->page_tree,
2188				page_index(page), PAGECACHE_TAG_DIRTY);
2189		}
2190		spin_unlock_irqrestore(&mapping->tree_lock, flags);
2191		if (mapping->host) {
2192			/* !PageAnon && !swapper_space */
2193			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
2194		}
2195		return 1;
2196	}
2197	return 0;
2198}
2199EXPORT_SYMBOL(__set_page_dirty_nobuffers);
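
/*
 * Illustrative sketch (not part of the original file): address_spaces
 * that do not use buffer_heads typically wire this function up directly
 * as their ->set_page_dirty method; example_nobh_aops is hypothetical.
 */
static const struct address_space_operations example_nobh_aops = {
	.set_page_dirty	= __set_page_dirty_nobuffers,
};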
2200
2201/*
2202 * Call this whenever redirtying a page, to de-account the dirty counters
2203 * (NR_DIRTIED, BDI_DIRTIED, tsk->nr_dirtied), so that they match the written
2204 * counters (NR_WRITTEN, BDI_WRITTEN) in the long term. The mismatches will lead
2205 * systematic errors in balanced_dirty_ratelimit and the dirty pages position
2206 * control.
2207 */
2208void account_page_redirty(struct page *page)
2209{
2210	struct address_space *mapping = page->mapping;
2211	if (mapping && mapping_cap_account_dirty(mapping)) {
2212		current->nr_dirtied--;
2213		dec_zone_page_state(page, NR_DIRTIED);
2214		dec_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
2215	}
2216}
2217EXPORT_SYMBOL(account_page_redirty);
2218
2219/*
2220 * When a writepage implementation decides that it doesn't want to write this
2221 * page for some reason, it should redirty the locked page via
2222 * redirty_page_for_writepage() and it should then unlock the page and return 0
2223 */
2224int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
2225{
2226	wbc->pages_skipped++;
2227	account_page_redirty(page);
2228	return __set_page_dirty_nobuffers(page);
2229}
2230EXPORT_SYMBOL(redirty_page_for_writepage);
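
/*
 * Illustrative sketch (not part of the original file): a ->writepage
 * that cannot make progress (say, it cannot allocate its IO structures
 * during WB_SYNC_NONE writeback) redirties the page, unlocks it and
 * returns 0, as described above.  The example_* helpers are
 * hypothetical.
 */
static int example_fs_writepage(struct page *page,
				struct writeback_control *wbc)
{
	if (!example_can_allocate_io(wbc)) {
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}
	return example_submit_page_io(page, wbc);
}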
2231
2232/*
2233 * Dirty a page.
2234 *
2235 * For pages with a mapping this should be done under the page lock
2236 * for the benefit of asynchronous memory-error handling, which prefers a
2237 * consistent dirty state. This rule can be broken in some special cases,
2238 * but it is better not to.
2239 *
2240 * If the mapping doesn't provide a set_page_dirty a_op, then
2241 * just fall through and assume that it wants buffer_heads.
2242 */
2243int set_page_dirty(struct page *page)
2244{
2245	struct address_space *mapping = page_mapping(page);
2246
2247	if (likely(mapping)) {
2248		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
2249		/*
2250		 * readahead/lru_deactivate_page could leave PG_readahead/PG_reclaim
2251		 * set due to a race with end_page_writeback().
2252		 * As for readahead: if the page is written, the flag will be
2253		 * reset, so no problem.
2254		 * As for lru_deactivate_page: if the page is redirtied, the flag
2255		 * will be reset, so no problem; but if the page is used by
2256		 * readahead it will confuse readahead and make it restart the
2257		 * size ramp-up process. That is only a trivial problem, though.
2258		 */
2259		ClearPageReclaim(page);
2260#ifdef CONFIG_BLOCK
2261		if (!spd)
2262			spd = __set_page_dirty_buffers;
2263#endif
2264		return (*spd)(page);
2265	}
2266	if (!PageDirty(page)) {
2267		if (!TestSetPageDirty(page))
2268			return 1;
2269	}
2270	return 0;
2271}
2272EXPORT_SYMBOL(set_page_dirty);
2273
2274/*
2275 * set_page_dirty() is racy if the caller has no reference against
2276 * page->mapping->host, and if the page is unlocked.  This is because another
2277 * CPU could truncate the page off the mapping and then free the mapping.
2278 *
2279 * Usually, the page _is_ locked, or the caller is a user-space process which
2280 * holds a reference on the inode by having an open file.
2281 *
2282 * In other cases, the page should be locked before running set_page_dirty().
2283 */
2284int set_page_dirty_lock(struct page *page)
2285{
2286	int ret;
2287
2288	lock_page(page);
2289	ret = set_page_dirty(page);
2290	unlock_page(page);
2291	return ret;
2292}
2293EXPORT_SYMBOL(set_page_dirty_lock);
2294
2295/*
2296 * Clear a page's dirty flag, while caring for dirty memory accounting.
2297 * Returns true if the page was previously dirty.
2298 *
2299 * This is for preparing to put the page under writeout.  We leave the page
2300 * tagged as dirty in the radix tree so that a concurrent write-for-sync
2301 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
2302 * implementation will run either set_page_writeback() or set_page_dirty(),
2303 * at which stage we bring the page's dirty flag and radix-tree dirty tag
2304 * back into sync.
2305 *
2306 * This incoherency between the page's dirty flag and radix-tree tag is
2307 * unfortunate, but it only exists while the page is locked.
2308 */
2309int clear_page_dirty_for_io(struct page *page)
2310{
2311	struct address_space *mapping = page_mapping(page);
2312
2313	BUG_ON(!PageLocked(page));
2314
2315	if (mapping && mapping_cap_account_dirty(mapping)) {
2316		/*
2317		 * Yes, Virginia, this is indeed insane.
2318		 *
2319		 * We use this sequence to make sure that
2320		 *  (a) we account for dirty stats properly
2321		 *  (b) we tell the low-level filesystem to
2322		 *      mark the whole page dirty if it was
2323		 *      dirty in a pagetable. Only to then
2324		 *  (c) clean the page again and return 1 to
2325		 *      cause the writeback.
2326		 *
2327		 * This way we avoid all nasty races with the
2328		 * dirty bit in multiple places and clearing
2329		 * them concurrently from different threads.
2330		 *
2331		 * Note! Normally the "set_page_dirty(page)"
2332		 * has no effect on the actual dirty bit - since
2333		 * that will already usually be set. But we
2334		 * need the side effects, and it can help us
2335		 * avoid races.
2336		 *
2337		 * We basically use the page "master dirty bit"
2338		 * as a serialization point for all the different
2339		 * threads doing their things.
2340		 */
2341		if (page_mkclean(page))
2342			set_page_dirty(page);
2343		/*
2344		 * We carefully synchronise fault handlers against
2345		 * installing a dirty pte and marking the page dirty
2346		 * at this point. We do this by having them hold the
2347		 * page lock at some point after installing their
2348		 * pte, but before marking the page dirty.
2349		 * Pages are always locked coming in here, so we get
2350		 * the desired exclusion. See mm/memory.c:do_wp_page()
2351		 * for more comments.
2352		 */
2353		if (TestClearPageDirty(page)) {
2354			dec_zone_page_state(page, NR_FILE_DIRTY);
2355			dec_bdi_stat(mapping->backing_dev_info,
2356					BDI_RECLAIMABLE);
2357			return 1;
2358		}
2359		return 0;
2360	}
2361	return TestClearPageDirty(page);
2362}
2363EXPORT_SYMBOL(clear_page_dirty_for_io);
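
/*
 * Illustrative sketch (not part of the original file): the canonical
 * writeout order is to clear the dirty bit, mark the page as under
 * writeback, then submit the IO.  example_submit_bio_for_page() is
 * hypothetical.
 */
static int example_start_writeout(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (!clear_page_dirty_for_io(page))
		return 0;	/* someone else already cleaned it */

	set_page_writeback(page);
	unlock_page(page);
	return example_submit_bio_for_page(page);
}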
2364
2365int test_clear_page_writeback(struct page *page)
2366{
2367	struct address_space *mapping = page_mapping(page);
2368	int ret;
2369	bool locked;
2370	unsigned long memcg_flags;
2371
2372	mem_cgroup_begin_update_page_stat(page, &locked, &memcg_flags);
2373	if (mapping) {
2374		struct backing_dev_info *bdi = mapping->backing_dev_info;
2375		unsigned long flags;
2376
2377		spin_lock_irqsave(&mapping->tree_lock, flags);
2378		ret = TestClearPageWriteback(page);
2379		if (ret) {
2380			radix_tree_tag_clear(&mapping->page_tree,
2381						page_index(page),
2382						PAGECACHE_TAG_WRITEBACK);
2383			if (bdi_cap_account_writeback(bdi)) {
2384				__dec_bdi_stat(bdi, BDI_WRITEBACK);
2385				__bdi_writeout_inc(bdi);
2386			}
2387		}
2388		spin_unlock_irqrestore(&mapping->tree_lock, flags);
2389	} else {
2390		ret = TestClearPageWriteback(page);
2391	}
2392	if (ret) {
2393		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
2394		dec_zone_page_state(page, NR_WRITEBACK);
2395		inc_zone_page_state(page, NR_WRITTEN);
2396	}
2397	mem_cgroup_end_update_page_stat(page, &locked, &memcg_flags);
2398	return ret;
2399}
2400
2401int test_set_page_writeback(struct page *page)
2402{
2403	struct address_space *mapping = page_mapping(page);
2404	int ret;
2405	bool locked;
2406	unsigned long memcg_flags;
2407
2408	mem_cgroup_begin_update_page_stat(page, &locked, &memcg_flags);
2409	if (mapping) {
2410		struct backing_dev_info *bdi = mapping->backing_dev_info;
2411		unsigned long flags;
2412
2413		spin_lock_irqsave(&mapping->tree_lock, flags);
2414		ret = TestSetPageWriteback(page);
2415		if (!ret) {
2416			radix_tree_tag_set(&mapping->page_tree,
2417						page_index(page),
2418						PAGECACHE_TAG_WRITEBACK);
2419			if (bdi_cap_account_writeback(bdi))
2420				__inc_bdi_stat(bdi, BDI_WRITEBACK);
2421		}
2422		if (!PageDirty(page))
2423			radix_tree_tag_clear(&mapping->page_tree,
2424						page_index(page),
2425						PAGECACHE_TAG_DIRTY);
2426		radix_tree_tag_clear(&mapping->page_tree,
2427				     page_index(page),
2428				     PAGECACHE_TAG_TOWRITE);
2429		spin_unlock_irqrestore(&mapping->tree_lock, flags);
2430	} else {
2431		ret = TestSetPageWriteback(page);
2432	}
2433	if (!ret)
2434		account_page_writeback(page);
2435	mem_cgroup_end_update_page_stat(page, &locked, &memcg_flags);
2436	return ret;
2437
2438}
2439EXPORT_SYMBOL(test_set_page_writeback);
2440
2441/*
2442 * Return true if any of the pages in the mapping are marked with the
2443 * passed tag.
2444 */
2445int mapping_tagged(struct address_space *mapping, int tag)
2446{
2447	return radix_tree_tagged(&mapping->page_tree, tag);
2448}
2449EXPORT_SYMBOL(mapping_tagged);
2450
2451/**
2452 * wait_for_stable_page() - wait for writeback to finish, if necessary.
2453 * @page:	The page to wait on.
2454 *
2455 * This function determines if the given page is related to a backing device
2456 * that requires page contents to be held stable during writeback.  If so, then
2457 * it will wait for any pending writeback to complete.
2458 */
2459void wait_for_stable_page(struct page *page)
2460{
2461	struct address_space *mapping = page_mapping(page);
2462	struct backing_dev_info *bdi = mapping->backing_dev_info;
2463
2464	if (!bdi_cap_stable_pages_required(bdi))
2465		return;
2466
2467	wait_on_page_writeback(page);
2468}
2469EXPORT_SYMBOL_GPL(wait_for_stable_page);
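
/*
 * Illustrative sketch (not part of the original file): a ->page_mkwrite
 * implementation for a device that needs stable pages (e.g. one that
 * checksums pages during DMA) waits here before allowing the page to be
 * dirtied again through a shared mapping.  example_page_mkwrite() is a
 * hypothetical name.
 */
static int example_page_mkwrite(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	/* Assumes the page is still attached to the file at this point. */
	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
}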