v4.6
   1/*
   2 * mm/page-writeback.c
   3 *
   4 * Copyright (C) 2002, Linus Torvalds.
   5 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
   6 *
   7 * Contains functions related to writing back dirty pages at the
   8 * address_space level.
   9 *
  10 * 10Apr2002	Andrew Morton
  11 *		Initial version
  12 */
  13
  14#include <linux/kernel.h>
  15#include <linux/export.h>
  16#include <linux/spinlock.h>
  17#include <linux/fs.h>
  18#include <linux/mm.h>
  19#include <linux/swap.h>
  20#include <linux/slab.h>
  21#include <linux/pagemap.h>
  22#include <linux/writeback.h>
  23#include <linux/init.h>
  24#include <linux/backing-dev.h>
  25#include <linux/task_io_accounting_ops.h>
  26#include <linux/blkdev.h>
  27#include <linux/mpage.h>
  28#include <linux/rmap.h>
  29#include <linux/percpu.h>
  30#include <linux/notifier.h>
  31#include <linux/smp.h>
  32#include <linux/sysctl.h>
  33#include <linux/cpu.h>
  34#include <linux/syscalls.h>
  35#include <linux/buffer_head.h> /* __set_page_dirty_buffers */
  36#include <linux/pagevec.h>
  37#include <linux/timer.h>
  38#include <linux/sched/rt.h>
  39#include <linux/mm_inline.h>
  40#include <trace/events/writeback.h>
  41
  42#include "internal.h"
  43
  44/*
  45 * Sleep at most 200ms at a time in balance_dirty_pages().
  46 */
  47#define MAX_PAUSE		max(HZ/5, 1)
  48
  49/*
  50 * Try to keep balance_dirty_pages() call intervals higher than this many pages
  51 * by raising the pause time to max_pause when the interval falls below it.
  52 */
  53#define DIRTY_POLL_THRESH	(128 >> (PAGE_SHIFT - 10))
  54
  55/*
  56 * Estimate write bandwidth at 200ms intervals.
  57 */
  58#define BANDWIDTH_INTERVAL	max(HZ/5, 1)
  59
  60#define RATELIMIT_CALC_SHIFT	10
  61
  62/*
  63 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
  64 * will look to see if it needs to force writeback or throttling.
  65 */
  66static long ratelimit_pages = 32;
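
As a quick editorial illustration (not part of the kernel source) of how the tunables above come out on a common configuration, the userspace sketch below assumes HZ=1000 and 4KiB pages and replaces the kernel's type-checked max() with a plain ternary; it prints 200 jiffies (~200ms) for MAX_PAUSE and BANDWIDTH_INTERVAL and 32 pages for DIRTY_POLL_THRESH.

/* userspace sketch: evaluate the writeback tunables for HZ=1000, 4KiB pages */
#include <stdio.h>

#define HZ		1000			/* assumption: common config */
#define PAGE_SHIFT	12			/* assumption: 4KiB pages */
#define MAX_PAUSE		(HZ / 5 > 1 ? HZ / 5 : 1)
#define BANDWIDTH_INTERVAL	(HZ / 5 > 1 ? HZ / 5 : 1)
#define DIRTY_POLL_THRESH	(128 >> (PAGE_SHIFT - 10))

int main(void)
{
	printf("MAX_PAUSE          = %d jiffies\n", MAX_PAUSE);		/* 200 */
	printf("BANDWIDTH_INTERVAL = %d jiffies\n", BANDWIDTH_INTERVAL);	/* 200 */
	printf("DIRTY_POLL_THRESH  = %d pages\n", DIRTY_POLL_THRESH);	/* 32 */
	return 0;
}
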
  67
  68/* The following parameters are exported via /proc/sys/vm */
  69
  70/*
  71 * Start background writeback (via writeback threads) at this percentage
  72 */
  73int dirty_background_ratio = 10;
  74
  75/*
  76 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
  77 * dirty_background_ratio * the amount of dirtyable memory
  78 */
  79unsigned long dirty_background_bytes;
  80
  81/*
  82 * free highmem will not be subtracted from the total free memory
  83 * for calculating free ratios if vm_highmem_is_dirtyable is true
  84 */
  85int vm_highmem_is_dirtyable;
  86
  87/*
  88 * The generator of dirty data starts writeback at this percentage
  89 */
  90int vm_dirty_ratio = 20;
  91
  92/*
  93 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
  94 * vm_dirty_ratio * the amount of dirtyable memory
  95 */
  96unsigned long vm_dirty_bytes;
  97
  98/*
  99 * The interval between `kupdate'-style writebacks
 100 */
 101unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
 102
 103EXPORT_SYMBOL_GPL(dirty_writeback_interval);
 104
 105/*
 106 * The longest time for which data is allowed to remain dirty
 107 */
 108unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
 109
 110/*
 111 * Flag that makes the machine dump writes/reads and block dirtyings.
 112 */
 113int block_dump;
 114
 115/*
 116 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 117 * a full sync is triggered after this time elapses without any disk activity.
 118 */
 119int laptop_mode;
 120
 121EXPORT_SYMBOL(laptop_mode);
 122
 123/* End of sysctl-exported parameters */
 124
 125struct wb_domain global_wb_domain;
 126
 127/* consolidated parameters for balance_dirty_pages() and its subroutines */
 128struct dirty_throttle_control {
 129#ifdef CONFIG_CGROUP_WRITEBACK
 130	struct wb_domain	*dom;
 131	struct dirty_throttle_control *gdtc;	/* only set in memcg dtc's */
 132#endif
 133	struct bdi_writeback	*wb;
 134	struct fprop_local_percpu *wb_completions;
 135
 136	unsigned long		avail;		/* dirtyable */
 137	unsigned long		dirty;		/* file_dirty + write + nfs */
 138	unsigned long		thresh;		/* dirty threshold */
 139	unsigned long		bg_thresh;	/* dirty background threshold */
 140
 141	unsigned long		wb_dirty;	/* per-wb counterparts */
 142	unsigned long		wb_thresh;
 143	unsigned long		wb_bg_thresh;
 144
 145	unsigned long		pos_ratio;
 146};
 147
 148/*
 149 * Length of period for aging writeout fractions of bdis. This is an
 150 * arbitrarily chosen number. The longer the period, the slower fractions will
 151 * reflect changes in current writeout rate.
 152 */
 153#define VM_COMPLETIONS_PERIOD_LEN (3*HZ)
 154
 155#ifdef CONFIG_CGROUP_WRITEBACK
 156
 157#define GDTC_INIT(__wb)		.wb = (__wb),				\
 158				.dom = &global_wb_domain,		\
 159				.wb_completions = &(__wb)->completions
 160
 161#define GDTC_INIT_NO_WB		.dom = &global_wb_domain
 162
 163#define MDTC_INIT(__wb, __gdtc)	.wb = (__wb),				\
 164				.dom = mem_cgroup_wb_domain(__wb),	\
 165				.wb_completions = &(__wb)->memcg_completions, \
 166				.gdtc = __gdtc
 167
 168static bool mdtc_valid(struct dirty_throttle_control *dtc)
 169{
 170	return dtc->dom;
 171}
 172
 173static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
 174{
 175	return dtc->dom;
 176}
 177
 178static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
 179{
 180	return mdtc->gdtc;
 181}
 182
 183static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
 184{
 185	return &wb->memcg_completions;
 186}
 187
 188static void wb_min_max_ratio(struct bdi_writeback *wb,
 189			     unsigned long *minp, unsigned long *maxp)
 190{
 191	unsigned long this_bw = wb->avg_write_bandwidth;
 192	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
 193	unsigned long long min = wb->bdi->min_ratio;
 194	unsigned long long max = wb->bdi->max_ratio;
 195
 196	/*
 197	 * @wb may already be clean by the time control reaches here and
 198	 * the total may not include its bw.
 199	 */
 200	if (this_bw < tot_bw) {
 201		if (min) {
 202			min *= this_bw;
 203			do_div(min, tot_bw);
 204		}
 205		if (max < 100) {
 206			max *= this_bw;
 207			do_div(max, tot_bw);
 208		}
 209	}
 210
 211	*minp = min;
 212	*maxp = max;
 213}
 214
 215#else	/* CONFIG_CGROUP_WRITEBACK */
 216
 217#define GDTC_INIT(__wb)		.wb = (__wb),                           \
 218				.wb_completions = &(__wb)->completions
 219#define GDTC_INIT_NO_WB
 220#define MDTC_INIT(__wb, __gdtc)
 221
 222static bool mdtc_valid(struct dirty_throttle_control *dtc)
 223{
 224	return false;
 225}
 226
 227static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
 228{
 229	return &global_wb_domain;
 230}
 231
 232static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
 233{
 234	return NULL;
 235}
 236
 237static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
 238{
 239	return NULL;
 240}
 241
 242static void wb_min_max_ratio(struct bdi_writeback *wb,
 243			     unsigned long *minp, unsigned long *maxp)
 244{
 245	*minp = wb->bdi->min_ratio;
 246	*maxp = wb->bdi->max_ratio;
 247}
 248
 249#endif	/* CONFIG_CGROUP_WRITEBACK */
 250
 251/*
 252 * In a memory zone, there is a certain amount of pages we consider
 253 * available for the page cache, which is essentially the number of
 254 * free and reclaimable pages, minus some zone reserves to protect
 255 * lowmem and the ability to uphold the zone's watermarks without
 256 * requiring writeback.
 257 *
 258 * This number of dirtyable pages is the base value to which the
 259 * user-configurable dirty ratio is applied to give the effective number
 260 * of pages that are allowed to be actually dirtied, per individual zone
 261 * or globally by using the sum of dirtyable pages over all zones.
 262 *
 263 * Because the user is allowed to specify the dirty limit globally as
 264 * absolute number of bytes, calculating the per-zone dirty limit can
 265 * require translating the configured limit into a percentage of
 266 * global dirtyable memory first.
 267 */
 268
 269/**
 270 * zone_dirtyable_memory - number of dirtyable pages in a zone
 271 * @zone: the zone
 272 *
 273 * Returns the zone's number of pages potentially available for dirty
 274 * page cache.  This is the base value for the per-zone dirty limits.
 275 */
 276static unsigned long zone_dirtyable_memory(struct zone *zone)
 277{
 278	unsigned long nr_pages;
 279
 280	nr_pages = zone_page_state(zone, NR_FREE_PAGES);
 281	/*
 282	 * Pages reserved for the kernel should not be considered
 283	 * dirtyable, to prevent a situation where reclaim has to
 284	 * clean pages in order to balance the zones.
 285	 */
 286	nr_pages -= min(nr_pages, zone->totalreserve_pages);
 287
 288	nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
 289	nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);
 290
 291	return nr_pages;
 292}
 293
 294static unsigned long highmem_dirtyable_memory(unsigned long total)
 295{
 296#ifdef CONFIG_HIGHMEM
 297	int node;
 298	unsigned long x = 0;
 299
 300	for_each_node_state(node, N_HIGH_MEMORY) {
 301		struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
 302
 303		x += zone_dirtyable_memory(z);
 304	}
 305	/*
 306	 * Unreclaimable memory (kernel memory or anonymous memory
 307	 * without swap) can bring down the dirtyable pages below
 308	 * the zone's dirty balance reserve and the above calculation
 309	 * will underflow.  However we still want to add in nodes
 310	 * which are below threshold (negative values) to get a more
 311	 * accurate calculation but make sure that the total never
 312	 * underflows.
 313	 */
 314	if ((long)x < 0)
 315		x = 0;
 316
 317	/*
 318	 * Make sure that the number of highmem pages is never larger
 319	 * than the number of the total dirtyable memory. This can only
 320	 * occur in very strange VM situations but we want to make sure
 321	 * that this does not occur.
 322	 */
 323	return min(x, total);
 324#else
 325	return 0;
 326#endif
 327}
 328
 329/**
 330 * global_dirtyable_memory - number of globally dirtyable pages
 331 *
 332 * Returns the global number of pages potentially available for dirty
 333 * page cache.  This is the base value for the global dirty limits.
 334 */
 335static unsigned long global_dirtyable_memory(void)
 336{
 337	unsigned long x;
 338
 339	x = global_page_state(NR_FREE_PAGES);
 340	/*
 341	 * Pages reserved for the kernel should not be considered
 342	 * dirtyable, to prevent a situation where reclaim has to
 343	 * clean pages in order to balance the zones.
 344	 */
 345	x -= min(x, totalreserve_pages);
 346
 347	x += global_page_state(NR_INACTIVE_FILE);
 348	x += global_page_state(NR_ACTIVE_FILE);
 349
 350	if (!vm_highmem_is_dirtyable)
 351		x -= highmem_dirtyable_memory(x);
 352
 353	return x + 1;	/* Ensure that we never return 0 */
 354}
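
The editorial userspace sketch below redoes the global_dirtyable_memory() arithmetic with hypothetical page counts standing in for the global_page_state() reads above; the highmem adjustment is omitted.

/* userspace sketch of global_dirtyable_memory() with made-up counters (pages) */
#include <stdio.h>

int main(void)
{
	unsigned long free_pages    = 500000;	/* NR_FREE_PAGES */
	unsigned long reserve       = 20000;	/* totalreserve_pages */
	unsigned long inactive_file = 300000;	/* NR_INACTIVE_FILE */
	unsigned long active_file   = 200000;	/* NR_ACTIVE_FILE */
	unsigned long x = free_pages;

	x -= (x < reserve) ? x : reserve;	/* keep kernel reserves out */
	x += inactive_file + active_file;

	printf("dirtyable = %lu pages\n", x + 1);	/* +1: never return 0 */
	return 0;
}
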
 355
 356/**
 357 * domain_dirty_limits - calculate thresh and bg_thresh for a wb_domain
 358 * @dtc: dirty_throttle_control of interest
 359 *
 360 * Calculate @dtc->thresh and ->bg_thresh considering
 361 * vm_dirty_{bytes|ratio} and dirty_background_{bytes|ratio}.  The caller
 362 * must ensure that @dtc->avail is set before calling this function.  The
 363 * dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
 364 * real-time tasks.
 365 */
 366static void domain_dirty_limits(struct dirty_throttle_control *dtc)
 367{
 368	const unsigned long available_memory = dtc->avail;
 369	struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
 370	unsigned long bytes = vm_dirty_bytes;
 371	unsigned long bg_bytes = dirty_background_bytes;
 372	unsigned long ratio = vm_dirty_ratio;
 373	unsigned long bg_ratio = dirty_background_ratio;
 374	unsigned long thresh;
 375	unsigned long bg_thresh;
 376	struct task_struct *tsk;
 377
 378	/* gdtc is !NULL iff @dtc is for memcg domain */
 379	if (gdtc) {
 380		unsigned long global_avail = gdtc->avail;
 381
 382		/*
 383		 * The byte settings can't be applied directly to memcg
 384		 * domains.  Convert them to ratios by scaling against
 385		 * globally available memory.
 386		 */
 387		if (bytes)
 388			ratio = min(DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 /
 389				    global_avail, 100UL);
 390		if (bg_bytes)
 391			bg_ratio = min(DIV_ROUND_UP(bg_bytes, PAGE_SIZE) * 100 /
 392				       global_avail, 100UL);
 393		bytes = bg_bytes = 0;
 394	}
 395
 396	if (bytes)
 397		thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
 398	else
 399		thresh = (ratio * available_memory) / 100;
 400
 401	if (bg_bytes)
 402		bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
 403	else
 404		bg_thresh = (bg_ratio * available_memory) / 100;
 405
 406	if (bg_thresh >= thresh)
 407		bg_thresh = thresh / 2;
 408	tsk = current;
 409	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
 410		bg_thresh += bg_thresh / 4;
 411		thresh += thresh / 4;
 412	}
 413	dtc->thresh = thresh;
 414	dtc->bg_thresh = bg_thresh;
 415
 416	/* we should eventually report the domain in the TP */
 417	if (!gdtc)
 418		trace_global_dirty_state(bg_thresh, thresh);
 419}
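
To see the ratio path of domain_dirty_limits() with concrete numbers, this editorial sketch assumes 1,000,000 dirtyable pages and the default 20%/10% ratios; the *_bytes overrides and the memcg byte-to-ratio conversion are left out. The PF_LESS_THROTTLE/rt boost then lifts both thresholds by a quarter.

/* userspace sketch of the ratio path of domain_dirty_limits() */
#include <stdio.h>

static void sketch_dirty_limits(unsigned long avail, unsigned long ratio,
				unsigned long bg_ratio, int boosted)
{
	unsigned long thresh = ratio * avail / 100;
	unsigned long bg_thresh = bg_ratio * avail / 100;

	if (bg_thresh >= thresh)
		bg_thresh = thresh / 2;
	if (boosted) {			/* PF_LESS_THROTTLE or rt task */
		bg_thresh += bg_thresh / 4;
		thresh += thresh / 4;
	}
	printf("thresh=%lu bg_thresh=%lu\n", thresh, bg_thresh);
}

int main(void)
{
	sketch_dirty_limits(1000000, 20, 10, 0);	/* 200000 / 100000 */
	sketch_dirty_limits(1000000, 20, 10, 1);	/* 250000 / 125000 */
	return 0;
}
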
 420
 421/**
 422 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 423 * @pbackground: out parameter for bg_thresh
 424 * @pdirty: out parameter for thresh
 425 *
 426 * Calculate bg_thresh and thresh for global_wb_domain.  See
 427 * domain_dirty_limits() for details.
 428 */
 429void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 430{
 431	struct dirty_throttle_control gdtc = { GDTC_INIT_NO_WB };
 432
 433	gdtc.avail = global_dirtyable_memory();
 434	domain_dirty_limits(&gdtc);
 435
 436	*pbackground = gdtc.bg_thresh;
 437	*pdirty = gdtc.thresh;
 438}
 439
 440/**
 441 * zone_dirty_limit - maximum number of dirty pages allowed in a zone
 442 * @zone: the zone
 443 *
 444 * Returns the maximum number of dirty pages allowed in a zone, based
 445 * on the zone's dirtyable memory.
 446 */
 447static unsigned long zone_dirty_limit(struct zone *zone)
 448{
 449	unsigned long zone_memory = zone_dirtyable_memory(zone);
 450	struct task_struct *tsk = current;
 451	unsigned long dirty;
 452
 453	if (vm_dirty_bytes)
 454		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
 455			zone_memory / global_dirtyable_memory();
 456	else
 457		dirty = vm_dirty_ratio * zone_memory / 100;
 458
 459	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk))
 460		dirty += dirty / 4;
 461
 462	return dirty;
 463}
 464
 465/**
 466 * zone_dirty_ok - tells whether a zone is within its dirty limits
 467 * @zone: the zone to check
 468 *
 469 * Returns %true when the dirty pages in @zone are within the zone's
 470 * dirty limit, %false if the limit is exceeded.
 471 */
 472bool zone_dirty_ok(struct zone *zone)
 473{
 474	unsigned long limit = zone_dirty_limit(zone);
 475
 476	return zone_page_state(zone, NR_FILE_DIRTY) +
 477	       zone_page_state(zone, NR_UNSTABLE_NFS) +
 478	       zone_page_state(zone, NR_WRITEBACK) <= limit;
 479}
 480
 481int dirty_background_ratio_handler(struct ctl_table *table, int write,
 482		void __user *buffer, size_t *lenp,
 483		loff_t *ppos)
 484{
 485	int ret;
 486
 487	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 488	if (ret == 0 && write)
 489		dirty_background_bytes = 0;
 490	return ret;
 491}
 492
 493int dirty_background_bytes_handler(struct ctl_table *table, int write,
 494		void __user *buffer, size_t *lenp,
 495		loff_t *ppos)
 496{
 497	int ret;
 498
 499	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 500	if (ret == 0 && write)
 501		dirty_background_ratio = 0;
 502	return ret;
 503}
 504
 505int dirty_ratio_handler(struct ctl_table *table, int write,
 506		void __user *buffer, size_t *lenp,
 507		loff_t *ppos)
 508{
 509	int old_ratio = vm_dirty_ratio;
 510	int ret;
 511
 512	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 513	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
 514		writeback_set_ratelimit();
 515		vm_dirty_bytes = 0;
 516	}
 517	return ret;
 518}
 519
 520int dirty_bytes_handler(struct ctl_table *table, int write,
 521		void __user *buffer, size_t *lenp,
 522		loff_t *ppos)
 523{
 524	unsigned long old_bytes = vm_dirty_bytes;
 525	int ret;
 526
 527	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 528	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
 529		writeback_set_ratelimit();
 530		vm_dirty_ratio = 0;
 531	}
 532	return ret;
 533}
 534
 535static unsigned long wp_next_time(unsigned long cur_time)
 536{
 537	cur_time += VM_COMPLETIONS_PERIOD_LEN;
 538	/* 0 has a special meaning... */
 539	if (!cur_time)
 540		return 1;
 541	return cur_time;
 542}
 543
 544static void wb_domain_writeout_inc(struct wb_domain *dom,
 545				   struct fprop_local_percpu *completions,
 546				   unsigned int max_prop_frac)
 547{
 548	__fprop_inc_percpu_max(&dom->completions, completions,
 549			       max_prop_frac);
 550	/* First event after period switching was turned off? */
 551	if (!unlikely(dom->period_time)) {
 552		/*
 553		 * We can race with other __bdi_writeout_inc calls here but
 554		 * it does not cause any harm since the resulting time when
 555		 * timer will fire and what is in writeout_period_time will be
 556		 * roughly the same.
 557		 */
 558		dom->period_time = wp_next_time(jiffies);
 559		mod_timer(&dom->period_timer, dom->period_time);
 560	}
 561}
 562
 563/*
 564 * Increment @wb's writeout completion count and the global writeout
 565 * completion count. Called from test_clear_page_writeback().
 566 */
 567static inline void __wb_writeout_inc(struct bdi_writeback *wb)
 568{
 569	struct wb_domain *cgdom;
 570
 571	__inc_wb_stat(wb, WB_WRITTEN);
 572	wb_domain_writeout_inc(&global_wb_domain, &wb->completions,
 573			       wb->bdi->max_prop_frac);
 574
 575	cgdom = mem_cgroup_wb_domain(wb);
 576	if (cgdom)
 577		wb_domain_writeout_inc(cgdom, wb_memcg_completions(wb),
 578				       wb->bdi->max_prop_frac);
 579}
 580
 581void wb_writeout_inc(struct bdi_writeback *wb)
 582{
 583	unsigned long flags;
 584
 585	local_irq_save(flags);
 586	__wb_writeout_inc(wb);
 587	local_irq_restore(flags);
 588}
 589EXPORT_SYMBOL_GPL(wb_writeout_inc);
 590
 591/*
  592 * On an idle system, we can be called long after we were scheduled because
  593 * we use deferred timers, so count in the missed periods.
 594 */
 595static void writeout_period(unsigned long t)
 596{
 597	struct wb_domain *dom = (void *)t;
 598	int miss_periods = (jiffies - dom->period_time) /
 599						 VM_COMPLETIONS_PERIOD_LEN;
 600
 601	if (fprop_new_period(&dom->completions, miss_periods + 1)) {
 602		dom->period_time = wp_next_time(dom->period_time +
 603				miss_periods * VM_COMPLETIONS_PERIOD_LEN);
 604		mod_timer(&dom->period_timer, dom->period_time);
 605	} else {
 606		/*
 607		 * Aging has zeroed all fractions. Stop wasting CPU on period
 608		 * updates.
 609		 */
 610		dom->period_time = 0;
 611	}
 612}
 613
 614int wb_domain_init(struct wb_domain *dom, gfp_t gfp)
 615{
 616	memset(dom, 0, sizeof(*dom));
 617
 618	spin_lock_init(&dom->lock);
 619
 620	init_timer_deferrable(&dom->period_timer);
 621	dom->period_timer.function = writeout_period;
 622	dom->period_timer.data = (unsigned long)dom;
 623
 624	dom->dirty_limit_tstamp = jiffies;
 625
 626	return fprop_global_init(&dom->completions, gfp);
 627}
 628
 629#ifdef CONFIG_CGROUP_WRITEBACK
 630void wb_domain_exit(struct wb_domain *dom)
 631{
 632	del_timer_sync(&dom->period_timer);
 633	fprop_global_destroy(&dom->completions);
 634}
 635#endif
 636
 637/*
 638 * bdi_min_ratio keeps the sum of the minimum dirty shares of all
 639 * registered backing devices, which, for obvious reasons, can not
 640 * exceed 100%.
 641 */
 642static unsigned int bdi_min_ratio;
 643
 644int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
 645{
 646	int ret = 0;
 647
 648	spin_lock_bh(&bdi_lock);
 649	if (min_ratio > bdi->max_ratio) {
 650		ret = -EINVAL;
 651	} else {
 652		min_ratio -= bdi->min_ratio;
 653		if (bdi_min_ratio + min_ratio < 100) {
 654			bdi_min_ratio += min_ratio;
 655			bdi->min_ratio += min_ratio;
 656		} else {
 657			ret = -EINVAL;
 658		}
 659	}
 660	spin_unlock_bh(&bdi_lock);
 661
 662	return ret;
 663}
 664
 665int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
 666{
 667	int ret = 0;
 668
 669	if (max_ratio > 100)
 670		return -EINVAL;
 671
 672	spin_lock_bh(&bdi_lock);
 673	if (bdi->min_ratio > max_ratio) {
 674		ret = -EINVAL;
 675	} else {
 676		bdi->max_ratio = max_ratio;
 677		bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100;
 678	}
 679	spin_unlock_bh(&bdi_lock);
 680
 681	return ret;
 682}
 683EXPORT_SYMBOL(bdi_set_max_ratio);
 684
 685static unsigned long dirty_freerun_ceiling(unsigned long thresh,
 686					   unsigned long bg_thresh)
 687{
 688	return (thresh + bg_thresh) / 2;
 689}
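
With the hypothetical limits from the previous sketch, the freerun ceiling and the global setpoint (the midpoint of freerun and the hard limit, computed later in wb_position_ratio()) land as shown in this editorial example.

/* userspace sketch: freerun and setpoint for hypothetical limits (pages) */
#include <stdio.h>

int main(void)
{
	unsigned long thresh = 200000, bg_thresh = 100000;
	unsigned long freerun  = (thresh + bg_thresh) / 2;	/* 150000 */
	unsigned long setpoint = (freerun + thresh) / 2;	/* 175000 */

	printf("freerun=%lu setpoint=%lu limit=%lu\n", freerun, setpoint, thresh);
	return 0;
}
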
 690
 691static unsigned long hard_dirty_limit(struct wb_domain *dom,
 692				      unsigned long thresh)
 693{
 694	return max(thresh, dom->dirty_limit);
 695}
 696
 697/*
 698 * Memory which can be further allocated to a memcg domain is capped by
 699 * system-wide clean memory excluding the amount being used in the domain.
 700 */
 701static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
 702			    unsigned long filepages, unsigned long headroom)
 703{
 704	struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc);
 705	unsigned long clean = filepages - min(filepages, mdtc->dirty);
 706	unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty);
 707	unsigned long other_clean = global_clean - min(global_clean, clean);
 708
 709	mdtc->avail = filepages + min(headroom, other_clean);
 710}
 711
 712/**
 713 * __wb_calc_thresh - @wb's share of dirty throttling threshold
 714 * @dtc: dirty_throttle_context of interest
 715 *
 716 * Returns @wb's dirty limit in pages. The term "dirty" in the context of
 717 * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
 718 *
 719 * Note that balance_dirty_pages() will only seriously take it as a hard limit
 720 * when sleeping max_pause per page is not enough to keep the dirty pages under
 721 * control. For example, when the device is completely stalled due to some error
 722 * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
 723 * In other, normal situations it acts more gently, throttling the tasks
 724 * progressively (rather than blocking them outright) when the wb dirty pages go high.
 725 *
 726 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
 727 * - starving fast devices
 728 * - piling up dirty pages (that will take long time to sync) on slow devices
 729 *
 730 * The wb's share of dirty limit will be adapting to its throughput and
 731 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 732 */
 733static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc)
 734{
 735	struct wb_domain *dom = dtc_dom(dtc);
 736	unsigned long thresh = dtc->thresh;
 737	u64 wb_thresh;
 738	long numerator, denominator;
 739	unsigned long wb_min_ratio, wb_max_ratio;
 740
 741	/*
 742	 * Calculate this BDI's share of the thresh ratio.
 743	 */
 744	fprop_fraction_percpu(&dom->completions, dtc->wb_completions,
 745			      &numerator, &denominator);
 746
 747	wb_thresh = (thresh * (100 - bdi_min_ratio)) / 100;
 748	wb_thresh *= numerator;
 749	do_div(wb_thresh, denominator);
 750
 751	wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio);
 752
 753	wb_thresh += (thresh * wb_min_ratio) / 100;
 754	if (wb_thresh > (thresh * wb_max_ratio) / 100)
 755		wb_thresh = thresh * wb_max_ratio / 100;
 756
 757	return wb_thresh;
 758}
 759
 760unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
 761{
 762	struct dirty_throttle_control gdtc = { GDTC_INIT(wb),
 763					       .thresh = thresh };
 764	return __wb_calc_thresh(&gdtc);
 765}
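
The editorial sketch below condenses the proportional split done by __wb_calc_thresh(): a writeback context that recently completed a quarter of all writeout gets roughly a quarter of the global threshold, clamped by the min/max ratios. For simplicity a single min_ratio stands in for both the system-wide bdi_min_ratio and the per-wb minimum; all numbers are hypothetical.

/* userspace sketch of the proportional split in __wb_calc_thresh() */
#include <stdio.h>
#include <stdint.h>

static unsigned long sketch_wb_thresh(unsigned long thresh,
				      long numerator, long denominator,
				      unsigned long min_ratio,
				      unsigned long max_ratio)
{
	uint64_t wb_thresh = (uint64_t)thresh * (100 - min_ratio) / 100;

	wb_thresh = wb_thresh * numerator / denominator;	/* completions share */
	wb_thresh += thresh * min_ratio / 100;			/* guaranteed floor */
	if (wb_thresh > (uint64_t)thresh * max_ratio / 100)	/* hard ceiling */
		wb_thresh = (uint64_t)thresh * max_ratio / 100;
	return wb_thresh;
}

int main(void)
{
	/* 200000-page global threshold, wb did 1/4 of recent writeout */
	printf("wb_thresh=%lu\n", sketch_wb_thresh(200000, 1, 4, 0, 100)); /* 50000 */
	return 0;
}
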
 766
 767/*
 768 *                           setpoint - dirty 3
 769 *        f(dirty) := 1.0 + (----------------)
 770 *                           limit - setpoint
 771 *
 772 * it is a 3rd order polynomial subject to
 773 *
 774 * (1) f(freerun)  = 2.0 => rampup dirty_ratelimit reasonably fast
 775 * (2) f(setpoint) = 1.0 => the balance point
 776 * (3) f(limit)    = 0   => the hard limit
 777 * (4) df/dx      <= 0	 => negative feedback control
 778 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
 779 *     => fast response on large errors; small oscillation near setpoint
 780 */
 781static long long pos_ratio_polynom(unsigned long setpoint,
 782					  unsigned long dirty,
 783					  unsigned long limit)
 784{
 785	long long pos_ratio;
 786	long x;
 787
 788	x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
 789		      (limit - setpoint) | 1);
 790	pos_ratio = x;
 791	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
 792	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
 793	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;
 794
 795	return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT);
 796}
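
Evaluating the cubic in floating point reproduces the three anchor points listed above; this is an editorial sketch only, with the RATELIMIT_CALC_SHIFT fixed-point arithmetic replaced by doubles for readability.

/* userspace sketch: shape of pos_ratio_polynom() */
#include <stdio.h>

static double pos_ratio(double setpoint, double dirty, double limit)
{
	double x = (setpoint - dirty) / (limit - setpoint);
	double p = 1.0 + x * x * x;

	return p < 0.0 ? 0.0 : (p > 2.0 ? 2.0 : p);	/* clamp to [0, 2] */
}

int main(void)
{
	double freerun = 150000, limit = 200000;
	double setpoint = (freerun + limit) / 2;	/* 175000 */

	printf("%.2f\n", pos_ratio(setpoint, freerun, limit));		/* 2.00 */
	printf("%.2f\n", pos_ratio(setpoint, setpoint, limit));	/* 1.00 */
	printf("%.2f\n", pos_ratio(setpoint, limit, limit));		/* 0.00 */
	return 0;
}
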
 797
 798/*
 799 * Dirty position control.
 800 *
 801 * (o) global/bdi setpoints
 802 *
 803 * We want the dirty pages be balanced around the global/wb setpoints.
 804 * When the number of dirty pages is higher/lower than the setpoint, the
 805 * dirty position control ratio (and hence task dirty ratelimit) will be
 806 * decreased/increased to bring the dirty pages back to the setpoint.
 807 *
 808 *     pos_ratio = 1 << RATELIMIT_CALC_SHIFT
 809 *
 810 *     if (dirty < setpoint) scale up   pos_ratio
 811 *     if (dirty > setpoint) scale down pos_ratio
 812 *
 813 *     if (wb_dirty < wb_setpoint) scale up   pos_ratio
 814 *     if (wb_dirty > wb_setpoint) scale down pos_ratio
 815 *
 816 *     task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
 817 *
 818 * (o) global control line
 819 *
 820 *     ^ pos_ratio
 821 *     |
 822 *     |            |<===== global dirty control scope ======>|
 823 * 2.0 .............*
 824 *     |            .*
 825 *     |            . *
 826 *     |            .   *
 827 *     |            .     *
 828 *     |            .        *
 829 *     |            .            *
 830 * 1.0 ................................*
 831 *     |            .                  .     *
 832 *     |            .                  .          *
 833 *     |            .                  .              *
 834 *     |            .                  .                 *
 835 *     |            .                  .                    *
 836 *   0 +------------.------------------.----------------------*------------->
 837 *           freerun^          setpoint^                 limit^   dirty pages
 838 *
 839 * (o) wb control line
 840 *
 841 *     ^ pos_ratio
 842 *     |
 843 *     |            *
 844 *     |              *
 845 *     |                *
 846 *     |                  *
 847 *     |                    * |<=========== span ============>|
 848 * 1.0 .......................*
 849 *     |                      . *
 850 *     |                      .   *
 851 *     |                      .     *
 852 *     |                      .       *
 853 *     |                      .         *
 854 *     |                      .           *
 855 *     |                      .             *
 856 *     |                      .               *
 857 *     |                      .                 *
 858 *     |                      .                   *
 859 *     |                      .                     *
 860 * 1/4 ...............................................* * * * * * * * * * * *
 861 *     |                      .                         .
 862 *     |                      .                           .
 863 *     |                      .                             .
 864 *   0 +----------------------.-------------------------------.------------->
 865 *                wb_setpoint^                    x_intercept^
 866 *
 867 * The wb control line won't drop below pos_ratio=1/4, so that wb_dirty can
 868 * be smoothly throttled down to normal if it starts high in situations like
 869 * - start writing to a slow SD card and a fast disk at the same time. The SD
 870 *   card's wb_dirty may rush to many times higher than wb_setpoint.
 871 * - the wb dirty thresh drops quickly due to change of JBOD workload
 872 */
 873static void wb_position_ratio(struct dirty_throttle_control *dtc)
 874{
 875	struct bdi_writeback *wb = dtc->wb;
 876	unsigned long write_bw = wb->avg_write_bandwidth;
 877	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
 878	unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
 879	unsigned long wb_thresh = dtc->wb_thresh;
 880	unsigned long x_intercept;
 881	unsigned long setpoint;		/* dirty pages' target balance point */
 882	unsigned long wb_setpoint;
 883	unsigned long span;
 884	long long pos_ratio;		/* for scaling up/down the rate limit */
 885	long x;
 886
 887	dtc->pos_ratio = 0;
 888
 889	if (unlikely(dtc->dirty >= limit))
 890		return;
 891
 892	/*
 893	 * global setpoint
 894	 *
 895	 * See comment for pos_ratio_polynom().
 896	 */
 897	setpoint = (freerun + limit) / 2;
 898	pos_ratio = pos_ratio_polynom(setpoint, dtc->dirty, limit);
 899
 900	/*
 901	 * The strictlimit feature is a tool preventing mistrusted filesystems
 902	 * from growing a large number of dirty pages before throttling. For
 903	 * such filesystems balance_dirty_pages always checks wb counters
 904	 * against wb limits. Even if global "nr_dirty" is under "freerun".
 905	 * This is especially important for fuse which sets bdi->max_ratio to
 906	 * 1% by default. Without strictlimit feature, fuse writeback may
 907	 * consume arbitrary amount of RAM because it is accounted in
 908	 * NR_WRITEBACK_TEMP which is not involved in calculating "nr_dirty".
 909	 *
 910	 * Here, in wb_position_ratio(), we calculate pos_ratio based on
 911	 * two values: wb_dirty and wb_thresh. Let's consider an example:
 912	 * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global
 913	 * limits are set by default to 10% and 20% (background and throttle).
 914	 * Then wb_thresh is 1% of 20% of 16GB. This amounts to ~8K pages.
 915	 * wb_calc_thresh(wb, bg_thresh) is about ~4K pages. wb_setpoint is
 916	 * about ~6K pages (as the average of background and throttle wb
 917	 * limits). The 3rd order polynomial will provide positive feedback if
 918	 * wb_dirty is under wb_setpoint and vice versa.
 919	 *
 920	 * Note, that we cannot use global counters in these calculations
 921	 * because we want to throttle process writing to a strictlimit wb
 922	 * much earlier than global "freerun" is reached (~23MB vs. ~2.3GB
 923	 * in the example above).
 924	 */
 925	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
 926		long long wb_pos_ratio;
 927
 928		if (dtc->wb_dirty < 8) {
 929			dtc->pos_ratio = min_t(long long, pos_ratio * 2,
 930					   2 << RATELIMIT_CALC_SHIFT);
 931			return;
 932		}
 933
 934		if (dtc->wb_dirty >= wb_thresh)
 935			return;
 936
 937		wb_setpoint = dirty_freerun_ceiling(wb_thresh,
 938						    dtc->wb_bg_thresh);
 939
 940		if (wb_setpoint == 0 || wb_setpoint == wb_thresh)
 941			return;
 942
 943		wb_pos_ratio = pos_ratio_polynom(wb_setpoint, dtc->wb_dirty,
 944						 wb_thresh);
 945
 946		/*
 947		 * Typically, for strictlimit case, wb_setpoint << setpoint
 948		 * and pos_ratio >> wb_pos_ratio. In other words, the global
 949		 * state ("dirty") is not the limiting factor and we have to
 950		 * make decision based on wb counters. But there is an
 951		 * important case when global pos_ratio should get precedence:
 952		 * global limits are exceeded (e.g. due to activities on other
 953		 * wb's) while given strictlimit wb is below limit.
 954		 *
 955		 * "pos_ratio * wb_pos_ratio" would work for the case above,
 956		 * but it would look too non-natural for the case of all
 957		 * activity in the system coming from a single strictlimit wb
 958		 * with bdi->max_ratio == 100%.
 959		 *
 960		 * Note that min() below somewhat changes the dynamics of the
 961		 * control system. Normally, pos_ratio value can be well over 3
 962		 * (when globally we are at freerun and wb is well below wb
 963		 * setpoint). Now the maximum pos_ratio in the same situation
 964		 * is 2. We might want to tweak this if we observe the control
 965		 * system is too slow to adapt.
 966		 */
 967		dtc->pos_ratio = min(pos_ratio, wb_pos_ratio);
 968		return;
 969	}
 970
 971	/*
 972	 * We have computed basic pos_ratio above based on global situation. If
 973	 * the wb is over/under its share of dirty pages, we want to scale
 974	 * pos_ratio further down/up. That is done by the following mechanism.
 975	 */
 976
 977	/*
 978	 * wb setpoint
 979	 *
 980	 *        f(wb_dirty) := 1.0 + k * (wb_dirty - wb_setpoint)
 981	 *
 982	 *                        x_intercept - wb_dirty
 983	 *                     := --------------------------
 984	 *                        x_intercept - wb_setpoint
 985	 *
 986	 * The main wb control line is a linear function that subjects to
 987	 *
 988	 * (1) f(wb_setpoint) = 1.0
 989	 * (2) k = - 1 / (8 * write_bw)  (in single wb case)
 990	 *     or equally: x_intercept = wb_setpoint + 8 * write_bw
 991	 *
 992	 * For single wb case, the dirty pages are observed to fluctuate
 993	 * regularly within range
 994	 *        [wb_setpoint - write_bw/2, wb_setpoint + write_bw/2]
 995	 * for various filesystems, where (2) can yield in a reasonable 12.5%
 996	 * fluctuation range for pos_ratio.
 997	 *
 998	 * For JBOD case, wb_thresh (not wb_dirty!) could fluctuate up to its
 999	 * own size, so move the slope over accordingly and choose a slope that
1000	 * yields 100% pos_ratio fluctuation on suddenly doubled wb_thresh.
1001	 */
1002	if (unlikely(wb_thresh > dtc->thresh))
1003		wb_thresh = dtc->thresh;
1004	/*
1005	 * It's very possible that wb_thresh is close to 0 not because the
1006	 * device is slow, but because it has remained inactive for a long time.
1007	 * Honour such devices with a reasonably good (hopefully IO efficient)
1008	 * threshold, so that the occasional writes won't be blocked and active
1009	 * writes can rampup the threshold quickly.
1010	 */
1011	wb_thresh = max(wb_thresh, (limit - dtc->dirty) / 8);
1012	/*
1013	 * scale global setpoint to wb's:
1014	 *	wb_setpoint = setpoint * wb_thresh / thresh
1015	 */
1016	x = div_u64((u64)wb_thresh << 16, dtc->thresh | 1);
1017	wb_setpoint = setpoint * (u64)x >> 16;
1018	/*
1019	 * Use span=(8*write_bw) in single wb case as indicated by
1020	 * (thresh - wb_thresh ~= 0) and transit to wb_thresh in JBOD case.
1021	 *
1022	 *        wb_thresh                    thresh - wb_thresh
1023	 * span = --------- * (8 * write_bw) + ------------------ * wb_thresh
1024	 *         thresh                           thresh
1025	 */
1026	span = (dtc->thresh - wb_thresh + 8 * write_bw) * (u64)x >> 16;
1027	x_intercept = wb_setpoint + span;
1028
1029	if (dtc->wb_dirty < x_intercept - span / 4) {
1030		pos_ratio = div64_u64(pos_ratio * (x_intercept - dtc->wb_dirty),
1031				      (x_intercept - wb_setpoint) | 1);
1032	} else
1033		pos_ratio /= 4;
1034
1035	/*
1036	 * wb reserve area, safeguard against dirty pool underrun and disk idle
1037	 * It may push the desired control point of global dirty pages higher
1038	 * than setpoint.
1039	 */
1040	x_intercept = wb_thresh / 2;
1041	if (dtc->wb_dirty < x_intercept) {
1042		if (dtc->wb_dirty > x_intercept / 8)
1043			pos_ratio = div_u64(pos_ratio * x_intercept,
1044					    dtc->wb_dirty);
1045		else
1046			pos_ratio *= 8;
1047	}
1048
1049	dtc->pos_ratio = pos_ratio;
1050}
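
The per-wb linear control line can be tried with hypothetical numbers: assuming an average write bandwidth of 25000 pages/s and a wb_setpoint of 50000 pages, the editorial sketch below shows pos_ratio moving through roughly a 12.5% range while wb_dirty stays within +-write_bw/2 of wb_setpoint, as the comment above describes.

/* userspace sketch of the wb control line f(wb_dirty) */
#include <stdio.h>

int main(void)
{
	unsigned long write_bw = 25000;			/* pages/s, assumed */
	unsigned long wb_setpoint = 50000;		/* pages, assumed */
	unsigned long x_intercept = wb_setpoint + 8 * write_bw;
	unsigned long wb_dirty;

	for (wb_dirty = 37500; wb_dirty <= 62500; wb_dirty += 12500)
		printf("wb_dirty=%lu f=%.4f\n", wb_dirty,
		       (double)(x_intercept - wb_dirty) /
		       (x_intercept - wb_setpoint));
	return 0;	/* prints 1.0625, 1.0000, 0.9375 */
}
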
1051
1052static void wb_update_write_bandwidth(struct bdi_writeback *wb,
1053				      unsigned long elapsed,
1054				      unsigned long written)
1055{
1056	const unsigned long period = roundup_pow_of_two(3 * HZ);
1057	unsigned long avg = wb->avg_write_bandwidth;
1058	unsigned long old = wb->write_bandwidth;
1059	u64 bw;
1060
1061	/*
1062	 * bw = written * HZ / elapsed
1063	 *
1064	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
1065	 * write_bandwidth = ---------------------------------------------------
1066	 *                                          period
1067	 *
1068	 * @written may have decreased due to account_page_redirty().
1069	 * Avoid underflowing @bw calculation.
1070	 */
1071	bw = written - min(written, wb->written_stamp);
1072	bw *= HZ;
1073	if (unlikely(elapsed > period)) {
1074		do_div(bw, elapsed);
1075		avg = bw;
1076		goto out;
1077	}
1078	bw += (u64)wb->write_bandwidth * (period - elapsed);
1079	bw >>= ilog2(period);
1080
1081	/*
1082	 * one more level of smoothing, for filtering out sudden spikes
1083	 */
1084	if (avg > old && old >= (unsigned long)bw)
1085		avg -= (avg - old) >> 3;
1086
1087	if (avg < old && old <= (unsigned long)bw)
1088		avg += (old - avg) >> 3;
1089
1090out:
1091	/* keep avg > 0 to guarantee that tot > 0 if there are dirty wbs */
1092	avg = max(avg, 1LU);
1093	if (wb_has_dirty_io(wb)) {
1094		long delta = avg - wb->avg_write_bandwidth;
1095		WARN_ON_ONCE(atomic_long_add_return(delta,
1096					&wb->bdi->tot_write_bandwidth) <= 0);
1097	}
1098	wb->write_bandwidth = bw;
1099	wb->avg_write_bandwidth = avg;
1100}
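
The editorial sketch below replays the two-level smoothing in userspace with assumed inputs (HZ=1000, a 200-jiffy interval, 7000 pages written, i.e. an instantaneous 35000 pages/s): the ~3s weighted average edges up toward the instantaneous rate, and avg only chases write_bandwidth because both move in the same direction.

/* userspace sketch of wb_update_write_bandwidth() smoothing */
#include <stdio.h>

#define HZ 1000		/* assumption */

int main(void)
{
	const unsigned long period = 4096;	/* roundup_pow_of_two(3 * HZ) */
	unsigned long old = 25000;		/* previous write_bandwidth, pages/s */
	unsigned long avg = 24000;		/* previous avg_write_bandwidth */
	unsigned long elapsed = 200;		/* jiffies since last update */
	unsigned long written = 7000;		/* pages completed this interval */
	unsigned long long bw;

	bw = (unsigned long long)written * HZ;		/* == inst_bw * elapsed */
	bw += (unsigned long long)old * (period - elapsed);
	bw /= period;					/* new ~3s estimate: 25488 */

	if (avg > old && old >= bw)
		avg -= (avg - old) / 8;
	if (avg < old && old <= bw)
		avg += (old - avg) / 8;			/* 24000 -> 24125 */

	printf("write_bandwidth=%llu avg=%lu\n", bw, avg);
	return 0;
}
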
1101
1102static void update_dirty_limit(struct dirty_throttle_control *dtc)
1103{
1104	struct wb_domain *dom = dtc_dom(dtc);
1105	unsigned long thresh = dtc->thresh;
1106	unsigned long limit = dom->dirty_limit;
1107
1108	/*
1109	 * Follow up in one step.
1110	 */
1111	if (limit < thresh) {
1112		limit = thresh;
1113		goto update;
1114	}
1115
1116	/*
1117	 * Follow down slowly. Use the higher one as the target, because thresh
1118	 * may drop below dirty. This is exactly the reason to introduce
1119	 * dom->dirty_limit which is guaranteed to lie above the dirty pages.
1120	 */
1121	thresh = max(thresh, dtc->dirty);
1122	if (limit > thresh) {
1123		limit -= (limit - thresh) >> 5;
1124		goto update;
1125	}
1126	return;
1127update:
1128	dom->dirty_limit = limit;
1129}
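
The "follow down slowly" branch decays dom->dirty_limit toward a lower thresh by 1/32 of the remaining gap per ~200ms update rather than snapping below the dirty count; the editorial sketch below shows a few such steps with hypothetical numbers.

/* userspace sketch of the decay step in update_dirty_limit() */
#include <stdio.h>

int main(void)
{
	unsigned long limit = 200000;	/* dom->dirty_limit */
	unsigned long thresh = 150000;	/* new, lower threshold */
	int i;

	for (i = 1; i <= 5; i++) {
		limit -= (limit - thresh) >> 5;
		printf("step %d: limit=%lu\n", i, limit);
	}
	return 0;
}
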
1130
1131static void domain_update_bandwidth(struct dirty_throttle_control *dtc,
1132				    unsigned long now)
1133{
1134	struct wb_domain *dom = dtc_dom(dtc);
1135
1136	/*
1137	 * check locklessly first to optimize away locking most of the time
1138	 */
1139	if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL))
1140		return;
1141
1142	spin_lock(&dom->lock);
1143	if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) {
1144		update_dirty_limit(dtc);
1145		dom->dirty_limit_tstamp = now;
1146	}
1147	spin_unlock(&dom->lock);
1148}
1149
1150/*
1151 * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
1152 *
1153 * Normal wb tasks will be curbed at or below it in long term.
1154 * Obviously it should be around (write_bw / N) when there are N dd tasks.
1155 */
1156static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
1157				      unsigned long dirtied,
1158				      unsigned long elapsed)
1159{
1160	struct bdi_writeback *wb = dtc->wb;
1161	unsigned long dirty = dtc->dirty;
1162	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
1163	unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
1164	unsigned long setpoint = (freerun + limit) / 2;
1165	unsigned long write_bw = wb->avg_write_bandwidth;
1166	unsigned long dirty_ratelimit = wb->dirty_ratelimit;
1167	unsigned long dirty_rate;
1168	unsigned long task_ratelimit;
1169	unsigned long balanced_dirty_ratelimit;
1170	unsigned long step;
1171	unsigned long x;
1172	unsigned long shift;
1173
1174	/*
1175	 * The dirty rate will match the writeout rate in long term, except
1176	 * when dirty pages are truncated by userspace or re-dirtied by FS.
1177	 */
1178	dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed;
1179
1180	/*
1181	 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
1182	 */
1183	task_ratelimit = (u64)dirty_ratelimit *
1184					dtc->pos_ratio >> RATELIMIT_CALC_SHIFT;
1185	task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */
1186
1187	/*
1188	 * A linear estimation of the "balanced" throttle rate. The theory is,
1189	 * if there are N dd tasks, each throttled at task_ratelimit, the wb's
1190	 * dirty_rate will be measured to be (N * task_ratelimit). So the below
1191	 * formula will yield the balanced rate limit (write_bw / N).
1192	 *
1193	 * Note that the expanded form is not a pure rate feedback:
1194	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate)		     (1)
1195	 * but also takes pos_ratio into account:
1196	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio  (2)
1197	 *
1198	 * (1) is not realistic because pos_ratio also takes part in balancing
1199	 * the dirty rate.  Consider the state
1200	 *	pos_ratio = 0.5						     (3)
1201	 *	rate = 2 * (write_bw / N)				     (4)
1202	 * If (1) is used, it will get stuck in that state! Because each dd will
1203	 * be throttled at
1204	 *	task_ratelimit = pos_ratio * rate = (write_bw / N)	     (5)
1205	 * yielding
1206	 *	dirty_rate = N * task_ratelimit = write_bw		     (6)
1207	 * put (6) into (1) we get
1208	 *	rate_(i+1) = rate_(i)					     (7)
1209	 *
1210	 * So we end up using (2) to always keep
1211	 *	rate_(i+1) ~= (write_bw / N)				     (8)
1212	 * regardless of the value of pos_ratio. As long as (8) is satisfied,
1213	 * pos_ratio is able to drive itself to 1.0, which is not only where
1214	 * the dirty count meets the setpoint, but also where the slope of
1215	 * pos_ratio is most flat and hence task_ratelimit is least fluctuated.
1216	 */
1217	balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
1218					   dirty_rate | 1);
1219	/*
1220	 * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
1221	 */
1222	if (unlikely(balanced_dirty_ratelimit > write_bw))
1223		balanced_dirty_ratelimit = write_bw;
1224
1225	/*
1226	 * We could safely do this and return immediately:
1227	 *
1228	 *	wb->dirty_ratelimit = balanced_dirty_ratelimit;
1229	 *
1230	 * However to get a more stable dirty_ratelimit, the below elaborated
1231	 * code makes use of task_ratelimit to filter out singular points and
1232	 * limit the step size.
1233	 *
1234	 * The below code essentially only uses the relative value of
1235	 *
1236	 *	task_ratelimit - dirty_ratelimit
1237	 *	= (pos_ratio - 1) * dirty_ratelimit
1238	 *
1239	 * which reflects the direction and size of dirty position error.
1240	 */
1241
1242	/*
1243	 * dirty_ratelimit will follow balanced_dirty_ratelimit iff
1244	 * task_ratelimit is on the same side of dirty_ratelimit, too.
1245	 * For example, when
1246	 * - dirty_ratelimit > balanced_dirty_ratelimit
1247	 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
1248	 * lowering dirty_ratelimit will help meet both the position and rate
1249	 * control targets. Otherwise, don't update dirty_ratelimit if it will
1250	 * only help meet the rate target. After all, what the users ultimately
1251	 * feel and care are stable dirty rate and small position error.
1252	 *
1253	 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
1254	 * and filter out the singular points of balanced_dirty_ratelimit, which
1255	 * keeps jumping around randomly and can even leap far away at times
1256	 * due to the small 200ms estimation period of dirty_rate (we want to
1257	 * keep that period small to reduce time lags).
1258	 */
1259	step = 0;
1260
1261	/*
1262	 * For strictlimit case, calculations above were based on wb counters
1263	 * and limits (starting from pos_ratio = wb_position_ratio() and up to
1264	 * balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate).
1265	 * Hence, to calculate "step" properly, we have to use wb_dirty as
1266	 * "dirty" and wb_setpoint as "setpoint".
1267	 *
1268	 * We rampup dirty_ratelimit forcibly if wb_dirty is low because
1269	 * it's possible that wb_thresh is close to zero due to inactivity
1270	 * of backing device.
1271	 */
1272	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
1273		dirty = dtc->wb_dirty;
1274		if (dtc->wb_dirty < 8)
1275			setpoint = dtc->wb_dirty + 1;
1276		else
1277			setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2;
1278	}
1279
1280	if (dirty < setpoint) {
1281		x = min3(wb->balanced_dirty_ratelimit,
1282			 balanced_dirty_ratelimit, task_ratelimit);
1283		if (dirty_ratelimit < x)
1284			step = x - dirty_ratelimit;
1285	} else {
1286		x = max3(wb->balanced_dirty_ratelimit,
1287			 balanced_dirty_ratelimit, task_ratelimit);
1288		if (dirty_ratelimit > x)
1289			step = dirty_ratelimit - x;
1290	}
1291
1292	/*
1293	 * Don't pursue 100% rate matching. It's impossible since the balanced
1294	 * rate itself is constantly fluctuating. So decrease the track speed
1295	 * when it gets close to the target. Helps eliminate pointless tremors.
1296	 */
1297	shift = dirty_ratelimit / (2 * step + 1);
1298	if (shift < BITS_PER_LONG)
1299		step = DIV_ROUND_UP(step >> shift, 8);
1300	else
1301		step = 0;
1302
1303	if (dirty_ratelimit < balanced_dirty_ratelimit)
1304		dirty_ratelimit += step;
1305	else
1306		dirty_ratelimit -= step;
1307
1308	wb->dirty_ratelimit = max(dirty_ratelimit, 1UL);
1309	wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit;
1310
1311	trace_bdi_dirty_ratelimit(wb, dirty_rate, task_ratelimit);
1312}
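
The balanced-rate estimate can be checked with a toy case (editorial sketch, hypothetical numbers): if N tasks are each throttled at task_ratelimit, the measured dirty_rate is about N * task_ratelimit, so task_ratelimit * write_bw / dirty_rate collapses to write_bw / N.

/* userspace sketch of the balanced_dirty_ratelimit estimate */
#include <stdio.h>

int main(void)
{
	unsigned long write_bw = 25000;		/* device writeout, pages/s */
	unsigned long n_tasks = 4;		/* concurrent dirtiers */
	unsigned long task_ratelimit = 10000;	/* current per-task rate */
	unsigned long dirty_rate = n_tasks * task_ratelimit;
	unsigned long balanced = (unsigned long)
		((unsigned long long)task_ratelimit * write_bw / dirty_rate);

	printf("balanced=%lu write_bw/N=%lu\n", balanced, write_bw / n_tasks);
	return 0;	/* both print 6250 */
}
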
1313
1314static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc,
1315				  struct dirty_throttle_control *mdtc,
1316				  unsigned long start_time,
1317				  bool update_ratelimit)
1318{
1319	struct bdi_writeback *wb = gdtc->wb;
1320	unsigned long now = jiffies;
1321	unsigned long elapsed = now - wb->bw_time_stamp;
1322	unsigned long dirtied;
1323	unsigned long written;
1324
1325	lockdep_assert_held(&wb->list_lock);
1326
1327	/*
1328	 * rate-limit, only update once every 200ms.
1329	 */
1330	if (elapsed < BANDWIDTH_INTERVAL)
1331		return;
1332
1333	dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]);
1334	written = percpu_counter_read(&wb->stat[WB_WRITTEN]);
1335
1336	/*
1337	 * Skip quiet periods when disk bandwidth is under-utilized.
1338	 * (at least 1s idle time between two flusher runs)
1339	 */
1340	if (elapsed > HZ && time_before(wb->bw_time_stamp, start_time))
1341		goto snapshot;
1342
1343	if (update_ratelimit) {
1344		domain_update_bandwidth(gdtc, now);
1345		wb_update_dirty_ratelimit(gdtc, dirtied, elapsed);
1346
1347		/*
1348		 * @mdtc is always NULL if !CGROUP_WRITEBACK but the
1349		 * compiler has no way to figure that out.  Help it.
1350		 */
1351		if (IS_ENABLED(CONFIG_CGROUP_WRITEBACK) && mdtc) {
1352			domain_update_bandwidth(mdtc, now);
1353			wb_update_dirty_ratelimit(mdtc, dirtied, elapsed);
1354		}
1355	}
1356	wb_update_write_bandwidth(wb, elapsed, written);
1357
1358snapshot:
1359	wb->dirtied_stamp = dirtied;
1360	wb->written_stamp = written;
1361	wb->bw_time_stamp = now;
1362}
1363
1364void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time)
1365{
1366	struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };
1367
1368	__wb_update_bandwidth(&gdtc, NULL, start_time, false);
1369}
1370
1371/*
1372 * After a task dirtied this many pages, balance_dirty_pages_ratelimited()
1373 * will look to see if it needs to start dirty throttling.
1374 *
1375 * If dirty_poll_interval is too low, big NUMA machines will call the expensive
1376 * global_page_state() too often. So scale it near-sqrt to the safety margin
1377 * (the number of pages we may dirty without exceeding the dirty limits).
1378 */
1379static unsigned long dirty_poll_interval(unsigned long dirty,
1380					 unsigned long thresh)
1381{
1382	if (thresh > dirty)
1383		return 1UL << (ilog2(thresh - dirty) >> 1);
1384
1385	return 1;
1386}
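
A small editorial re-implementation shows the near-sqrt scaling: a 1024-page safety margin yields a 32-page poll interval and a 100,000-page margin yields 256 pages. ilog2() is open-coded because the kernel helper is not available in userspace.

/* userspace sketch of dirty_poll_interval() */
#include <stdio.h>

static unsigned long ilog2_ul(unsigned long v)
{
	unsigned long l = 0;

	while (v >>= 1)
		l++;
	return l;
}

static unsigned long poll_interval(unsigned long dirty, unsigned long thresh)
{
	if (thresh > dirty)
		return 1UL << (ilog2_ul(thresh - dirty) >> 1);
	return 1;
}

int main(void)
{
	printf("%lu\n", poll_interval(100000, 101024));	/* margin 1024   -> 32  */
	printf("%lu\n", poll_interval(100000, 200000));	/* margin 100000 -> 256 */
	return 0;
}
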
1387
1388static unsigned long wb_max_pause(struct bdi_writeback *wb,
1389				  unsigned long wb_dirty)
1390{
1391	unsigned long bw = wb->avg_write_bandwidth;
1392	unsigned long t;
1393
1394	/*
1395	 * Limit pause time for small memory systems. If sleeping for too long
1396	 * time, a small pool of dirty/writeback pages may go empty and disk go
1397	 * idle.
1398	 *
1399	 * 8 serves as the safety ratio.
1400	 */
1401	t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
1402	t++;
1403
1404	return min_t(unsigned long, t, MAX_PAUSE);
1405}
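
With assumed inputs (HZ=1000, a 25000 pages/s average bandwidth and only 1000 dirty pages on the wb), this editorial sketch shows the pause being capped at 6 jiffies, far below the 200-jiffy MAX_PAUSE, so a small dirty pool cannot be slept away while the disk goes idle.

/* userspace sketch of wb_max_pause() */
#include <stdio.h>

#define HZ		1000		/* assumption */
#define MAX_PAUSE	(HZ / 5)

int main(void)
{
	unsigned long bw = 25000;	/* avg_write_bandwidth, pages/s */
	unsigned long wb_dirty = 1000;	/* dirty + writeback pages on this wb */
	unsigned long chunk = 128;	/* roundup_pow_of_two(1 + HZ / 8) */
	unsigned long t = wb_dirty / (1 + bw / chunk) + 1;

	printf("max_pause = %lu jiffies (cap %d)\n",
	       t < MAX_PAUSE ? t : MAX_PAUSE, MAX_PAUSE);
	return 0;	/* prints 6 */
}
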
1406
1407static long wb_min_pause(struct bdi_writeback *wb,
1408			 long max_pause,
1409			 unsigned long task_ratelimit,
1410			 unsigned long dirty_ratelimit,
1411			 int *nr_dirtied_pause)
1412{
1413	long hi = ilog2(wb->avg_write_bandwidth);
1414	long lo = ilog2(wb->dirty_ratelimit);
1415	long t;		/* target pause */
1416	long pause;	/* estimated next pause */
1417	int pages;	/* target nr_dirtied_pause */
1418
1419	/* target for 10ms pause on 1-dd case */
1420	t = max(1, HZ / 100);
1421
1422	/*
1423	 * Scale up pause time for concurrent dirtiers in order to reduce CPU
1424	 * overheads.
1425	 *
1426	 * (N * 10ms) on 2^N concurrent tasks.
1427	 */
1428	if (hi > lo)
1429		t += (hi - lo) * (10 * HZ) / 1024;
1430
1431	/*
1432	 * This is a bit convoluted. We try to base the next nr_dirtied_pause
1433	 * on the much more stable dirty_ratelimit. However the next pause time
1434	 * will be computed based on task_ratelimit and the two rate limits may
1435	 * depart considerably at some time. Especially if task_ratelimit goes
1436	 * below dirty_ratelimit/2 and the target pause is max_pause, the next
1437	 * pause time will be max_pause*2 _trimmed down_ to max_pause.  As a
1438	 * result task_ratelimit won't be executed faithfully, which could
1439	 * eventually bring down dirty_ratelimit.
1440	 *
1441	 * We apply two rules to fix it up:
1442	 * 1) try to estimate the next pause time and if necessary, use a lower
1443	 *    nr_dirtied_pause so as not to exceed max_pause. When this happens,
1444	 *    nr_dirtied_pause will be "dancing" with task_ratelimit.
1445	 * 2) limit the target pause time to max_pause/2, so that the normal
1446	 *    small fluctuations of task_ratelimit won't trigger rule (1) and
1447	 *    nr_dirtied_pause will remain as stable as dirty_ratelimit.
1448	 */
1449	t = min(t, 1 + max_pause / 2);
1450	pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
1451
1452	/*
1453	 * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
1454	 * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
1455	 * When the 16 consecutive reads are often interrupted by some dirty
1456	 * throttling pause during the async writes, cfq will go into idles
1457	 * (deadline is fine). So push nr_dirtied_pause as high as possible
1458	 * until it reaches DIRTY_POLL_THRESH=32 pages.
1459	 */
1460	if (pages < DIRTY_POLL_THRESH) {
1461		t = max_pause;
1462		pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
1463		if (pages > DIRTY_POLL_THRESH) {
1464			pages = DIRTY_POLL_THRESH;
1465			t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
1466		}
1467	}
1468
1469	pause = HZ * pages / (task_ratelimit + 1);
1470	if (pause > max_pause) {
1471		t = max_pause;
1472		pages = task_ratelimit * t / roundup_pow_of_two(HZ);
1473	}
1474
1475	*nr_dirtied_pause = pages;
1476	/*
1477	 * The minimal pause time will normally be half the target pause time.
1478	 */
1479	return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
1480}
1481
1482static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
1483{
1484	struct bdi_writeback *wb = dtc->wb;
1485	unsigned long wb_reclaimable;
1486
1487	/*
1488	 * wb_thresh is not treated as some limiting factor as
1489	 * dirty_thresh, due to reasons
1490	 * - in JBOD setup, wb_thresh can fluctuate a lot
1491	 * - in a system with HDD and USB key, the USB key may somehow
1492	 *   go into state (wb_dirty >> wb_thresh) either because
1493	 *   wb_dirty starts high, or because wb_thresh drops low.
1494	 *   In this case we don't want to hard throttle the USB key
1495	 *   dirtiers for 100 seconds until wb_dirty drops under
1496	 *   wb_thresh. Instead the auxiliary wb control line in
1497	 *   wb_position_ratio() will let the dirtier task progress
1498	 *   at some rate <= (write_bw / 2) for bringing down wb_dirty.
1499	 */
1500	dtc->wb_thresh = __wb_calc_thresh(dtc);
1501	dtc->wb_bg_thresh = dtc->thresh ?
1502		div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
1503
1504	/*
1505	 * In order to avoid the stacked BDI deadlock we need
1506	 * to ensure we accurately count the 'dirty' pages when
1507	 * the threshold is low.
1508	 *
1509	 * Otherwise it would be possible to get thresh+n pages
1510	 * reported dirty, even though there are thresh-m pages
1511	 * actually dirty; with m+n sitting in the percpu
1512	 * deltas.
1513	 */
1514	if (dtc->wb_thresh < 2 * wb_stat_error(wb)) {
1515		wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
1516		dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK);
1517	} else {
1518		wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE);
1519		dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK);
1520	}
1521}
1522
1523/*
1524 * balance_dirty_pages() must be called by processes which are generating dirty
1525 * data.  It looks at the number of dirty pages in the machine and will force
1526 * the caller to wait once crossing the (background_thresh + dirty_thresh) / 2.
1527 * If we're over `background_thresh' then the writeback threads are woken to
1528 * perform some writeout.
1529 */
1530static void balance_dirty_pages(struct address_space *mapping,
1531				struct bdi_writeback *wb,
1532				unsigned long pages_dirtied)
1533{
1534	struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
1535	struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
1536	struct dirty_throttle_control * const gdtc = &gdtc_stor;
1537	struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
1538						     &mdtc_stor : NULL;
1539	struct dirty_throttle_control *sdtc;
1540	unsigned long nr_reclaimable;	/* = file_dirty + unstable_nfs */
1541	long period;
1542	long pause;
1543	long max_pause;
1544	long min_pause;
1545	int nr_dirtied_pause;
1546	bool dirty_exceeded = false;
1547	unsigned long task_ratelimit;
1548	unsigned long dirty_ratelimit;
1549	struct backing_dev_info *bdi = wb->bdi;
1550	bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
1551	unsigned long start_time = jiffies;
1552
1553	for (;;) {
1554		unsigned long now = jiffies;
1555		unsigned long dirty, thresh, bg_thresh;
1556		unsigned long m_dirty = 0;	/* stop bogus uninit warnings */
1557		unsigned long m_thresh = 0;
1558		unsigned long m_bg_thresh = 0;
1559
1560		/*
1561		 * Unstable writes are a feature of certain networked
1562		 * filesystems (e.g. NFS) in which data may have been
1563		 * written to the server's write cache, but has not yet
1564		 * been flushed to permanent storage.
1565		 */
1566		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
1567					global_page_state(NR_UNSTABLE_NFS);
1568		gdtc->avail = global_dirtyable_memory();
1569		gdtc->dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);
1570
1571		domain_dirty_limits(gdtc);
1572
1573		if (unlikely(strictlimit)) {
1574			wb_dirty_limits(gdtc);
1575
1576			dirty = gdtc->wb_dirty;
1577			thresh = gdtc->wb_thresh;
1578			bg_thresh = gdtc->wb_bg_thresh;
1579		} else {
1580			dirty = gdtc->dirty;
1581			thresh = gdtc->thresh;
1582			bg_thresh = gdtc->bg_thresh;
1583		}
1584
1585		if (mdtc) {
1586			unsigned long filepages, headroom, writeback;
1587
1588			/*
1589			 * If @wb belongs to !root memcg, repeat the same
1590			 * basic calculations for the memcg domain.
1591			 */
1592			mem_cgroup_wb_stats(wb, &filepages, &headroom,
1593					    &mdtc->dirty, &writeback);
1594			mdtc->dirty += writeback;
1595			mdtc_calc_avail(mdtc, filepages, headroom);
1596
1597			domain_dirty_limits(mdtc);
1598
1599			if (unlikely(strictlimit)) {
1600				wb_dirty_limits(mdtc);
1601				m_dirty = mdtc->wb_dirty;
1602				m_thresh = mdtc->wb_thresh;
1603				m_bg_thresh = mdtc->wb_bg_thresh;
1604			} else {
1605				m_dirty = mdtc->dirty;
1606				m_thresh = mdtc->thresh;
1607				m_bg_thresh = mdtc->bg_thresh;
1608			}
1609		}
1610
1611		/*
1612		 * Throttle it only when the background writeback cannot
1613		 * catch-up. This avoids (excessively) small writeouts
1614		 * when the wb limits are ramping up in case of !strictlimit.
1615		 *
1616		 * In strictlimit case make decision based on the wb counters
1617		 * and limits. Small writeouts when the wb limits are ramping
1618		 * up are the price we consciously pay for strictlimit-ing.
1619		 *
1620		 * If memcg domain is in effect, @dirty should be under
1621		 * both global and memcg freerun ceilings.
1622		 */
1623		if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh) &&
1624		    (!mdtc ||
1625		     m_dirty <= dirty_freerun_ceiling(m_thresh, m_bg_thresh))) {
1626			unsigned long intv = dirty_poll_interval(dirty, thresh);
1627			unsigned long m_intv = ULONG_MAX;
1628
1629			current->dirty_paused_when = now;
1630			current->nr_dirtied = 0;
1631			if (mdtc)
1632				m_intv = dirty_poll_interval(m_dirty, m_thresh);
1633			current->nr_dirtied_pause = min(intv, m_intv);
1634			break;
1635		}
1636
1637		if (unlikely(!writeback_in_progress(wb)))
1638			wb_start_background_writeback(wb);
1639
1640		/*
1641		 * Calculate global domain's pos_ratio and select the
1642		 * global dtc by default.
1643		 */
1644		if (!strictlimit)
1645			wb_dirty_limits(gdtc);
1646
1647		dirty_exceeded = (gdtc->wb_dirty > gdtc->wb_thresh) &&
1648			((gdtc->dirty > gdtc->thresh) || strictlimit);
1649
1650		wb_position_ratio(gdtc);
1651		sdtc = gdtc;
1652
1653		if (mdtc) {
1654			/*
1655			 * If memcg domain is in effect, calculate its
1656			 * pos_ratio.  @wb should satisfy constraints from
1657			 * both global and memcg domains.  Choose the one
1658			 * w/ lower pos_ratio.
1659			 */
1660			if (!strictlimit)
1661				wb_dirty_limits(mdtc);
1662
1663			dirty_exceeded |= (mdtc->wb_dirty > mdtc->wb_thresh) &&
1664				((mdtc->dirty > mdtc->thresh) || strictlimit);
1665
1666			wb_position_ratio(mdtc);
1667			if (mdtc->pos_ratio < gdtc->pos_ratio)
1668				sdtc = mdtc;
1669		}
1670
1671		if (dirty_exceeded && !wb->dirty_exceeded)
1672			wb->dirty_exceeded = 1;
1673
1674		if (time_is_before_jiffies(wb->bw_time_stamp +
1675					   BANDWIDTH_INTERVAL)) {
1676			spin_lock(&wb->list_lock);
1677			__wb_update_bandwidth(gdtc, mdtc, start_time, true);
1678			spin_unlock(&wb->list_lock);
1679		}
1680
1681		/* throttle according to the chosen dtc */
1682		dirty_ratelimit = wb->dirty_ratelimit;
1683		task_ratelimit = ((u64)dirty_ratelimit * sdtc->pos_ratio) >>
1684							RATELIMIT_CALC_SHIFT;
1685		max_pause = wb_max_pause(wb, sdtc->wb_dirty);
1686		min_pause = wb_min_pause(wb, max_pause,
1687					 task_ratelimit, dirty_ratelimit,
1688					 &nr_dirtied_pause);
1689
1690		if (unlikely(task_ratelimit == 0)) {
1691			period = max_pause;
1692			pause = max_pause;
1693			goto pause;
1694		}
1695		period = HZ * pages_dirtied / task_ratelimit;
1696		pause = period;
1697		if (current->dirty_paused_when)
1698			pause -= now - current->dirty_paused_when;
1699		/*
1700		 * For less than 1s think time (ext3/4 may block the dirtier
1701		 * for up to 800ms from time to time on a single HDD; xfs does too,
1702		 * though at much lower frequency), try to compensate for it in
1703		 * future periods by updating the virtual time; otherwise just
1704		 * do a reset, as it may be a light dirtier.
1705		 */
1706		if (pause < min_pause) {
1707			trace_balance_dirty_pages(wb,
1708						  sdtc->thresh,
1709						  sdtc->bg_thresh,
1710						  sdtc->dirty,
1711						  sdtc->wb_thresh,
1712						  sdtc->wb_dirty,
1713						  dirty_ratelimit,
1714						  task_ratelimit,
1715						  pages_dirtied,
1716						  period,
1717						  min(pause, 0L),
1718						  start_time);
1719			if (pause < -HZ) {
1720				current->dirty_paused_when = now;
1721				current->nr_dirtied = 0;
1722			} else if (period) {
1723				current->dirty_paused_when += period;
1724				current->nr_dirtied = 0;
1725			} else if (current->nr_dirtied_pause <= pages_dirtied)
1726				current->nr_dirtied_pause += pages_dirtied;
1727			break;
1728		}
1729		if (unlikely(pause > max_pause)) {
1730			/* for occasional dropped task_ratelimit */
1731			now += min(pause - max_pause, max_pause);
1732			pause = max_pause;
1733		}
1734
1735pause:
1736		trace_balance_dirty_pages(wb,
1737					  sdtc->thresh,
1738					  sdtc->bg_thresh,
1739					  sdtc->dirty,
1740					  sdtc->wb_thresh,
1741					  sdtc->wb_dirty,
1742					  dirty_ratelimit,
1743					  task_ratelimit,
1744					  pages_dirtied,
1745					  period,
1746					  pause,
1747					  start_time);
1748		__set_current_state(TASK_KILLABLE);
1749		io_schedule_timeout(pause);
1750
1751		current->dirty_paused_when = now + pause;
1752		current->nr_dirtied = 0;
1753		current->nr_dirtied_pause = nr_dirtied_pause;
1754
1755		/*
1756		 * This is typically equal to (dirty < thresh) and can also
1757		 * keep "1000+ dd on a slow USB stick" under control.
1758		 */
1759		if (task_ratelimit)
1760			break;
1761
1762		/*
1763		 * In the case of an unresponsive NFS server whose NFS dirty
1764		 * pages exceed dirty_thresh, give the other good wb's a pipe
1765		 * to go through, so that tasks on them still remain responsive.
1766		 *
1767		 * In theory 1 page is enough to keep the consumer-producer
1768		 * pipe going: the flusher cleans 1 page => the task dirties 1
1769		 * more page. However wb_dirty has accounting errors.  So use
1770		 * the larger and more IO friendly wb_stat_error.
1771		 */
1772		if (sdtc->wb_dirty <= wb_stat_error(wb))
1773			break;
1774
1775		if (fatal_signal_pending(current))
1776			break;
1777	}
1778
1779	if (!dirty_exceeded && wb->dirty_exceeded)
1780		wb->dirty_exceeded = 0;
1781
1782	if (writeback_in_progress(wb))
1783		return;
1784
1785	/*
1786	 * In laptop mode, we wait until hitting the higher threshold before
1787	 * starting background writeout, and then write out all the way down
1788	 * to the lower threshold.  So slow writers cause minimal disk activity.
1789	 *
1790	 * In normal mode, we start background writeout at the lower
1791	 * background_thresh, to keep the amount of dirty memory low.
1792	 */
1793	if (laptop_mode)
1794		return;
1795
1796	if (nr_reclaimable > gdtc->bg_thresh)
1797		wb_start_background_writeback(wb);
1798}
1799
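/*
 * Illustrative userspace sketch (not part of this file): how the pause in
 * balance_dirty_pages() above is derived from the per-task ratelimit.  The
 * constants mirror the ones used in this file; the sample values for
 * dirty_ratelimit, pos_ratio and pages_dirtied are made up for demonstration.
 */
#include <stdio.h>

#define SKETCH_HZ		250
#define RATELIMIT_CALC_SHIFT	10
#define MAX_PAUSE		(SKETCH_HZ / 5)	/* 200ms, as in this file */

int main(void)
{
	unsigned long dirty_ratelimit = 4096;	/* pages/s, per-wb estimate (example) */
	unsigned long pos_ratio = 512;		/* ~0.5 in fixed point (example) */
	unsigned long pages_dirtied = 32;	/* pages dirtied since the last pause */

	unsigned long task_ratelimit =
		(dirty_ratelimit * pos_ratio) >> RATELIMIT_CALC_SHIFT;
	long pause = SKETCH_HZ * pages_dirtied / task_ratelimit;

	if (pause > MAX_PAUSE)
		pause = MAX_PAUSE;

	printf("task_ratelimit=%lu pages/s, pause=%ld jiffies (~%ld ms)\n",
	       task_ratelimit, pause, pause * 1000 / SKETCH_HZ);
	return 0;
}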
1800static DEFINE_PER_CPU(int, bdp_ratelimits);
1801
1802/*
1803 * Normal tasks are throttled by
1804 *	loop {
1805 *		dirty tsk->nr_dirtied_pause pages;
1806 *		take a snap in balance_dirty_pages();
1807 *	}
1808 * However there is a worst case. If every task exits immediately after dirtying
1809 * (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never be
1810 * called to throttle the page dirties. The solution is to save the not yet
1811 * throttled page dirties in dirty_throttle_leaks on task exit and charge them
1812 * randomly into the running tasks. This works well for the above worst case,
1813 * as the new task will pick up and accumulate the old task's leaked dirty
1814 * count and eventually get throttled.
1815 */
1816DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
1817
1818/**
1819 * balance_dirty_pages_ratelimited - balance dirty memory state
1820 * @mapping: address_space which was dirtied
1821 *
1822 * Processes which are dirtying memory should call in here once for each page
1823 * which was newly dirtied.  The function will periodically check the system's
1824 * dirty state and will initiate writeback if needed.
1825 *
1826 * On really big machines, get_writeback_state is expensive, so try to avoid
1827 * calling it too often (ratelimiting).  But once we're over the dirty memory
1828 * limit we decrease the ratelimiting by a lot, to prevent individual processes
1829 * from overshooting the limit by (ratelimit_pages) each.
1830 */
1831void balance_dirty_pages_ratelimited(struct address_space *mapping)
1832{
1833	struct inode *inode = mapping->host;
1834	struct backing_dev_info *bdi = inode_to_bdi(inode);
1835	struct bdi_writeback *wb = NULL;
1836	int ratelimit;
1837	int *p;
1838
1839	if (!bdi_cap_account_dirty(bdi))
1840		return;
1841
1842	if (inode_cgwb_enabled(inode))
1843		wb = wb_get_create_current(bdi, GFP_KERNEL);
1844	if (!wb)
1845		wb = &bdi->wb;
1846
1847	ratelimit = current->nr_dirtied_pause;
1848	if (wb->dirty_exceeded)
1849		ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));
1850
1851	preempt_disable();
1852	/*
1853	 * This prevents one CPU from accumulating too many dirtied pages without
1854	 * calling into balance_dirty_pages(), which can happen when there are
1855	 * 1000+ tasks all starting to dirty pages at exactly the same
1856	 * time, hence all honouring a too-large initial task->nr_dirtied_pause.
1857	 */
1858	p = this_cpu_ptr(&bdp_ratelimits);
1859	if (unlikely(current->nr_dirtied >= ratelimit))
1860		*p = 0;
1861	else if (unlikely(*p >= ratelimit_pages)) {
1862		*p = 0;
1863		ratelimit = 0;
1864	}
1865	/*
1866	 * Pick up the dirtied pages by the exited tasks. This avoids lots of
1867	 * short-lived tasks (eg. gcc invocations in a kernel build) escaping
1868	 * the dirty throttling and livelocking other long-running dirtiers.
1869	 */
1870	p = this_cpu_ptr(&dirty_throttle_leaks);
1871	if (*p > 0 && current->nr_dirtied < ratelimit) {
1872		unsigned long nr_pages_dirtied;
1873		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
1874		*p -= nr_pages_dirtied;
1875		current->nr_dirtied += nr_pages_dirtied;
1876	}
1877	preempt_enable();
1878
1879	if (unlikely(current->nr_dirtied >= ratelimit))
1880		balance_dirty_pages(mapping, wb, current->nr_dirtied);
1881
1882	wb_put(wb);
1883}
1884EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
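/*
 * Illustrative userspace sketch (not part of this file): the per-task
 * ratelimiting scheme above, reduced to a single-threaded simulation.  A task
 * accumulates nr_dirtied until it reaches its pause threshold, at which point
 * it would call balance_dirty_pages(); counts left behind by exiting tasks sit
 * in a "leak" counter and are charged to the next dirtier.  All numbers here
 * are invented for the example.
 */
#include <stdio.h>

static int sketch_dirty_throttle_leaks = 24;	/* left behind by exited tasks (example) */

int main(void)
{
	int nr_dirtied = 0;
	int nr_dirtied_pause = 32;	/* per-task threshold (example) */

	/* pick up leaked counts first, as balance_dirty_pages_ratelimited() does */
	if (sketch_dirty_throttle_leaks > 0 && nr_dirtied < nr_dirtied_pause) {
		int picked = sketch_dirty_throttle_leaks;
		if (picked > nr_dirtied_pause - nr_dirtied)
			picked = nr_dirtied_pause - nr_dirtied;
		sketch_dirty_throttle_leaks -= picked;
		nr_dirtied += picked;
	}

	for (int page = 0; page < 100; page++) {
		nr_dirtied++;			/* one newly dirtied page */
		if (nr_dirtied >= nr_dirtied_pause) {
			printf("page %d: would call balance_dirty_pages()\n", page);
			nr_dirtied = 0;		/* reset after throttling */
		}
	}
	return 0;
}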
1885
1886/**
1887 * wb_over_bg_thresh - does @wb need to be written back?
1888 * @wb: bdi_writeback of interest
1889 *
1890 * Determines whether background writeback should keep writing @wb or it's
1891 * clean enough.  Returns %true if writeback should continue.
1892 */
1893bool wb_over_bg_thresh(struct bdi_writeback *wb)
1894{
1895	struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
1896	struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
1897	struct dirty_throttle_control * const gdtc = &gdtc_stor;
1898	struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
1899						     &mdtc_stor : NULL;
1900
1901	/*
1902	 * Similar to balance_dirty_pages() but ignores pages being written
1903	 * as we're trying to decide whether to put more under writeback.
1904	 */
1905	gdtc->avail = global_dirtyable_memory();
1906	gdtc->dirty = global_page_state(NR_FILE_DIRTY) +
1907		      global_page_state(NR_UNSTABLE_NFS);
1908	domain_dirty_limits(gdtc);
1909
1910	if (gdtc->dirty > gdtc->bg_thresh)
1911		return true;
1912
1913	if (wb_stat(wb, WB_RECLAIMABLE) >
1914	    wb_calc_thresh(gdtc->wb, gdtc->bg_thresh))
1915		return true;
1916
1917	if (mdtc) {
1918		unsigned long filepages, headroom, writeback;
1919
1920		mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty,
1921				    &writeback);
1922		mdtc_calc_avail(mdtc, filepages, headroom);
1923		domain_dirty_limits(mdtc);	/* ditto, ignore writeback */
1924
1925		if (mdtc->dirty > mdtc->bg_thresh)
1926			return true;
1927
1928		if (wb_stat(wb, WB_RECLAIMABLE) >
1929		    wb_calc_thresh(mdtc->wb, mdtc->bg_thresh))
1930			return true;
1931	}
1932
1933	return false;
1934}
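/*
 * Illustrative userspace sketch (not part of this file): the shape of the
 * wb_over_bg_thresh() decision above -- background writeback keeps going while
 * either the whole domain or this wb's own share is over its background
 * threshold.  The values passed in main() are made up for the example.
 */
#include <stdio.h>
#include <stdbool.h>

static bool over_bg_thresh(unsigned long domain_dirty, unsigned long domain_bg_thresh,
			   unsigned long wb_reclaimable, unsigned long wb_bg_thresh)
{
	if (domain_dirty > domain_bg_thresh)
		return true;
	return wb_reclaimable > wb_bg_thresh;
}

int main(void)
{
	/* example: domain below its threshold, but this wb over its share */
	printf("%d\n", over_bg_thresh(900, 1000, 300, 250));	/* prints 1 */
	return 0;
}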
1935
1936void throttle_vm_writeout(gfp_t gfp_mask)
1937{
1938	unsigned long background_thresh;
1939	unsigned long dirty_thresh;
1940
1941	for ( ; ; ) {
1942		global_dirty_limits(&background_thresh, &dirty_thresh);
1943		dirty_thresh = hard_dirty_limit(&global_wb_domain, dirty_thresh);
1944
1945		/*
1946		 * Boost the allowable dirty threshold a bit for page
1947		 * allocators so they don't get DoS'ed by heavy writers
1948		 */
1949		dirty_thresh += dirty_thresh / 10;      /* wheeee... */
1950
1951		if (global_page_state(NR_UNSTABLE_NFS) +
1952			global_page_state(NR_WRITEBACK) <= dirty_thresh)
1953			break;
1954		congestion_wait(BLK_RW_ASYNC, HZ/10);
1955
1956		/*
1957		 * The caller might hold locks which can prevent IO completion
1958		 * or progress in the filesystem.  So we cannot just sit here
1959		 * waiting for IO to complete.
1960		 */
1961		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
1962			break;
1963	}
1964}
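/*
 * Illustrative userspace sketch (not part of this file): the 10% boost that
 * throttle_vm_writeout() applies before comparing against pages under
 * writeback, so page allocators are throttled a little later than ordinary
 * dirtiers.  The page counts are examples only.
 */
#include <stdio.h>

int main(void)
{
	unsigned long dirty_thresh = 2000;	/* pages (example) */
	unsigned long nr_writeback = 2100;	/* NR_UNSTABLE_NFS + NR_WRITEBACK (example) */

	dirty_thresh += dirty_thresh / 10;	/* boosted to 2200 */

	if (nr_writeback <= dirty_thresh)
		printf("allocator not throttled\n");
	else
		printf("allocator would wait for writeback congestion\n");
	return 0;
}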
1965
1966/*
1967 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
1968 */
1969int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
1970	void __user *buffer, size_t *length, loff_t *ppos)
1971{
1972	proc_dointvec(table, write, buffer, length, ppos);
1973	return 0;
1974}
1975
1976#ifdef CONFIG_BLOCK
1977void laptop_mode_timer_fn(unsigned long data)
1978{
1979	struct request_queue *q = (struct request_queue *)data;
1980	int nr_pages = global_page_state(NR_FILE_DIRTY) +
1981		global_page_state(NR_UNSTABLE_NFS);
1982	struct bdi_writeback *wb;
1983
1984	/*
1985	 * We want to write everything out, not just down to the dirty
1986	 * threshold
1987	 */
1988	if (!bdi_has_dirty_io(&q->backing_dev_info))
1989		return;
1990
1991	rcu_read_lock();
1992	list_for_each_entry_rcu(wb, &q->backing_dev_info.wb_list, bdi_node)
1993		if (wb_has_dirty_io(wb))
1994			wb_start_writeback(wb, nr_pages, true,
1995					   WB_REASON_LAPTOP_TIMER);
1996	rcu_read_unlock();
1997}
1998
1999/*
2000 * We've spun up the disk and we're in laptop mode: schedule writeback
2001 * of all dirty data a few seconds from now.  If the flush is already scheduled
2002 * then push it back - the user is still using the disk.
2003 */
2004void laptop_io_completion(struct backing_dev_info *info)
2005{
2006	mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
2007}
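/*
 * Illustrative userspace sketch (not part of this file): the deadline
 * push-back pattern used by laptop_io_completion() -- every I/O completion
 * re-arms one timer, so the flush fires only after the disk has been idle
 * for a full laptop_mode interval.  Times here are arbitrary "jiffies".
 */
#include <stdio.h>

int main(void)
{
	unsigned long laptop_timeout = 500;	/* laptop_mode interval (example) */
	unsigned long deadline = 0;
	unsigned long completions[] = { 10, 120, 130, 700 };

	for (int i = 0; i < 4; i++) {
		unsigned long now = completions[i];
		if (deadline && now >= deadline)
			printf("flush fired at %lu\n", deadline);
		deadline = now + laptop_timeout;	/* mod_timer() equivalent */
	}
	printf("final deadline: %lu\n", deadline);
	return 0;
}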
2008
2009/*
2010 * We're in laptop mode and we've just synced. The sync's writes will have
2011 * caused another writeback to be scheduled by laptop_io_completion.
2012 * Nothing needs to be written back anymore, so we unschedule the writeback.
2013 */
2014void laptop_sync_completion(void)
2015{
2016	struct backing_dev_info *bdi;
2017
2018	rcu_read_lock();
2019
2020	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
2021		del_timer(&bdi->laptop_mode_wb_timer);
2022
2023	rcu_read_unlock();
2024}
2025#endif
2026
2027/*
2028 * If ratelimit_pages is too high then we can get into dirty-data overload
2029 * if a large number of processes all perform writes at the same time.
2030 * If it is too low then SMP machines will call the (expensive)
2031 * get_writeback_state too often.
2032 *
2033 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
2034 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
2035 * thresholds.
2036 */
2037
2038void writeback_set_ratelimit(void)
2039{
2040	struct wb_domain *dom = &global_wb_domain;
2041	unsigned long background_thresh;
2042	unsigned long dirty_thresh;
2043
2044	global_dirty_limits(&background_thresh, &dirty_thresh);
2045	dom->dirty_limit = dirty_thresh;
2046	ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
2047	if (ratelimit_pages < 16)
2048		ratelimit_pages = 16;
2049}
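/*
 * Illustrative userspace sketch (not part of this file): the ratelimit_pages
 * calculation performed by writeback_set_ratelimit() above.  With these
 * example numbers, each CPU may dirty at most dirty_thresh/(ncpus*32) pages
 * (about 3% of the threshold in total) before being forced into
 * balance_dirty_pages().
 */
#include <stdio.h>

int main(void)
{
	unsigned long dirty_thresh = 100000;	/* pages (example) */
	unsigned int ncpus = 8;			/* num_online_cpus() (example) */

	long rl_pages = dirty_thresh / (ncpus * 32);
	if (rl_pages < 16)
		rl_pages = 16;

	printf("ratelimit_pages = %ld\n", rl_pages);	/* 390 with these numbers */
	return 0;
}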
2050
2051static int
2052ratelimit_handler(struct notifier_block *self, unsigned long action,
2053		  void *hcpu)
2054{
2055
2056	switch (action & ~CPU_TASKS_FROZEN) {
2057	case CPU_ONLINE:
2058	case CPU_DEAD:
2059		writeback_set_ratelimit();
2060		return NOTIFY_OK;
2061	default:
2062		return NOTIFY_DONE;
2063	}
2064}
2065
2066static struct notifier_block ratelimit_nb = {
2067	.notifier_call	= ratelimit_handler,
2068	.next		= NULL,
2069};
2070
2071/*
2072 * Called early on to tune the page writeback dirty limits.
2073 *
2074 * We used to scale dirty pages according to how total memory
2075 * related to pages that could be allocated for buffers (by
2076 * comparing nr_free_buffer_pages() to vm_total_pages).
2077 *
2078 * However, that was when we used "dirty_ratio" to scale with
2079 * all memory, and we don't do that any more. "dirty_ratio"
2080 * is now applied to total non-HIGHPAGE memory (by subtracting
2081 * totalhigh_pages from vm_total_pages), and as such we can't
2082 * get into the old insane situation any more where we had
2083 * large amounts of dirty pages compared to a small amount of
2084 * non-HIGHMEM memory.
2085 *
2086 * But we might still want to scale the dirty_ratio by how
2087 * much memory the box has..
2088 */
2089void __init page_writeback_init(void)
2090{
2091	BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
2092
2093	writeback_set_ratelimit();
2094	register_cpu_notifier(&ratelimit_nb);
2095}
2096
2097/**
2098 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
2099 * @mapping: address space structure to write
2100 * @start: starting page index
2101 * @end: ending page index (inclusive)
2102 *
2103 * This function scans the page range from @start to @end (inclusive) and tags
2104 * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
2105 * that write_cache_pages (or whoever calls this function) will then use
2106 * TOWRITE tag to identify pages eligible for writeback.  This mechanism is
2107 * used to avoid livelocking of writeback by a process steadily creating new
2108 * dirty pages in the file (thus it is important for this function to be quick
2109 * so that it can tag pages faster than a dirtying process can create them).
2110 */
2111/*
2112 * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency.
2113 */
2114void tag_pages_for_writeback(struct address_space *mapping,
2115			     pgoff_t start, pgoff_t end)
2116{
2117#define WRITEBACK_TAG_BATCH 4096
2118	unsigned long tagged;
2119
2120	do {
2121		spin_lock_irq(&mapping->tree_lock);
2122		tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
2123				&start, end, WRITEBACK_TAG_BATCH,
2124				PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
2125		spin_unlock_irq(&mapping->tree_lock);
2126		WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
2127		cond_resched();
2128		/* We check 'start' to handle wrapping when end == ~0UL */
2129	} while (tagged >= WRITEBACK_TAG_BATCH && start);
2130}
2131EXPORT_SYMBOL(tag_pages_for_writeback);
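/*
 * Illustrative userspace sketch (not part of this file): the batching pattern
 * used by tag_pages_for_writeback() above -- handle WRITEBACK_TAG_BATCH items
 * per lock hold, then drop the lock and reschedule, so a huge range never
 * holds tree_lock for too long.  The "tag one batch" step is a stand-in for
 * radix_tree_range_tag_if_tagged(), not the real thing.
 */
#include <stdio.h>

#define SKETCH_TAG_BATCH 4096

/* stand-in: pretends to tag up to one batch and advances *start past it */
static unsigned long tag_one_batch(unsigned long *start, unsigned long end)
{
	unsigned long n = end - *start + 1;
	if (n > SKETCH_TAG_BATCH)
		n = SKETCH_TAG_BATCH;
	*start += n;
	return n;
}

int main(void)
{
	unsigned long start = 0, end = 10000, tagged, batches = 0;

	do {
		/* lock, tag one batch, unlock and cond_resched() would go here */
		tagged = tag_one_batch(&start, end);
		batches++;
	} while (tagged >= SKETCH_TAG_BATCH && start <= end);

	printf("tagged %lu pages in %lu batches\n", start, batches);
	return 0;
}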
2132
2133/**
2134 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2135 * @mapping: address space structure to write
2136 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2137 * @writepage: function called for each page
2138 * @data: data passed to writepage function
2139 *
2140 * If a page is already under I/O, write_cache_pages() skips it, even
2141 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
2142 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
2143 * and msync() need to guarantee that all the data which was dirty at the time
2144 * the call was made gets new I/O started against it.  If wbc->sync_mode is
2145 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2146 * existing IO to complete.
2147 *
2148 * To avoid livelocks (when other process dirties new pages), we first tag
2149 * pages which should be written back with TOWRITE tag and only then start
2150 * writing them. For data-integrity sync we have to be careful so that we do
2151 * not miss some pages (e.g., because some other process has cleared TOWRITE
2152 * tag we set). The rule we follow is that TOWRITE tag can be cleared only
2153 * by the process clearing the DIRTY tag (and submitting the page for IO).
2154 */
2155int write_cache_pages(struct address_space *mapping,
2156		      struct writeback_control *wbc, writepage_t writepage,
2157		      void *data)
2158{
2159	int ret = 0;
2160	int done = 0;
2161	struct pagevec pvec;
2162	int nr_pages;
2163	pgoff_t uninitialized_var(writeback_index);
2164	pgoff_t index;
2165	pgoff_t end;		/* Inclusive */
2166	pgoff_t done_index;
2167	int cycled;
2168	int range_whole = 0;
2169	int tag;
2170
2171	pagevec_init(&pvec, 0);
2172	if (wbc->range_cyclic) {
2173		writeback_index = mapping->writeback_index; /* prev offset */
2174		index = writeback_index;
2175		if (index == 0)
2176			cycled = 1;
2177		else
2178			cycled = 0;
2179		end = -1;
2180	} else {
2181		index = wbc->range_start >> PAGE_SHIFT;
2182		end = wbc->range_end >> PAGE_SHIFT;
2183		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2184			range_whole = 1;
2185		cycled = 1; /* ignore range_cyclic tests */
2186	}
2187	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2188		tag = PAGECACHE_TAG_TOWRITE;
2189	else
2190		tag = PAGECACHE_TAG_DIRTY;
2191retry:
2192	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2193		tag_pages_for_writeback(mapping, index, end);
2194	done_index = index;
2195	while (!done && (index <= end)) {
2196		int i;
2197
2198		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
2199			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
2200		if (nr_pages == 0)
2201			break;
2202
2203		for (i = 0; i < nr_pages; i++) {
2204			struct page *page = pvec.pages[i];
2205
2206			/*
2207			 * At this point, the page may be truncated or
2208			 * invalidated (changing page->mapping to NULL), or
2209			 * even swizzled back from swapper_space to tmpfs file
2210			 * mapping. However, page->index will not change
2211			 * because we have a reference on the page.
2212			 */
2213			if (page->index > end) {
2214				/*
2215				 * can't be range_cyclic (1st pass) because
2216				 * end == -1 in that case.
2217				 */
2218				done = 1;
2219				break;
2220			}
2221
2222			done_index = page->index;
2223
2224			lock_page(page);
2225
2226			/*
2227			 * Page truncated or invalidated. We can freely skip it
2228			 * then, even for data integrity operations: the page
2229			 * has disappeared concurrently, so there could be no
2230			 * real expectation of this data integrity operation
2231			 * even if there is now a new, dirty page at the same
2232			 * pagecache address.
2233			 */
2234			if (unlikely(page->mapping != mapping)) {
2235continue_unlock:
2236				unlock_page(page);
2237				continue;
2238			}
2239
2240			if (!PageDirty(page)) {
2241				/* someone wrote it for us */
2242				goto continue_unlock;
2243			}
2244
2245			if (PageWriteback(page)) {
2246				if (wbc->sync_mode != WB_SYNC_NONE)
2247					wait_on_page_writeback(page);
2248				else
2249					goto continue_unlock;
2250			}
2251
2252			BUG_ON(PageWriteback(page));
2253			if (!clear_page_dirty_for_io(page))
2254				goto continue_unlock;
2255
2256			trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
2257			ret = (*writepage)(page, wbc, data);
2258			if (unlikely(ret)) {
2259				if (ret == AOP_WRITEPAGE_ACTIVATE) {
2260					unlock_page(page);
2261					ret = 0;
2262				} else {
2263					/*
2264					 * done_index is set past this page,
2265					 * so media errors will not choke
2266					 * background writeout for the entire
2267					 * file. This has consequences for
2268					 * range_cyclic semantics (ie. it may
2269					 * not be suitable for data integrity
2270					 * writeout).
2271					 */
2272					done_index = page->index + 1;
2273					done = 1;
2274					break;
2275				}
2276			}
2277
2278			/*
2279			 * We stop writing back only if we are not doing
2280			 * integrity sync. In case of integrity sync we have to
2281			 * keep going until we have written all the pages
2282			 * we tagged for writeback prior to entering this loop.
2283			 */
2284			if (--wbc->nr_to_write <= 0 &&
2285			    wbc->sync_mode == WB_SYNC_NONE) {
2286				done = 1;
2287				break;
2288			}
2289		}
2290		pagevec_release(&pvec);
2291		cond_resched();
2292	}
2293	if (!cycled && !done) {
2294		/*
2295		 * range_cyclic:
2296		 * We hit the last page and there is more work to be done: wrap
2297		 * back to the start of the file
2298		 */
2299		cycled = 1;
2300		index = 0;
2301		end = writeback_index - 1;
2302		goto retry;
2303	}
2304	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2305		mapping->writeback_index = done_index;
2306
2307	return ret;
2308}
2309EXPORT_SYMBOL(write_cache_pages);
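/*
 * Illustrative userspace sketch (not part of this file): the range_cyclic
 * wrap-around in write_cache_pages() above -- start at the index remembered
 * from the previous pass, run to the end of the file, then wrap once back to
 * the beginning and stop where the first pass started.  Page counts are
 * examples only.
 */
#include <stdio.h>

int main(void)
{
	unsigned long nr_pages = 10;		/* pages in the file (example) */
	unsigned long writeback_index = 6;	/* where the previous pass stopped */

	/* first pass: writeback_index .. end of file */
	for (unsigned long i = writeback_index; i < nr_pages; i++)
		printf("write page %lu\n", i);

	/* cycled pass: 0 .. writeback_index - 1 */
	for (unsigned long i = 0; i < writeback_index; i++)
		printf("write page %lu\n", i);

	return 0;
}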
2310
2311/*
2312 * Function used by generic_writepages to call the real writepage
2313 * function and set the mapping flags on error
2314 */
2315static int __writepage(struct page *page, struct writeback_control *wbc,
2316		       void *data)
2317{
2318	struct address_space *mapping = data;
2319	int ret = mapping->a_ops->writepage(page, wbc);
2320	mapping_set_error(mapping, ret);
2321	return ret;
2322}
2323
2324/**
2325 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
2326 * @mapping: address space structure to write
2327 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2328 *
2329 * This is a library function, which implements the writepages()
2330 * address_space_operation.
2331 */
2332int generic_writepages(struct address_space *mapping,
2333		       struct writeback_control *wbc)
2334{
2335	struct blk_plug plug;
2336	int ret;
2337
2338	/* deal with chardevs and other special file */
2339	if (!mapping->a_ops->writepage)
2340		return 0;
2341
2342	blk_start_plug(&plug);
2343	ret = write_cache_pages(mapping, wbc, __writepage, mapping);
2344	blk_finish_plug(&plug);
2345	return ret;
2346}
2347
2348EXPORT_SYMBOL(generic_writepages);
2349
2350int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
2351{
2352	int ret;
2353
2354	if (wbc->nr_to_write <= 0)
2355		return 0;
2356	if (mapping->a_ops->writepages)
2357		ret = mapping->a_ops->writepages(mapping, wbc);
2358	else
2359		ret = generic_writepages(mapping, wbc);
2360	return ret;
2361}
2362
2363/**
2364 * write_one_page - write out a single page and optionally wait on I/O
2365 * @page: the page to write
2366 * @wait: if true, wait on writeout
2367 *
2368 * The page must be locked by the caller and will be unlocked upon return.
2369 *
2370 * write_one_page() returns a negative error code if I/O failed.
2371 */
2372int write_one_page(struct page *page, int wait)
2373{
2374	struct address_space *mapping = page->mapping;
2375	int ret = 0;
2376	struct writeback_control wbc = {
2377		.sync_mode = WB_SYNC_ALL,
2378		.nr_to_write = 1,
2379	};
2380
2381	BUG_ON(!PageLocked(page));
2382
2383	if (wait)
2384		wait_on_page_writeback(page);
2385
2386	if (clear_page_dirty_for_io(page)) {
2387		get_page(page);
2388		ret = mapping->a_ops->writepage(page, &wbc);
2389		if (ret == 0 && wait) {
2390			wait_on_page_writeback(page);
2391			if (PageError(page))
2392				ret = -EIO;
2393		}
2394		put_page(page);
2395	} else {
2396		unlock_page(page);
2397	}
2398	return ret;
2399}
2400EXPORT_SYMBOL(write_one_page);
2401
2402/*
2403 * For address_spaces which do not use buffers nor write back.
2404 */
2405int __set_page_dirty_no_writeback(struct page *page)
2406{
2407	if (!PageDirty(page))
2408		return !TestSetPageDirty(page);
2409	return 0;
2410}
2411
2412/*
2413 * Helper function for set_page_dirty family.
2414 *
2415 * Caller must hold lock_page_memcg().
2416 *
2417 * NOTE: This relies on being atomic wrt interrupts.
2418 */
2419void account_page_dirtied(struct page *page, struct address_space *mapping)
2420{
2421	struct inode *inode = mapping->host;
2422
2423	trace_writeback_dirty_page(page, mapping);
2424
2425	if (mapping_cap_account_dirty(mapping)) {
2426		struct bdi_writeback *wb;
2427
2428		inode_attach_wb(inode, page);
2429		wb = inode_to_wb(inode);
2430
2431		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_DIRTY);
2432		__inc_zone_page_state(page, NR_FILE_DIRTY);
2433		__inc_zone_page_state(page, NR_DIRTIED);
2434		__inc_wb_stat(wb, WB_RECLAIMABLE);
2435		__inc_wb_stat(wb, WB_DIRTIED);
2436		task_io_account_write(PAGE_SIZE);
2437		current->nr_dirtied++;
2438		this_cpu_inc(bdp_ratelimits);
2439	}
2440}
2441EXPORT_SYMBOL(account_page_dirtied);
2442
2443/*
2444 * Helper function for deaccounting dirty page without writeback.
2445 *
2446 * Caller must hold lock_page_memcg().
2447 */
2448void account_page_cleaned(struct page *page, struct address_space *mapping,
2449			  struct bdi_writeback *wb)
2450{
2451	if (mapping_cap_account_dirty(mapping)) {
2452		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
2453		dec_zone_page_state(page, NR_FILE_DIRTY);
2454		dec_wb_stat(wb, WB_RECLAIMABLE);
2455		task_io_account_cancelled_write(PAGE_SIZE);
2456	}
2457}
2458
2459/*
2460 * For address_spaces which do not use buffers.  Just tag the page as dirty in
2461 * its radix tree.
2462 *
2463 * This is also used when a single buffer is being dirtied: we want to set the
2464 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
2465 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
2466 *
2467 * The caller must ensure this doesn't race with truncation.  Most will simply
2468 * hold the page lock, but e.g. zap_pte_range() calls with the page mapped and
2469 * the pte lock held, which also locks out truncation.
2470 */
2471int __set_page_dirty_nobuffers(struct page *page)
2472{
2473	lock_page_memcg(page);
2474	if (!TestSetPageDirty(page)) {
2475		struct address_space *mapping = page_mapping(page);
2476		unsigned long flags;
2477
2478		if (!mapping) {
2479			unlock_page_memcg(page);
2480			return 1;
2481		}
2482
2483		spin_lock_irqsave(&mapping->tree_lock, flags);
2484		BUG_ON(page_mapping(page) != mapping);
2485		WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
2486		account_page_dirtied(page, mapping);
2487		radix_tree_tag_set(&mapping->page_tree, page_index(page),
2488				   PAGECACHE_TAG_DIRTY);
2489		spin_unlock_irqrestore(&mapping->tree_lock, flags);
2490		unlock_page_memcg(page);
2491
2492		if (mapping->host) {
2493			/* !PageAnon && !swapper_space */
2494			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
2495		}
2496		return 1;
2497	}
2498	unlock_page_memcg(page);
2499	return 0;
2500}
2501EXPORT_SYMBOL(__set_page_dirty_nobuffers);
2502
2503/*
2504 * Call this whenever redirtying a page, to de-account the dirty counters
2505 * (NR_DIRTIED, BDI_DIRTIED, tsk->nr_dirtied), so that they match the written
2506 * counters (NR_WRITTEN, BDI_WRITTEN) in long term. The mismatches will lead to
2507 * systematic errors in balanced_dirty_ratelimit and the dirty pages position
2508 * control.
2509 */
2510void account_page_redirty(struct page *page)
2511{
2512	struct address_space *mapping = page->mapping;
2513
2514	if (mapping && mapping_cap_account_dirty(mapping)) {
2515		struct inode *inode = mapping->host;
2516		struct bdi_writeback *wb;
2517		bool locked;
2518
2519		wb = unlocked_inode_to_wb_begin(inode, &locked);
2520		current->nr_dirtied--;
2521		dec_zone_page_state(page, NR_DIRTIED);
2522		dec_wb_stat(wb, WB_DIRTIED);
2523		unlocked_inode_to_wb_end(inode, locked);
2524	}
2525}
2526EXPORT_SYMBOL(account_page_redirty);
2527
2528/*
2529 * When a writepage implementation decides that it doesn't want to write this
2530 * page for some reason, it should redirty the locked page via
2531 * redirty_page_for_writepage() and it should then unlock the page and return 0
2532 */
2533int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
2534{
2535	int ret;
2536
2537	wbc->pages_skipped++;
2538	ret = __set_page_dirty_nobuffers(page);
2539	account_page_redirty(page);
2540	return ret;
2541}
2542EXPORT_SYMBOL(redirty_page_for_writepage);
2543
2544/*
2545 * Dirty a page.
2546 *
2547 * For pages with a mapping this should be done under the page lock
2548 * for the benefit of asynchronous memory errors, which prefer a consistent
2549 * dirty state. This rule can be broken in some special cases,
2550 * but it is better not to.
2551 *
2552 * If the mapping doesn't provide a set_page_dirty a_op, then
2553 * just fall through and assume that it wants buffer_heads.
2554 */
2555int set_page_dirty(struct page *page)
2556{
2557	struct address_space *mapping = page_mapping(page);
2558
2559	if (likely(mapping)) {
2560		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
2561		/*
2562		 * readahead/lru_deactivate_page could leave PG_readahead/PG_reclaim
2563		 * set due to a race with end_page_writeback().
2564		 * For readahead: if the page is written, the flag will be
2565		 * reset, so there is no problem.
2566		 * For lru_deactivate_page: if the page is redirtied, the flag
2567		 * will be reset as well; but if the page is then used by readahead
2568		 * it will confuse readahead and make it restart the size ramp-up
2569		 * process. That is only a trivial problem, though.
2570		 */
2571		if (PageReclaim(page))
2572			ClearPageReclaim(page);
2573#ifdef CONFIG_BLOCK
2574		if (!spd)
2575			spd = __set_page_dirty_buffers;
2576#endif
2577		return (*spd)(page);
2578	}
2579	if (!PageDirty(page)) {
2580		if (!TestSetPageDirty(page))
2581			return 1;
2582	}
2583	return 0;
2584}
2585EXPORT_SYMBOL(set_page_dirty);
2586
2587/*
2588 * set_page_dirty() is racy if the caller has no reference against
2589 * page->mapping->host, and if the page is unlocked.  This is because another
2590 * CPU could truncate the page off the mapping and then free the mapping.
2591 *
2592 * Usually, the page _is_ locked, or the caller is a user-space process which
2593 * holds a reference on the inode by having an open file.
2594 *
2595 * In other cases, the page should be locked before running set_page_dirty().
2596 */
2597int set_page_dirty_lock(struct page *page)
2598{
2599	int ret;
2600
2601	lock_page(page);
2602	ret = set_page_dirty(page);
2603	unlock_page(page);
2604	return ret;
2605}
2606EXPORT_SYMBOL(set_page_dirty_lock);
2607
2608/*
2609 * This cancels just the dirty bit on the kernel page itself, it does NOT
2610 * actually remove dirty bits on any mmap's that may be around. It also
2611 * leaves the page tagged dirty, so any sync activity will still find it on
2612 * the dirty lists, and in particular, clear_page_dirty_for_io() will still
2613 * look at the dirty bits in the VM.
2614 *
2615 * This should *normally* only ever be done when a page is truncated,
2616 * and is not actually mapped anywhere at all. However, fs/buffer.c does
2617 * this when it notices that somebody has cleaned out all the buffers on a
2618 * page without actually doing it through the VM. Can you say "ext3 is
2619 * horribly ugly"? Thought you could.
2620 */
2621void cancel_dirty_page(struct page *page)
2622{
2623	struct address_space *mapping = page_mapping(page);
2624
2625	if (mapping_cap_account_dirty(mapping)) {
2626		struct inode *inode = mapping->host;
2627		struct bdi_writeback *wb;
2628		bool locked;
2629
2630		lock_page_memcg(page);
2631		wb = unlocked_inode_to_wb_begin(inode, &locked);
2632
2633		if (TestClearPageDirty(page))
2634			account_page_cleaned(page, mapping, wb);
2635
2636		unlocked_inode_to_wb_end(inode, locked);
2637		unlock_page_memcg(page);
2638	} else {
2639		ClearPageDirty(page);
2640	}
2641}
2642EXPORT_SYMBOL(cancel_dirty_page);
2643
2644/*
2645 * Clear a page's dirty flag, while caring for dirty memory accounting.
2646 * Returns true if the page was previously dirty.
2647 *
2648 * This is for preparing to put the page under writeout.  We leave the page
2649 * tagged as dirty in the radix tree so that a concurrent write-for-sync
2650 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
2651 * implementation will run either set_page_writeback() or set_page_dirty(),
2652 * at which stage we bring the page's dirty flag and radix-tree dirty tag
2653 * back into sync.
2654 *
2655 * This incoherency between the page's dirty flag and radix-tree tag is
2656 * unfortunate, but it only exists while the page is locked.
2657 */
2658int clear_page_dirty_for_io(struct page *page)
2659{
2660	struct address_space *mapping = page_mapping(page);
2661	int ret = 0;
2662
2663	BUG_ON(!PageLocked(page));
2664
2665	if (mapping && mapping_cap_account_dirty(mapping)) {
2666		struct inode *inode = mapping->host;
2667		struct bdi_writeback *wb;
2668		bool locked;
2669
2670		/*
2671		 * Yes, Virginia, this is indeed insane.
2672		 *
2673		 * We use this sequence to make sure that
2674		 *  (a) we account for dirty stats properly
2675		 *  (b) we tell the low-level filesystem to
2676		 *      mark the whole page dirty if it was
2677		 *      dirty in a pagetable. Only to then
2678		 *  (c) clean the page again and return 1 to
2679		 *      cause the writeback.
2680		 *
2681		 * This way we avoid all nasty races with the
2682		 * dirty bit in multiple places and clearing
2683		 * them concurrently from different threads.
2684		 *
2685		 * Note! Normally the "set_page_dirty(page)"
2686		 * has no effect on the actual dirty bit - since
2687		 * that will already usually be set. But we
2688		 * need the side effects, and it can help us
2689		 * avoid races.
2690		 *
2691		 * We basically use the page "master dirty bit"
2692		 * as a serialization point for all the different
2693		 * threads doing their things.
2694		 */
2695		if (page_mkclean(page))
2696			set_page_dirty(page);
2697		/*
2698		 * We carefully synchronise fault handlers against
2699		 * installing a dirty pte and marking the page dirty
2700		 * at this point.  We do this by having them hold the
2701		 * page lock while dirtying the page, and pages are
2702		 * always locked coming in here, so we get the desired
2703		 * exclusion.
2704		 */
2705		wb = unlocked_inode_to_wb_begin(inode, &locked);
2706		if (TestClearPageDirty(page)) {
2707			mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
2708			dec_zone_page_state(page, NR_FILE_DIRTY);
2709			dec_wb_stat(wb, WB_RECLAIMABLE);
2710			ret = 1;
2711		}
2712		unlocked_inode_to_wb_end(inode, locked);
2713		return ret;
2714	}
2715	return TestClearPageDirty(page);
2716}
2717EXPORT_SYMBOL(clear_page_dirty_for_io);
2718
2719int test_clear_page_writeback(struct page *page)
2720{
2721	struct address_space *mapping = page_mapping(page);
2722	int ret;
2723
2724	lock_page_memcg(page);
2725	if (mapping) {
2726		struct inode *inode = mapping->host;
2727		struct backing_dev_info *bdi = inode_to_bdi(inode);
2728		unsigned long flags;
2729
2730		spin_lock_irqsave(&mapping->tree_lock, flags);
2731		ret = TestClearPageWriteback(page);
2732		if (ret) {
2733			radix_tree_tag_clear(&mapping->page_tree,
2734						page_index(page),
2735						PAGECACHE_TAG_WRITEBACK);
2736			if (bdi_cap_account_writeback(bdi)) {
2737				struct bdi_writeback *wb = inode_to_wb(inode);
2738
2739				__dec_wb_stat(wb, WB_WRITEBACK);
2740				__wb_writeout_inc(wb);
2741			}
2742		}
2743		spin_unlock_irqrestore(&mapping->tree_lock, flags);
2744	} else {
2745		ret = TestClearPageWriteback(page);
2746	}
2747	if (ret) {
2748		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
2749		dec_zone_page_state(page, NR_WRITEBACK);
2750		inc_zone_page_state(page, NR_WRITTEN);
2751	}
2752	unlock_page_memcg(page);
2753	return ret;
2754}
2755
2756int __test_set_page_writeback(struct page *page, bool keep_write)
2757{
2758	struct address_space *mapping = page_mapping(page);
2759	int ret;
2760
2761	lock_page_memcg(page);
2762	if (mapping) {
2763		struct inode *inode = mapping->host;
2764		struct backing_dev_info *bdi = inode_to_bdi(inode);
2765		unsigned long flags;
2766
2767		spin_lock_irqsave(&mapping->tree_lock, flags);
2768		ret = TestSetPageWriteback(page);
2769		if (!ret) {
2770			radix_tree_tag_set(&mapping->page_tree,
2771						page_index(page),
2772						PAGECACHE_TAG_WRITEBACK);
2773			if (bdi_cap_account_writeback(bdi))
2774				__inc_wb_stat(inode_to_wb(inode), WB_WRITEBACK);
2775		}
2776		if (!PageDirty(page))
2777			radix_tree_tag_clear(&mapping->page_tree,
2778						page_index(page),
2779						PAGECACHE_TAG_DIRTY);
2780		if (!keep_write)
2781			radix_tree_tag_clear(&mapping->page_tree,
2782						page_index(page),
2783						PAGECACHE_TAG_TOWRITE);
2784		spin_unlock_irqrestore(&mapping->tree_lock, flags);
2785	} else {
2786		ret = TestSetPageWriteback(page);
2787	}
2788	if (!ret) {
2789		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
2790		inc_zone_page_state(page, NR_WRITEBACK);
2791	}
2792	unlock_page_memcg(page);
2793	return ret;
2794
2795}
2796EXPORT_SYMBOL(__test_set_page_writeback);
2797
2798/*
2799 * Return true if any of the pages in the mapping are marked with the
2800 * passed tag.
2801 */
2802int mapping_tagged(struct address_space *mapping, int tag)
2803{
2804	return radix_tree_tagged(&mapping->page_tree, tag);
2805}
2806EXPORT_SYMBOL(mapping_tagged);
2807
2808/**
2809 * wait_for_stable_page() - wait for writeback to finish, if necessary.
2810 * @page:	The page to wait on.
2811 *
2812 * This function determines if the given page is related to a backing device
2813 * that requires page contents to be held stable during writeback.  If so, then
2814 * it will wait for any pending writeback to complete.
2815 */
2816void wait_for_stable_page(struct page *page)
2817{
2818	if (bdi_cap_stable_pages_required(inode_to_bdi(page->mapping->host)))
2819		wait_on_page_writeback(page);
2820}
2821EXPORT_SYMBOL_GPL(wait_for_stable_page);
v3.1
   1/*
   2 * mm/page-writeback.c
   3 *
   4 * Copyright (C) 2002, Linus Torvalds.
   5 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
   6 *
   7 * Contains functions related to writing back dirty pages at the
   8 * address_space level.
   9 *
  10 * 10Apr2002	Andrew Morton
  11 *		Initial version
  12 */
  13
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/spinlock.h>
  17#include <linux/fs.h>
  18#include <linux/mm.h>
  19#include <linux/swap.h>
  20#include <linux/slab.h>
  21#include <linux/pagemap.h>
  22#include <linux/writeback.h>
  23#include <linux/init.h>
  24#include <linux/backing-dev.h>
  25#include <linux/task_io_accounting_ops.h>
  26#include <linux/blkdev.h>
  27#include <linux/mpage.h>
  28#include <linux/rmap.h>
  29#include <linux/percpu.h>
  30#include <linux/notifier.h>
  31#include <linux/smp.h>
  32#include <linux/sysctl.h>
  33#include <linux/cpu.h>
  34#include <linux/syscalls.h>
  35#include <linux/buffer_head.h>
  36#include <linux/pagevec.h>
 
 
 
  37#include <trace/events/writeback.h>
  38
 
 
  39/*
  40 * Sleep at most 200ms at a time in balance_dirty_pages().
  41 */
  42#define MAX_PAUSE		max(HZ/5, 1)
  43
  44/*
 
 
 
 
 
 
  45 * Estimate write bandwidth at 200ms intervals.
  46 */
  47#define BANDWIDTH_INTERVAL	max(HZ/5, 1)
  48
 
 
  49/*
  50 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
  51 * will look to see if it needs to force writeback or throttling.
  52 */
  53static long ratelimit_pages = 32;
  54
  55/*
  56 * When balance_dirty_pages decides that the caller needs to perform some
  57 * non-background writeback, this is how many pages it will attempt to write.
  58 * It should be somewhat larger than dirtied pages to ensure that reasonably
  59 * large amounts of I/O are submitted.
  60 */
  61static inline long sync_writeback_pages(unsigned long dirtied)
  62{
  63	if (dirtied < ratelimit_pages)
  64		dirtied = ratelimit_pages;
  65
  66	return dirtied + dirtied / 2;
  67}
  68
  69/* The following parameters are exported via /proc/sys/vm */
  70
  71/*
  72 * Start background writeback (via writeback threads) at this percentage
  73 */
  74int dirty_background_ratio = 10;
  75
  76/*
  77 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
  78 * dirty_background_ratio * the amount of dirtyable memory
  79 */
  80unsigned long dirty_background_bytes;
  81
  82/*
  83 * free highmem will not be subtracted from the total free memory
  84 * for calculating free ratios if vm_highmem_is_dirtyable is true
  85 */
  86int vm_highmem_is_dirtyable;
  87
  88/*
  89 * The generator of dirty data starts writeback at this percentage
  90 */
  91int vm_dirty_ratio = 20;
  92
  93/*
  94 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
  95 * vm_dirty_ratio * the amount of dirtyable memory
  96 */
  97unsigned long vm_dirty_bytes;
  98
  99/*
 100 * The interval between `kupdate'-style writebacks
 101 */
 102unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
 103
 
 
 104/*
 105 * The longest time for which data is allowed to remain dirty
 106 */
 107unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
 108
 109/*
 110 * Flag that makes the machine dump writes/reads and block dirtyings.
 111 */
 112int block_dump;
 113
 114/*
 115 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 116 * a full sync is triggered after this time elapses without any disk activity.
 117 */
 118int laptop_mode;
 119
 120EXPORT_SYMBOL(laptop_mode);
 121
 122/* End of sysctl-exported parameters */
 123
 124unsigned long global_dirty_limit;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 125
 126/*
 127 * Scale the writeback cache size proportional to the relative writeout speeds.
 
 
 
 
 128 *
 129 * We do this by keeping a floating proportion between BDIs, based on page
 130 * writeback completions [end_page_writeback()]. Those devices that write out
 131 * pages fastest will get the larger share, while the slower will get a smaller
 132 * share.
 133 *
 134 * We use page writeout completions because we are interested in getting rid of
 135 * dirty pages. Having them written out is the primary goal.
 
 
 
 
 
 
 
 136 *
 137 * We introduce a concept of time, a period over which we measure these events,
 138 * because demand can/will vary over time. The length of this period itself is
 139 * measured in page writeback completions.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 140 *
 
 
 141 */
 142static struct prop_descriptor vm_completions;
 143static struct prop_descriptor vm_dirties;
 
 144
 145/*
 146 * couple the period to the dirty_ratio:
 
 
 
 
 
 
 
 
 147 *
 148 *   period/2 ~ roundup_pow_of_two(dirty limit)
 
 149 */
 150static int calc_period_shift(void)
 151{
 152	unsigned long dirty_total;
 
 
 153
 154	if (vm_dirty_bytes)
 155		dirty_total = vm_dirty_bytes / PAGE_SIZE;
 
 156	else
 157		dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) /
 158				100;
 159	return 2 + ilog2(dirty_total - 1);
 
 
 
 160}
 161
 162/*
 163 * update the period when the dirty threshold changes.
 
 
 
 
 164 */
 165static void update_completion_period(void)
 166{
 167	int shift = calc_period_shift();
 168	prop_change_shift(&vm_completions, shift);
 169	prop_change_shift(&vm_dirties, shift);
 
 
 170}
 171
 172int dirty_background_ratio_handler(struct ctl_table *table, int write,
 173		void __user *buffer, size_t *lenp,
 174		loff_t *ppos)
 175{
 176	int ret;
 177
 178	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 179	if (ret == 0 && write)
 180		dirty_background_bytes = 0;
 181	return ret;
 182}
 183
 184int dirty_background_bytes_handler(struct ctl_table *table, int write,
 185		void __user *buffer, size_t *lenp,
 186		loff_t *ppos)
 187{
 188	int ret;
 189
 190	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 191	if (ret == 0 && write)
 192		dirty_background_ratio = 0;
 193	return ret;
 194}
 195
 196int dirty_ratio_handler(struct ctl_table *table, int write,
 197		void __user *buffer, size_t *lenp,
 198		loff_t *ppos)
 199{
 200	int old_ratio = vm_dirty_ratio;
 201	int ret;
 202
 203	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 204	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
 205		update_completion_period();
 206		vm_dirty_bytes = 0;
 207	}
 208	return ret;
 209}
 210
 211
 212int dirty_bytes_handler(struct ctl_table *table, int write,
 213		void __user *buffer, size_t *lenp,
 214		loff_t *ppos)
 215{
 216	unsigned long old_bytes = vm_dirty_bytes;
 217	int ret;
 218
 219	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 220	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
 221		update_completion_period();
 222		vm_dirty_ratio = 0;
 223	}
 224	return ret;
 225}
 226
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 227/*
 228 * Increment the BDI's writeout completion count and the global writeout
 229 * completion count. Called from test_clear_page_writeback().
 230 */
 231static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
 232{
 233	__inc_bdi_stat(bdi, BDI_WRITTEN);
 234	__prop_inc_percpu_max(&vm_completions, &bdi->completions,
 235			      bdi->max_prop_frac);
 
 
 
 
 
 
 
 236}
 237
 238void bdi_writeout_inc(struct backing_dev_info *bdi)
 239{
 240	unsigned long flags;
 241
 242	local_irq_save(flags);
 243	__bdi_writeout_inc(bdi);
 244	local_irq_restore(flags);
 245}
 246EXPORT_SYMBOL_GPL(bdi_writeout_inc);
 247
 248void task_dirty_inc(struct task_struct *tsk)
 249{
 250	prop_inc_single(&vm_dirties, &tsk->dirties);
 251}
 252
 253/*
 254 * Obtain an accurate fraction of the BDI's portion.
 
 255 */
 256static void bdi_writeout_fraction(struct backing_dev_info *bdi,
 257		long *numerator, long *denominator)
 258{
 259	prop_fraction_percpu(&vm_completions, &bdi->completions,
 260				numerator, denominator);
 
 
 
 
 
 
 
 
 
 
 
 
 
 261}
 262
 263static inline void task_dirties_fraction(struct task_struct *tsk,
 264		long *numerator, long *denominator)
 265{
 266	prop_fraction_single(&vm_dirties, &tsk->dirties,
 267				numerator, denominator);
 268}
 269
 270/*
 271 * task_dirty_limit - scale down dirty throttling threshold for one task
 272 *
 273 * task specific dirty limit:
 274 *
 275 *   dirty -= (dirty/8) * p_{t}
 276 *
 277 * To protect light/slow dirtying tasks from heavier/fast ones, we start
 278 * throttling individual tasks before reaching the bdi dirty limit.
 279 * Relatively low thresholds will be allocated to heavy dirtiers. So when
 280 * dirty pages grow large, heavy dirtiers will be throttled first, which will
 281 * effectively curb the growth of dirty pages. Light dirtiers with high enough
 282 * dirty threshold may never get throttled.
 283 */
 284#define TASK_LIMIT_FRACTION 8
 285static unsigned long task_dirty_limit(struct task_struct *tsk,
 286				       unsigned long bdi_dirty)
 287{
 288	long numerator, denominator;
 289	unsigned long dirty = bdi_dirty;
 290	u64 inv = dirty / TASK_LIMIT_FRACTION;
 291
 292	task_dirties_fraction(tsk, &numerator, &denominator);
 293	inv *= numerator;
 294	do_div(inv, denominator);
 295
 296	dirty -= inv;
 297
 298	return max(dirty, bdi_dirty/2);
 299}
 300
 301/* Minimum limit for any task */
 302static unsigned long task_min_dirty_limit(unsigned long bdi_dirty)
 303{
 304	return bdi_dirty - bdi_dirty / TASK_LIMIT_FRACTION;
 
 305}
 
 306
 307/*
 308 *
 
 
 309 */
 310static unsigned int bdi_min_ratio;
 311
 312int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
 313{
 314	int ret = 0;
 315
 316	spin_lock_bh(&bdi_lock);
 317	if (min_ratio > bdi->max_ratio) {
 318		ret = -EINVAL;
 319	} else {
 320		min_ratio -= bdi->min_ratio;
 321		if (bdi_min_ratio + min_ratio < 100) {
 322			bdi_min_ratio += min_ratio;
 323			bdi->min_ratio += min_ratio;
 324		} else {
 325			ret = -EINVAL;
 326		}
 327	}
 328	spin_unlock_bh(&bdi_lock);
 329
 330	return ret;
 331}
 332
 333int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
 334{
 335	int ret = 0;
 336
 337	if (max_ratio > 100)
 338		return -EINVAL;
 339
 340	spin_lock_bh(&bdi_lock);
 341	if (bdi->min_ratio > max_ratio) {
 342		ret = -EINVAL;
 343	} else {
 344		bdi->max_ratio = max_ratio;
 345		bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
 346	}
 347	spin_unlock_bh(&bdi_lock);
 348
 349	return ret;
 350}
 351EXPORT_SYMBOL(bdi_set_max_ratio);
 352
 353/*
 354 * Work out the current dirty-memory clamping and background writeout
 355 * thresholds.
 356 *
 357 * The main aim here is to lower them aggressively if there is a lot of mapped
 358 * memory around.  To avoid stressing page reclaim with lots of unreclaimable
 359 * pages.  It is better to clamp down on writers than to start swapping, and
 360 * performing lots of scanning.
 361 *
 362 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 363 *
 364 * We don't permit the clamping level to fall below 5% - that is getting rather
 365 * excessive.
 366 *
 367 * We make sure that the background writeout level is below the adjusted
 368 * clamping level.
 369 */
 370
 371static unsigned long highmem_dirtyable_memory(unsigned long total)
 372{
 373#ifdef CONFIG_HIGHMEM
 374	int node;
 375	unsigned long x = 0;
 376
 377	for_each_node_state(node, N_HIGH_MEMORY) {
 378		struct zone *z =
 379			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
 380
 381		x += zone_page_state(z, NR_FREE_PAGES) +
 382		     zone_reclaimable_pages(z);
 383	}
 384	/*
 385	 * Make sure that the number of highmem pages is never larger
 386	 * than the number of the total dirtyable memory. This can only
 387	 * occur in very strange VM situations but we want to make sure
 388	 * that this does not occur.
 389	 */
 390	return min(x, total);
 391#else
 392	return 0;
 393#endif
 394}
 395
 396/**
 397 * determine_dirtyable_memory - amount of memory that may be used
 398 *
 399 * Returns the numebr of pages that can currently be freed and used
 400 * by the kernel for direct mappings.
 401 */
 402unsigned long determine_dirtyable_memory(void)
 403{
 404	unsigned long x;
 405
 406	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
 407
 408	if (!vm_highmem_is_dirtyable)
 409		x -= highmem_dirtyable_memory(x);
 410
 411	return x + 1;	/* Ensure that we never return 0 */
 412}
 413
 414static unsigned long hard_dirty_limit(unsigned long thresh)
 
 415{
 416	return max(thresh, global_dirty_limit);
 417}
 418
 419/*
 420 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 421 *
 422 * Calculate the dirty thresholds based on sysctl parameters
 423 * - vm.dirty_background_ratio  or  vm.dirty_background_bytes
 424 * - vm.dirty_ratio             or  vm.dirty_bytes
 425 * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
 426 * real-time tasks.
 427 */
 428void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 
 429{
 430	unsigned long background;
 431	unsigned long dirty;
 432	unsigned long uninitialized_var(available_memory);
 433	struct task_struct *tsk;
 434
 435	if (!vm_dirty_bytes || !dirty_background_bytes)
 436		available_memory = determine_dirtyable_memory();
 437
 438	if (vm_dirty_bytes)
 439		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
 440	else
 441		dirty = (vm_dirty_ratio * available_memory) / 100;
 442
 443	if (dirty_background_bytes)
 444		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
 445	else
 446		background = (dirty_background_ratio * available_memory) / 100;
 447
 448	if (background >= dirty)
 449		background = dirty / 2;
 450	tsk = current;
 451	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
 452		background += background / 4;
 453		dirty += dirty / 4;
 454	}
 455	*pbackground = background;
 456	*pdirty = dirty;
 457	trace_global_dirty_state(background, dirty);
 458}
 459
 460/**
 461 * bdi_dirty_limit - @bdi's share of dirty throttling threshold
 462 * @bdi: the backing_dev_info to query
 463 * @dirty: global dirty limit in pages
 464 *
 465 * Returns @bdi's dirty limit in pages. The term "dirty" in the context of
 466 * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
 467 * And the "limit" in the name is not seriously taken as hard limit in
 468 * balance_dirty_pages().
 
 
 
 
 
 469 *
 470 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
 471 * - starving fast devices
 472 * - piling up dirty pages (that will take long time to sync) on slow devices
 473 *
 474 * The bdi's share of dirty limit will be adapting to its throughput and
 475 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 476 */
 477unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
 478{
 479	u64 bdi_dirty;
 
 
 480	long numerator, denominator;
 
 481
 482	/*
 483	 * Calculate this BDI's share of the dirty ratio.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 484	 */
 485	bdi_writeout_fraction(bdi, &numerator, &denominator);
 
 486
 487	bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
 488	bdi_dirty *= numerator;
 489	do_div(bdi_dirty, denominator);
 
 
 490
 491	bdi_dirty += (dirty * bdi->min_ratio) / 100;
 492	if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
 493		bdi_dirty = dirty * bdi->max_ratio / 100;
 
 
 
 
 
 
 
 
 
 
 494
 495	return bdi_dirty;
 496}
 497
 498static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
 499				       unsigned long elapsed,
 500				       unsigned long written)
 501{
 502	const unsigned long period = roundup_pow_of_two(3 * HZ);
 503	unsigned long avg = bdi->avg_write_bandwidth;
 504	unsigned long old = bdi->write_bandwidth;
 505	u64 bw;
 506
 507	/*
 508	 * bw = written * HZ / elapsed
 509	 *
 510	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
 511	 * write_bandwidth = ---------------------------------------------------
 512	 *                                          period
 513	 */
 514	bw = written - bdi->written_stamp;
 515	bw *= HZ;
 516	if (unlikely(elapsed > period)) {
 517		do_div(bw, elapsed);
 518		avg = bw;
 519		goto out;
 520	}
 521	bw += (u64)bdi->write_bandwidth * (period - elapsed);
 522	bw >>= ilog2(period);
 523
 524	/*
 525	 * one more level of smoothing, for filtering out sudden spikes
 526	 */
 527	if (avg > old && old >= (unsigned long)bw)
 528		avg -= (avg - old) >> 3;
 529
 530	if (avg < old && old <= (unsigned long)bw)
 531		avg += (old - avg) >> 3;
 532
 533out:
 534	bdi->write_bandwidth = bw;
 535	bdi->avg_write_bandwidth = avg;
 536}
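/*
 * Editor's illustration, not part of the kernel source: the same
 * period-weighted running average in userspace, assuming HZ=100 and a
 * 512-jiffy (power-of-two) period so the divide becomes a shift, as in
 * the function above.  EX_HZ, EX_PERIOD and update_bw() are invented.
 */
#include <stdio.h>

#define EX_HZ		100UL
#define EX_PERIOD	512UL	/* roundup_pow_of_two(3 * EX_HZ) */

static unsigned long update_bw(unsigned long old_bw,
			       unsigned long pages_written,
			       unsigned long elapsed)
{
	unsigned long long bw = (unsigned long long)pages_written * EX_HZ;

	if (elapsed > EX_PERIOD)	/* long gap: trust the fresh sample */
		return bw / elapsed;
	/* blend the fresh sample with the old estimate over one period */
	bw += (unsigned long long)old_bw * (EX_PERIOD - elapsed);
	return bw >> 9;			/* ilog2(512) */
}

int main(void)
{
	unsigned long bw = 25600;	/* ~100MB/s in 4KiB pages per second */

	bw = update_bw(bw, 2000, 20);	/* 2000 pages written in 20 jiffies */
	printf("estimated bandwidth: %lu pages/s\n", bw);
	return 0;
}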
 537
 538/*
 539 * The global dirtyable memory and dirty threshold could be suddenly knocked
 540 * down by a large amount (eg. on the startup of KVM in a swapless system).
 541 * This may throw the system into deep dirty exceeded state and throttle
 542 * heavy/light dirtiers alike. To retain good responsiveness, maintain
 543 * global_dirty_limit and let it track down slowly to the knocked-down
 544 * dirty threshold.
 545 */
 546static void update_dirty_limit(unsigned long thresh, unsigned long dirty)
 547{
 548	unsigned long limit = global_dirty_limit;
 549
 550	/*
 551	 * Follow up in one step.
 552	 */
 553	if (limit < thresh) {
 554		limit = thresh;
 555		goto update;
 556	}
 557
 558	/*
 559	 * Follow down slowly. Use the higher one as the target, because thresh
 560	 * may drop below dirty. This is exactly the reason to introduce
 561	 * global_dirty_limit which is guaranteed to lie above the dirty pages.
 562	 */
 563	thresh = max(thresh, dirty);
 564	if (limit > thresh) {
 565		limit -= (limit - thresh) >> 5;
 566		goto update;
 567	}
 568	return;
 569update:
 570	global_dirty_limit = limit;
 571}
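/*
 * Editor's illustration, not part of the kernel source: how the 1/32
 * step above walks the limit down after the threshold suddenly drops.
 * The numbers are invented; each loop iteration stands for one
 * BANDWIDTH_INTERVAL update.
 */
#include <stdio.h>

int main(void)
{
	unsigned long limit = 100000, thresh = 20000;
	int updates = 0;

	while (limit > thresh) {
		unsigned long step = (limit - thresh) >> 5;

		if (!step)		/* step underflows once the gap < 32 */
			break;
		limit -= step;		/* follow down slowly */
		updates++;
	}
	printf("converged near %lu after %d updates\n", limit, updates);
	return 0;
}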
 572
 573static void global_update_bandwidth(unsigned long thresh,
 574				    unsigned long dirty,
 575				    unsigned long now)
 576{
 577	static DEFINE_SPINLOCK(dirty_lock);
 578	static unsigned long update_time;
 579
 580	/*
 581	 * check locklessly first to optimize away locking for the most time
 582	 */
 583	if (time_before(now, update_time + BANDWIDTH_INTERVAL))
 584		return;
 585
 586	spin_lock(&dirty_lock);
 587	if (time_after_eq(now, update_time + BANDWIDTH_INTERVAL)) {
 588		update_dirty_limit(thresh, dirty);
 589		update_time = now;
 590	}
 591	spin_unlock(&dirty_lock);
 592}
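/*
 * Editor's illustration, not part of the kernel source: the
 * lockless-check / locked-recheck pattern used above, sketched with
 * pthreads and wall-clock time (compile with -pthread).  Most callers
 * bail out on the cheap comparison and never touch the lock; INTERVAL
 * and maybe_update() are invented for the example.
 */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

#define INTERVAL 1			/* seconds between real updates */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static time_t update_time;

static void maybe_update(void)
{
	time_t now = time(NULL);

	if (now < update_time + INTERVAL)	/* cheap, lockless check */
		return;

	pthread_mutex_lock(&lock);
	if (now >= update_time + INTERVAL) {	/* recheck under the lock */
		printf("doing the expensive update\n");
		update_time = now;
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	maybe_update();		/* performs the update */
	maybe_update();		/* returns early on the lockless check */
	return 0;
}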
 593
 594void __bdi_update_bandwidth(struct backing_dev_info *bdi,
 595			    unsigned long thresh,
 596			    unsigned long dirty,
 597			    unsigned long bdi_thresh,
 598			    unsigned long bdi_dirty,
 599			    unsigned long start_time)
 600{
 601	unsigned long now = jiffies;
 602	unsigned long elapsed = now - bdi->bw_time_stamp;
 603	unsigned long written;
 604
 605	/*
 606	 * rate-limit, only update once every 200ms.
 607	 */
 608	if (elapsed < BANDWIDTH_INTERVAL)
 609		return;
 610
 611	written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]);
 612
 613	/*
 614	 * Skip quiet periods when disk bandwidth is under-utilized.
 615	 * (at least 1s idle time between two flusher runs)
 616	 */
 617	if (elapsed > HZ && time_before(bdi->bw_time_stamp, start_time))
 618		goto snapshot;
 619
 620	if (thresh)
 621		global_update_bandwidth(thresh, dirty, now);
 622
 623	bdi_update_write_bandwidth(bdi, elapsed, written);
 624
 625snapshot:
 626	bdi->written_stamp = written;
 627	bdi->bw_time_stamp = now;
 628}
 629
 630static void bdi_update_bandwidth(struct backing_dev_info *bdi,
 631				 unsigned long thresh,
 632				 unsigned long dirty,
 633				 unsigned long bdi_thresh,
 634				 unsigned long bdi_dirty,
 635				 unsigned long start_time)
 636{
 637	if (time_is_after_eq_jiffies(bdi->bw_time_stamp + BANDWIDTH_INTERVAL))
 638		return;
 639	spin_lock(&bdi->wb.list_lock);
 640	__bdi_update_bandwidth(bdi, thresh, dirty, bdi_thresh, bdi_dirty,
 641			       start_time);
 642	spin_unlock(&bdi->wb.list_lock);
 643}
 644
 645/*
 646 * balance_dirty_pages() must be called by processes which are generating dirty
 647 * data.  It looks at the number of dirty pages in the machine and will force
 648 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 649 * If we're over `background_thresh' then the writeback threads are woken to
 650 * perform some writeout.
 651 */
 652static void balance_dirty_pages(struct address_space *mapping,
 653				unsigned long write_chunk)
 654{
 655	unsigned long nr_reclaimable, bdi_nr_reclaimable;
 656	unsigned long nr_dirty;  /* = file_dirty + writeback + unstable_nfs */
 657	unsigned long bdi_dirty;
 658	unsigned long background_thresh;
 659	unsigned long dirty_thresh;
 660	unsigned long bdi_thresh;
 661	unsigned long task_bdi_thresh;
 662	unsigned long min_task_bdi_thresh;
 663	unsigned long pages_written = 0;
 664	unsigned long pause = 1;
 665	bool dirty_exceeded = false;
 666	bool clear_dirty_exceeded = true;
 667	struct backing_dev_info *bdi = mapping->backing_dev_info;
 668	unsigned long start_time = jiffies;
 669
 670	for (;;) {
 671		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
 672					global_page_state(NR_UNSTABLE_NFS);
 673		nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);
 674
 675		global_dirty_limits(&background_thresh, &dirty_thresh);
 676
 677		/*
 678		 * Throttle it only when the background writeback cannot
 679		 * catch-up. This avoids (excessively) small writeouts
 680		 * when the bdi limits are ramping up.
 681		 */
 682		if (nr_dirty <= (background_thresh + dirty_thresh) / 2)
 683			break;
 684
 685		bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
 686		min_task_bdi_thresh = task_min_dirty_limit(bdi_thresh);
 687		task_bdi_thresh = task_dirty_limit(current, bdi_thresh);
 688
 689		/*
 690		 * In order to avoid the stacked BDI deadlock we need
 691		 * to ensure we accurately count the 'dirty' pages when
 692		 * the threshold is low.
 693		 *
 694		 * Otherwise it would be possible to get thresh+n pages
 695		 * reported dirty, even though there are thresh-m pages
 696		 * actually dirty; with m+n sitting in the percpu
 697		 * deltas.
 698		 */
 699		if (task_bdi_thresh < 2 * bdi_stat_error(bdi)) {
 700			bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
 701			bdi_dirty = bdi_nr_reclaimable +
 702				    bdi_stat_sum(bdi, BDI_WRITEBACK);
 703		} else {
 704			bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
 705			bdi_dirty = bdi_nr_reclaimable +
 706				    bdi_stat(bdi, BDI_WRITEBACK);
 707		}
 708
 709		/*
 710		 * The bdi thresh is a "soft" limit derived from the
 711		 * global "hard" limit. The former helps to prevent a heavy-IO
 712		 * bdi or process from holding back light ones; the latter is
 713		 * the last-resort safeguard.
 714		 */
 715		dirty_exceeded = (bdi_dirty > task_bdi_thresh) ||
 716				  (nr_dirty > dirty_thresh);
 717		clear_dirty_exceeded = (bdi_dirty <= min_task_bdi_thresh) &&
 718					(nr_dirty <= dirty_thresh);
 719
 720		if (!dirty_exceeded)
 721			break;
 722
 723		if (!bdi->dirty_exceeded)
 724			bdi->dirty_exceeded = 1;
 725
 726		bdi_update_bandwidth(bdi, dirty_thresh, nr_dirty,
 727				     bdi_thresh, bdi_dirty, start_time);
 728
 729		/* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
 730		 * Unstable writes are a feature of certain networked
 731		 * filesystems (i.e. NFS) in which data may have been
 732		 * written to the server's write cache, but has not yet
 733		 * been flushed to permanent storage.
 734		 * Only move pages to writeback if this bdi is over its
 735		 * threshold otherwise wait until the disk writes catch
 736		 * up.
 737		 */
 738		trace_balance_dirty_start(bdi);
 739		if (bdi_nr_reclaimable > task_bdi_thresh) {
 740			pages_written += writeback_inodes_wb(&bdi->wb,
 741							     write_chunk);
 742			trace_balance_dirty_written(bdi, pages_written);
 743			if (pages_written >= write_chunk)
 744				break;		/* We've done our duty */
 745		}
 746		__set_current_state(TASK_UNINTERRUPTIBLE);
 747		io_schedule_timeout(pause);
 748		trace_balance_dirty_wait(bdi);
 749
 750		dirty_thresh = hard_dirty_limit(dirty_thresh);
 751		/*
 752		 * max-pause area. If dirty exceeded but still within this
 753		 * area, no need to sleep for more than 200ms: (a) 8 pages per
 754		 * 200ms is typically more than enough to curb heavy dirtiers;
 755		 * (b) the pause time limit makes the dirtiers more responsive.
 756		 */
 757		if (nr_dirty < dirty_thresh &&
 758		    bdi_dirty < (task_bdi_thresh + bdi_thresh) / 2 &&
 759		    time_after(jiffies, start_time + MAX_PAUSE))
 760			break;
 761
 762		/*
 763		 * Increase the delay for each loop, up to our previous
 764		 * default of taking a 100ms nap.
 765		 */
 766		pause <<= 1;
 767		if (pause > HZ / 10)
 768			pause = HZ / 10;
 769	}
 770
 771	/* Clear dirty_exceeded flag only when no task can exceed the limit */
 772	if (clear_dirty_exceeded && bdi->dirty_exceeded)
 773		bdi->dirty_exceeded = 0;
 774
 775	if (writeback_in_progress(bdi))
 776		return;
 777
 778	/*
 779	 * In laptop mode, we wait until hitting the higher threshold before
 780	 * starting background writeout, and then write out all the way down
 781	 * to the lower threshold.  So slow writers cause minimal disk activity.
 782	 *
 783	 * In normal mode, we start background writeout at the lower
 784	 * background_thresh, to keep the amount of dirty memory low.
 785	 */
 786	if ((laptop_mode && pages_written) ||
 787	    (!laptop_mode && (nr_reclaimable > background_thresh)))
 788		bdi_start_background_writeback(bdi);
 789}
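/*
 * Editor's illustration, not part of the kernel source: the exponential
 * pause back-off used by the loop above, capped at 100ms, assuming
 * HZ=100.  A real caller sleeps for `pause' jiffies on each iteration.
 */
#include <stdio.h>

#define EX_HZ 100UL

int main(void)
{
	unsigned long pause = 1;
	int iter;

	for (iter = 1; iter <= 8; iter++) {
		printf("iteration %d: sleep %lu jiffies\n", iter, pause);
		pause <<= 1;			/* back off exponentially */
		if (pause > EX_HZ / 10)
			pause = EX_HZ / 10;	/* never sleep more than 100ms */
	}
	return 0;
}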
 790
 791void set_page_dirty_balance(struct page *page, int page_mkwrite)
 792{
 793	if (set_page_dirty(page) || page_mkwrite) {
 794		struct address_space *mapping = page_mapping(page);
 795
 796		if (mapping)
 797			balance_dirty_pages_ratelimited(mapping);
 798	}
 799}
 800
 801static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;
 802
 803/**
 804 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 805 * @mapping: address_space which was dirtied
 806 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 807 *
 808 * Processes which are dirtying memory should call in here once for each page
 809 * which was newly dirtied.  The function will periodically check the system's
 810 * dirty state and will initiate writeback if needed.
 811 *
 812 * On really big machines, get_writeback_state is expensive, so try to avoid
 813 * calling it too often (ratelimiting).  But once we're over the dirty memory
 814 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 815 * from overshooting the limit by (ratelimit_pages) each.
 816 */
 817void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 818					unsigned long nr_pages_dirtied)
 819{
 820	struct backing_dev_info *bdi = mapping->backing_dev_info;
 821	unsigned long ratelimit;
 822	unsigned long *p;
 823
 824	if (!bdi_cap_account_dirty(bdi))
 825		return;
 826
 827	ratelimit = ratelimit_pages;
 828	if (mapping->backing_dev_info->dirty_exceeded)
 829		ratelimit = 8;
 830
 831	/*
 832	 * Check the rate limiting. Also, we do not want to throttle real-time
 833	 * tasks in balance_dirty_pages(). Period.
 834	 */
 835	preempt_disable();
 836	p =  &__get_cpu_var(bdp_ratelimits);
 837	*p += nr_pages_dirtied;
 838	if (unlikely(*p >= ratelimit)) {
 839		ratelimit = sync_writeback_pages(*p);
 840		*p = 0;
 841		preempt_enable();
 842		balance_dirty_pages(mapping, ratelimit);
 843		return;
 844	}
 845	preempt_enable();
 846}
 847EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
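/*
 * Editor's illustration, not part of the kernel source: the
 * ratelimiting idea in a single-threaded userspace sketch - dirtied
 * pages accumulate in a cheap counter, and the expensive balancing
 * path only runs once the counter crosses the threshold.  balance()
 * and dirtied() are invented stand-ins.
 */
#include <stdio.h>

static unsigned long counter;		/* stands in for the per-CPU counter */

static void balance(unsigned long chunk)
{
	printf("balance_dirty_pages(%lu)\n", chunk);
}

static void dirtied(unsigned long nr, unsigned long ratelimit)
{
	counter += nr;
	if (counter >= ratelimit) {	/* only now do the expensive check */
		unsigned long chunk = counter;

		counter = 0;
		balance(chunk);
	}
}

int main(void)
{
	unsigned long i;

	for (i = 0; i < 100; i++)
		dirtied(1, 32);		/* fires once every 32 dirtied pages */
	return 0;
}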
 848
 849void throttle_vm_writeout(gfp_t gfp_mask)
 850{
 851	unsigned long background_thresh;
 852	unsigned long dirty_thresh;
 853
 854        for ( ; ; ) {
 855		global_dirty_limits(&background_thresh, &dirty_thresh);
 856
 857                /*
 858                 * Boost the allowable dirty threshold a bit for page
 859                 * allocators so they don't get DoS'ed by heavy writers
 860                 */
 861                dirty_thresh += dirty_thresh / 10;      /* wheeee... */
 862
 863                if (global_page_state(NR_UNSTABLE_NFS) +
 864			global_page_state(NR_WRITEBACK) <= dirty_thresh)
 865                        	break;
 866                congestion_wait(BLK_RW_ASYNC, HZ/10);
 867
 868		/*
 869		 * The caller might hold locks which can prevent IO completion
 870		 * or progress in the filesystem.  So we cannot just sit here
 871		 * waiting for IO to complete.
 872		 */
 873		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
 874			break;
 875        }
 876}
 877
 878/*
 879 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 880 */
 881int dirty_writeback_centisecs_handler(ctl_table *table, int write,
 882	void __user *buffer, size_t *length, loff_t *ppos)
 883{
 884	proc_dointvec(table, write, buffer, length, ppos);
 885	bdi_arm_supers_timer();
 886	return 0;
 887}
 888
 889#ifdef CONFIG_BLOCK
 890void laptop_mode_timer_fn(unsigned long data)
 891{
 892	struct request_queue *q = (struct request_queue *)data;
 893	int nr_pages = global_page_state(NR_FILE_DIRTY) +
 894		global_page_state(NR_UNSTABLE_NFS);
 895
 896	/*
 897	 * We want to write everything out, not just down to the dirty
 898	 * threshold
 899	 */
 900	if (bdi_has_dirty_io(&q->backing_dev_info))
 901		bdi_start_writeback(&q->backing_dev_info, nr_pages);
 902}
 903
 904/*
 905 * We've spun up the disk and we're in laptop mode: schedule writeback
 906 * of all dirty data a few seconds from now.  If the flush is already scheduled
 907 * then push it back - the user is still using the disk.
 908 */
 909void laptop_io_completion(struct backing_dev_info *info)
 910{
 911	mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
 912}
 913
 914/*
 915 * We're in laptop mode and we've just synced. The sync's writes will have
 916 * caused another writeback to be scheduled by laptop_io_completion.
 917 * Nothing needs to be written back anymore, so we unschedule the writeback.
 918 */
 919void laptop_sync_completion(void)
 920{
 921	struct backing_dev_info *bdi;
 922
 923	rcu_read_lock();
 924
 925	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
 926		del_timer(&bdi->laptop_mode_wb_timer);
 927
 928	rcu_read_unlock();
 929}
 930#endif
 931
 932/*
 933 * If ratelimit_pages is too high then we can get into dirty-data overload
 934 * if a large number of processes all perform writes at the same time.
 935 * If it is too low then SMP machines will call the (expensive)
 936 * get_writeback_state too often.
 937 *
 938 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 939 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 940 * thresholds before writeback cuts in.
 941 *
 942 * But the limit should not be set too high.  Because it also controls the
 943 * amount of memory which the balance_dirty_pages() caller has to write back.
 944 * If this is too large then the caller will block on the IO queue all the
 945 * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 946 * will write six megabyte chunks, max.
 947 */
 948
 949void writeback_set_ratelimit(void)
 950{
 951	ratelimit_pages = vm_total_pages / (num_online_cpus() * 32);
 952	if (ratelimit_pages < 16)
 953		ratelimit_pages = 16;
 954	if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
 955		ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
 956}
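/*
 * Editor's illustration, not part of the kernel source: the same
 * clamping in userspace, assuming 4KiB pages.  set_ratelimit() and
 * EX_PAGE_SIZE are invented names.
 */
#include <stdio.h>

#define EX_PAGE_SIZE 4096UL

static unsigned long set_ratelimit(unsigned long total_pages, int cpus)
{
	unsigned long rl = total_pages / (cpus * 32);

	if (rl < 16)				/* lower bound */
		rl = 16;
	if (rl * EX_PAGE_SIZE > 4096 * 1024)	/* cap the chunk at 4MB */
		rl = (4096 * 1024) / EX_PAGE_SIZE;
	return rl;
}

int main(void)
{
	/* 4GB of memory (in 4KiB pages) on an 8-CPU box */
	printf("ratelimit_pages = %lu\n", set_ratelimit(1UL << 20, 8));
	return 0;
}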
 957
 958static int __cpuinit
 959ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
 960{
 961	writeback_set_ratelimit();
 962	return NOTIFY_DONE;
 963}
 964
 965static struct notifier_block __cpuinitdata ratelimit_nb = {
 966	.notifier_call	= ratelimit_handler,
 967	.next		= NULL,
 968};
 969
 970/*
 971 * Called early on to tune the page writeback dirty limits.
 972 *
 973 * We used to scale dirty pages according to how total memory
 974 * related to pages that could be allocated for buffers (by
 975 * comparing nr_free_buffer_pages() to vm_total_pages).
 976 *
 977 * However, that was when we used "dirty_ratio" to scale with
 978 * all memory, and we don't do that any more. "dirty_ratio"
 979 * is now applied to total non-HIGHPAGE memory (by subtracting
 980 * totalhigh_pages from vm_total_pages), and as such we can't
 981 * get into the old insane situation any more where we had
 982 * large amounts of dirty pages compared to a small amount of
 983 * non-HIGHMEM memory.
 984 *
 985 * But we might still want to scale the dirty_ratio by how
 986 * much memory the box has..
 987 */
 988void __init page_writeback_init(void)
 989{
 990	int shift;
 991
 992	writeback_set_ratelimit();
 993	register_cpu_notifier(&ratelimit_nb);
 994
 995	shift = calc_period_shift();
 996	prop_descriptor_init(&vm_completions, shift);
 997	prop_descriptor_init(&vm_dirties, shift);
 998}
 999
1000/**
1001 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
1002 * @mapping: address space structure to write
1003 * @start: starting page index
1004 * @end: ending page index (inclusive)
1005 *
1006 * This function scans the page range from @start to @end (inclusive) and tags
1007 * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
1008 * that write_cache_pages (or whoever calls this function) will then use
1009 * TOWRITE tag to identify pages eligible for writeback.  This mechanism is
1010 * used to avoid livelocking of writeback by a process steadily creating new
1011 * dirty pages in the file (thus it is important for this function to be quick
1012 * so that it can tag pages faster than a dirtying process can create them).
1013 */
1014/*
1015 * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency.
1016 */
1017void tag_pages_for_writeback(struct address_space *mapping,
1018			     pgoff_t start, pgoff_t end)
1019{
1020#define WRITEBACK_TAG_BATCH 4096
1021	unsigned long tagged;
1022
1023	do {
1024		spin_lock_irq(&mapping->tree_lock);
1025		tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
1026				&start, end, WRITEBACK_TAG_BATCH,
1027				PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
1028		spin_unlock_irq(&mapping->tree_lock);
1029		WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
1030		cond_resched();
1031		/* We check 'start' to handle wrapping when end == ~0UL */
1032	} while (tagged >= WRITEBACK_TAG_BATCH && start);
1033}
1034EXPORT_SYMBOL(tag_pages_for_writeback);
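/*
 * Editor's illustration, not part of the kernel source: the batching
 * pattern above in userspace - handle at most BATCH items per "lock
 * hold", then reschedule, and stop once a partial batch is seen.
 * tag_batch() is an invented stand-in for the radix-tree call.
 */
#include <stdio.h>

#define BATCH 4096UL

/* pretend `remaining' pages are still taggable; return how many we tagged */
static unsigned long tag_batch(unsigned long *remaining)
{
	unsigned long n = *remaining < BATCH ? *remaining : BATCH;

	*remaining -= n;
	return n;
}

int main(void)
{
	unsigned long remaining = 10000, tagged;

	do {
		/* in the kernel: take tree_lock, tag, drop the lock, resched */
		tagged = tag_batch(&remaining);
		printf("tagged %lu this pass\n", tagged);
	} while (tagged >= BATCH);
	return 0;
}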
1035
1036/**
1037 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
1038 * @mapping: address space structure to write
1039 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
1040 * @writepage: function called for each page
1041 * @data: data passed to writepage function
1042 *
1043 * If a page is already under I/O, write_cache_pages() skips it, even
1044 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
1045 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
1046 * and msync() need to guarantee that all the data which was dirty at the time
1047 * the call was made get new I/O started against them.  If wbc->sync_mode is
1048 * WB_SYNC_ALL then we were called for data integrity and we must wait for
1049 * existing IO to complete.
1050 *
1051 * To avoid livelocks (when other process dirties new pages), we first tag
1052 * pages which should be written back with TOWRITE tag and only then start
1053 * writing them. For data-integrity sync we have to be careful so that we do
1054 * not miss some pages (e.g., because some other process has cleared TOWRITE
1055 * tag we set). The rule we follow is that TOWRITE tag can be cleared only
1056 * by the process clearing the DIRTY tag (and submitting the page for IO).
1057 */
1058int write_cache_pages(struct address_space *mapping,
1059		      struct writeback_control *wbc, writepage_t writepage,
1060		      void *data)
1061{
1062	int ret = 0;
1063	int done = 0;
1064	struct pagevec pvec;
1065	int nr_pages;
1066	pgoff_t uninitialized_var(writeback_index);
1067	pgoff_t index;
1068	pgoff_t end;		/* Inclusive */
1069	pgoff_t done_index;
1070	int cycled;
1071	int range_whole = 0;
1072	int tag;
1073
1074	pagevec_init(&pvec, 0);
1075	if (wbc->range_cyclic) {
1076		writeback_index = mapping->writeback_index; /* prev offset */
1077		index = writeback_index;
1078		if (index == 0)
1079			cycled = 1;
1080		else
1081			cycled = 0;
1082		end = -1;
1083	} else {
1084		index = wbc->range_start >> PAGE_CACHE_SHIFT;
1085		end = wbc->range_end >> PAGE_CACHE_SHIFT;
1086		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1087			range_whole = 1;
1088		cycled = 1; /* ignore range_cyclic tests */
1089	}
1090	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1091		tag = PAGECACHE_TAG_TOWRITE;
1092	else
1093		tag = PAGECACHE_TAG_DIRTY;
1094retry:
1095	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1096		tag_pages_for_writeback(mapping, index, end);
1097	done_index = index;
1098	while (!done && (index <= end)) {
1099		int i;
1100
1101		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
1102			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1103		if (nr_pages == 0)
1104			break;
1105
1106		for (i = 0; i < nr_pages; i++) {
1107			struct page *page = pvec.pages[i];
1108
1109			/*
1110			 * At this point, the page may be truncated or
1111			 * invalidated (changing page->mapping to NULL), or
1112			 * even swizzled back from swapper_space to tmpfs file
1113			 * mapping. However, page->index will not change
1114			 * because we have a reference on the page.
1115			 */
1116			if (page->index > end) {
1117				/*
1118				 * can't be range_cyclic (1st pass) because
1119				 * end == -1 in that case.
1120				 */
1121				done = 1;
1122				break;
1123			}
1124
1125			done_index = page->index;
1126
1127			lock_page(page);
1128
1129			/*
1130			 * Page truncated or invalidated. We can freely skip it
1131			 * then, even for data integrity operations: the page
1132			 * has disappeared concurrently, so there could be no
1133			 * real expectation of this data integrity operation
1134			 * even if there is now a new, dirty page at the same
1135			 * pagecache address.
1136			 */
1137			if (unlikely(page->mapping != mapping)) {
1138continue_unlock:
1139				unlock_page(page);
1140				continue;
1141			}
1142
1143			if (!PageDirty(page)) {
1144				/* someone wrote it for us */
1145				goto continue_unlock;
1146			}
1147
1148			if (PageWriteback(page)) {
1149				if (wbc->sync_mode != WB_SYNC_NONE)
1150					wait_on_page_writeback(page);
1151				else
1152					goto continue_unlock;
1153			}
1154
1155			BUG_ON(PageWriteback(page));
1156			if (!clear_page_dirty_for_io(page))
1157				goto continue_unlock;
1158
1159			trace_wbc_writepage(wbc, mapping->backing_dev_info);
1160			ret = (*writepage)(page, wbc, data);
1161			if (unlikely(ret)) {
1162				if (ret == AOP_WRITEPAGE_ACTIVATE) {
1163					unlock_page(page);
1164					ret = 0;
1165				} else {
1166					/*
1167					 * done_index is set past this page,
1168					 * so media errors will not choke
1169					 * background writeout for the entire
1170					 * file. This has consequences for
1171					 * range_cyclic semantics (ie. it may
1172					 * not be suitable for data integrity
1173					 * writeout).
1174					 */
1175					done_index = page->index + 1;
1176					done = 1;
1177					break;
1178				}
1179			}
1180
1181			/*
1182			 * We stop writing back only if we are not doing
1183			 * integrity sync. In case of integrity sync we have to
1184			 * keep going until we have written all the pages
1185			 * we tagged for writeback prior to entering this loop.
1186			 */
1187			if (--wbc->nr_to_write <= 0 &&
1188			    wbc->sync_mode == WB_SYNC_NONE) {
1189				done = 1;
1190				break;
1191			}
1192		}
1193		pagevec_release(&pvec);
1194		cond_resched();
1195	}
1196	if (!cycled && !done) {
1197		/*
1198		 * range_cyclic:
1199		 * We hit the last page and there is more work to be done: wrap
1200		 * back to the start of the file
1201		 */
1202		cycled = 1;
1203		index = 0;
1204		end = writeback_index - 1;
1205		goto retry;
1206	}
1207	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1208		mapping->writeback_index = done_index;
1209
1210	return ret;
1211}
1212EXPORT_SYMBOL(write_cache_pages);
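/*
 * Editor's illustration, not part of the kernel source: the
 * range_cyclic retry above reduced to index arithmetic.  scan() is an
 * invented stand-in for one pass over [index, end].
 */
#include <stdio.h>

static void scan(unsigned long index, unsigned long end)
{
	printf("scan [%lu, %lu]\n", index, end);
}

int main(void)
{
	unsigned long writeback_index = 700;	/* where the last walk stopped */
	unsigned long index = writeback_index;
	int cycled = (index == 0);

	scan(index, (unsigned long)-1);		/* first pass: up to end of file */
	if (!cycled)				/* wrap back to cover [0, start) */
		scan(0, writeback_index - 1);
	return 0;
}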
1213
1214/*
1215 * Function used by generic_writepages to call the real writepage
1216 * function and set the mapping flags on error
1217 */
1218static int __writepage(struct page *page, struct writeback_control *wbc,
1219		       void *data)
1220{
1221	struct address_space *mapping = data;
1222	int ret = mapping->a_ops->writepage(page, wbc);
1223	mapping_set_error(mapping, ret);
1224	return ret;
1225}
1226
1227/**
1228 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
1229 * @mapping: address space structure to write
1230 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
1231 *
1232 * This is a library function, which implements the writepages()
1233 * address_space_operation.
1234 */
1235int generic_writepages(struct address_space *mapping,
1236		       struct writeback_control *wbc)
1237{
1238	struct blk_plug plug;
1239	int ret;
1240
1241	/* deal with chardevs and other special file */
1242	if (!mapping->a_ops->writepage)
1243		return 0;
1244
1245	blk_start_plug(&plug);
1246	ret = write_cache_pages(mapping, wbc, __writepage, mapping);
1247	blk_finish_plug(&plug);
1248	return ret;
1249}
1250
1251EXPORT_SYMBOL(generic_writepages);
1252
1253int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
1254{
1255	int ret;
1256
1257	if (wbc->nr_to_write <= 0)
1258		return 0;
1259	if (mapping->a_ops->writepages)
1260		ret = mapping->a_ops->writepages(mapping, wbc);
1261	else
1262		ret = generic_writepages(mapping, wbc);
1263	return ret;
1264}
1265
1266/**
1267 * write_one_page - write out a single page and optionally wait on I/O
1268 * @page: the page to write
1269 * @wait: if true, wait on writeout
1270 *
1271 * The page must be locked by the caller and will be unlocked upon return.
1272 *
1273 * write_one_page() returns a negative error code if I/O failed.
1274 */
1275int write_one_page(struct page *page, int wait)
1276{
1277	struct address_space *mapping = page->mapping;
1278	int ret = 0;
1279	struct writeback_control wbc = {
1280		.sync_mode = WB_SYNC_ALL,
1281		.nr_to_write = 1,
1282	};
1283
1284	BUG_ON(!PageLocked(page));
1285
1286	if (wait)
1287		wait_on_page_writeback(page);
1288
1289	if (clear_page_dirty_for_io(page)) {
1290		page_cache_get(page);
1291		ret = mapping->a_ops->writepage(page, &wbc);
1292		if (ret == 0 && wait) {
1293			wait_on_page_writeback(page);
1294			if (PageError(page))
1295				ret = -EIO;
1296		}
1297		page_cache_release(page);
1298	} else {
1299		unlock_page(page);
1300	}
1301	return ret;
1302}
1303EXPORT_SYMBOL(write_one_page);
1304
1305/*
1306 * For address_spaces which do not use buffers nor write back.
1307 */
1308int __set_page_dirty_no_writeback(struct page *page)
1309{
1310	if (!PageDirty(page))
1311		return !TestSetPageDirty(page);
1312	return 0;
1313}
1314
1315/*
1316 * Helper function for set_page_dirty family.
1317 * NOTE: This relies on being atomic wrt interrupts.
1318 */
1319void account_page_dirtied(struct page *page, struct address_space *mapping)
1320{
1321	if (mapping_cap_account_dirty(mapping)) {
1322		__inc_zone_page_state(page, NR_FILE_DIRTY);
1323		__inc_zone_page_state(page, NR_DIRTIED);
1324		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
1325		task_dirty_inc(current);
1326		task_io_account_write(PAGE_CACHE_SIZE);
1327	}
1328}
1329EXPORT_SYMBOL(account_page_dirtied);
1330
1331/*
1332 * Helper function for set_page_writeback family.
1333 * NOTE: Unlike account_page_dirtied this does not rely on being atomic
1334 * wrt interrupts.
1335 */
1336void account_page_writeback(struct page *page)
1337{
1338	inc_zone_page_state(page, NR_WRITEBACK);
1339}
1340EXPORT_SYMBOL(account_page_writeback);
1341
1342/*
1343 * For address_spaces which do not use buffers.  Just tag the page as dirty in
1344 * its radix tree.
1345 *
1346 * This is also used when a single buffer is being dirtied: we want to set the
1347 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
1348 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
1349 *
1350 * Most callers have locked the page, which pins the address_space in memory.
1351 * But zap_pte_range() does not lock the page, however in that case the
1352 * mapping is pinned by the vma's ->vm_file reference.
1353 *
1354 * We take care to handle the case where the page was truncated from the
1355 * mapping by re-checking page_mapping() inside tree_lock.
1356 */
1357int __set_page_dirty_nobuffers(struct page *page)
1358{
1359	if (!TestSetPageDirty(page)) {
1360		struct address_space *mapping = page_mapping(page);
1361		struct address_space *mapping2;
1362
1363		if (!mapping)
1364			return 1;
1365
1366		spin_lock_irq(&mapping->tree_lock);
1367		mapping2 = page_mapping(page);
1368		if (mapping2) { /* Race with truncate? */
1369			BUG_ON(mapping2 != mapping);
1370			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
1371			account_page_dirtied(page, mapping);
1372			radix_tree_tag_set(&mapping->page_tree,
1373				page_index(page), PAGECACHE_TAG_DIRTY);
1374		}
1375		spin_unlock_irq(&mapping->tree_lock);
1376		if (mapping->host) {
1377			/* !PageAnon && !swapper_space */
1378			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1379		}
1380		return 1;
1381	}
1382	return 0;
1383}
1384EXPORT_SYMBOL(__set_page_dirty_nobuffers);
1385
1386/*
1387 * When a writepage implementation decides that it doesn't want to write this
1388 * page for some reason, it should redirty the locked page via
1389 * redirty_page_for_writepage() and it should then unlock the page and return 0
1390 */
1391int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
1392{
1393	wbc->pages_skipped++;
1394	return __set_page_dirty_nobuffers(page);
1395}
1396EXPORT_SYMBOL(redirty_page_for_writepage);
1397
1398/*
1399 * Dirty a page.
1400 *
1401 * For pages with a mapping this should be done under the page lock
1402 * for the benefit of asynchronous memory errors, which prefer a consistent
1403 * dirty state. This rule can be broken in some special cases,
1404 * but it is better not to.
1405 *
1406 * If the mapping doesn't provide a set_page_dirty a_op, then
1407 * just fall through and assume that it wants buffer_heads.
1408 */
1409int set_page_dirty(struct page *page)
1410{
1411	struct address_space *mapping = page_mapping(page);
1412
1413	if (likely(mapping)) {
1414		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
1415		/*
1416		 * readahead/lru_deactivate_page could remain
1417		 * PG_readahead/PG_reclaim due to race with end_page_writeback
1418		 * About readahead, if the page is written, the flags would be
1419		 * reset. So no problem.
1420		 * About lru_deactivate_page, if the page is redirtied, the flag
1421		 * will be reset. So no problem. But if the page is used by readahead
1422		 * it will confuse readahead and make it restart the size rampup
1423		 * process. But it's a trivial problem.
1424		 */
1425		ClearPageReclaim(page);
1426#ifdef CONFIG_BLOCK
1427		if (!spd)
1428			spd = __set_page_dirty_buffers;
1429#endif
1430		return (*spd)(page);
1431	}
1432	if (!PageDirty(page)) {
1433		if (!TestSetPageDirty(page))
1434			return 1;
1435	}
1436	return 0;
1437}
1438EXPORT_SYMBOL(set_page_dirty);
1439
1440/*
1441 * set_page_dirty() is racy if the caller has no reference against
1442 * page->mapping->host, and if the page is unlocked.  This is because another
1443 * CPU could truncate the page off the mapping and then free the mapping.
1444 *
1445 * Usually, the page _is_ locked, or the caller is a user-space process which
1446 * holds a reference on the inode by having an open file.
1447 *
1448 * In other cases, the page should be locked before running set_page_dirty().
1449 */
1450int set_page_dirty_lock(struct page *page)
1451{
1452	int ret;
1453
1454	lock_page(page);
1455	ret = set_page_dirty(page);
1456	unlock_page(page);
1457	return ret;
1458}
1459EXPORT_SYMBOL(set_page_dirty_lock);
1460
1461/*
1462 * Clear a page's dirty flag, while caring for dirty memory accounting.
1463 * Returns true if the page was previously dirty.
1464 *
1465 * This is for preparing to put the page under writeout.  We leave the page
1466 * tagged as dirty in the radix tree so that a concurrent write-for-sync
1467 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
1468 * implementation will run either set_page_writeback() or set_page_dirty(),
1469 * at which stage we bring the page's dirty flag and radix-tree dirty tag
1470 * back into sync.
1471 *
1472 * This incoherency between the page's dirty flag and radix-tree tag is
1473 * unfortunate, but it only exists while the page is locked.
1474 */
1475int clear_page_dirty_for_io(struct page *page)
1476{
1477	struct address_space *mapping = page_mapping(page);
1478
1479	BUG_ON(!PageLocked(page));
1480
1481	if (mapping && mapping_cap_account_dirty(mapping)) {
1482		/*
1483		 * Yes, Virginia, this is indeed insane.
1484		 *
1485		 * We use this sequence to make sure that
1486		 *  (a) we account for dirty stats properly
1487		 *  (b) we tell the low-level filesystem to
1488		 *      mark the whole page dirty if it was
1489		 *      dirty in a pagetable. Only to then
1490		 *  (c) clean the page again and return 1 to
1491		 *      cause the writeback.
1492		 *
1493		 * This way we avoid all nasty races with the
1494		 * dirty bit in multiple places and clearing
1495		 * them concurrently from different threads.
1496		 *
1497		 * Note! Normally the "set_page_dirty(page)"
1498		 * has no effect on the actual dirty bit - since
1499		 * that will already usually be set. But we
1500		 * need the side effects, and it can help us
1501		 * avoid races.
1502		 *
1503		 * We basically use the page "master dirty bit"
1504		 * as a serialization point for all the different
1505		 * threads doing their things.
1506		 */
1507		if (page_mkclean(page))
1508			set_page_dirty(page);
1509		/*
1510		 * We carefully synchronise fault handlers against
1511		 * installing a dirty pte and marking the page dirty
1512		 * at this point. We do this by having them hold the
1513		 * page lock at some point after installing their
1514		 * pte, but before marking the page dirty.
1515		 * Pages are always locked coming in here, so we get
1516		 * the desired exclusion. See mm/memory.c:do_wp_page()
1517		 * for more comments.
1518		 */
1519		if (TestClearPageDirty(page)) {
1520			dec_zone_page_state(page, NR_FILE_DIRTY);
1521			dec_bdi_stat(mapping->backing_dev_info,
1522					BDI_RECLAIMABLE);
1523			return 1;
1524		}
1525		return 0;
1526	}
1527	return TestClearPageDirty(page);
1528}
1529EXPORT_SYMBOL(clear_page_dirty_for_io);
1530
1531int test_clear_page_writeback(struct page *page)
1532{
1533	struct address_space *mapping = page_mapping(page);
1534	int ret;
1535
1536	if (mapping) {
1537		struct backing_dev_info *bdi = mapping->backing_dev_info;
1538		unsigned long flags;
1539
1540		spin_lock_irqsave(&mapping->tree_lock, flags);
1541		ret = TestClearPageWriteback(page);
1542		if (ret) {
1543			radix_tree_tag_clear(&mapping->page_tree,
1544						page_index(page),
1545						PAGECACHE_TAG_WRITEBACK);
1546			if (bdi_cap_account_writeback(bdi)) {
1547				__dec_bdi_stat(bdi, BDI_WRITEBACK);
1548				__bdi_writeout_inc(bdi);
1549			}
1550		}
1551		spin_unlock_irqrestore(&mapping->tree_lock, flags);
1552	} else {
1553		ret = TestClearPageWriteback(page);
1554	}
1555	if (ret) {
1556		dec_zone_page_state(page, NR_WRITEBACK);
1557		inc_zone_page_state(page, NR_WRITTEN);
1558	}
1559	return ret;
1560}
1561
1562int test_set_page_writeback(struct page *page)
1563{
1564	struct address_space *mapping = page_mapping(page);
1565	int ret;
1566
1567	if (mapping) {
1568		struct backing_dev_info *bdi = mapping->backing_dev_info;
1569		unsigned long flags;
1570
1571		spin_lock_irqsave(&mapping->tree_lock, flags);
1572		ret = TestSetPageWriteback(page);
1573		if (!ret) {
1574			radix_tree_tag_set(&mapping->page_tree,
1575						page_index(page),
1576						PAGECACHE_TAG_WRITEBACK);
1577			if (bdi_cap_account_writeback(bdi))
1578				__inc_bdi_stat(bdi, BDI_WRITEBACK);
1579		}
1580		if (!PageDirty(page))
1581			radix_tree_tag_clear(&mapping->page_tree,
1582						page_index(page),
1583						PAGECACHE_TAG_DIRTY);
1584		radix_tree_tag_clear(&mapping->page_tree,
1585				     page_index(page),
1586				     PAGECACHE_TAG_TOWRITE);
1587		spin_unlock_irqrestore(&mapping->tree_lock, flags);
1588	} else {
1589		ret = TestSetPageWriteback(page);
1590	}
1591	if (!ret)
1592		account_page_writeback(page);
1593	return ret;
1594
1595}
1596EXPORT_SYMBOL(test_set_page_writeback);
1597
1598/*
1599 * Return true if any of the pages in the mapping are marked with the
1600 * passed tag.
1601 */
1602int mapping_tagged(struct address_space *mapping, int tag)
1603{
1604	return radix_tree_tagged(&mapping->page_tree, tag);
1605}
1606EXPORT_SYMBOL(mapping_tagged);