v4.6
 
   1/*
   2 * mm/page-writeback.c
   3 *
   4 * Copyright (C) 2002, Linus Torvalds.
   5 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
   6 *
   7 * Contains functions related to writing back dirty pages at the
   8 * address_space level.
   9 *
  10 * 10Apr2002	Andrew Morton
  11 *		Initial version
  12 */
  13
  14#include <linux/kernel.h>
  15#include <linux/export.h>
  16#include <linux/spinlock.h>
  17#include <linux/fs.h>
  18#include <linux/mm.h>
  19#include <linux/swap.h>
  20#include <linux/slab.h>
  21#include <linux/pagemap.h>
  22#include <linux/writeback.h>
  23#include <linux/init.h>
  24#include <linux/backing-dev.h>
  25#include <linux/task_io_accounting_ops.h>
  26#include <linux/blkdev.h>
  27#include <linux/mpage.h>
  28#include <linux/rmap.h>
  29#include <linux/percpu.h>
  30#include <linux/notifier.h>
  31#include <linux/smp.h>
  32#include <linux/sysctl.h>
  33#include <linux/cpu.h>
  34#include <linux/syscalls.h>
  35#include <linux/buffer_head.h> /* __set_page_dirty_buffers */
  36#include <linux/pagevec.h>
  37#include <linux/timer.h>
  38#include <linux/sched/rt.h>
  39#include <linux/mm_inline.h>
  40#include <trace/events/writeback.h>
  41
  42#include "internal.h"
  43
  44/*
  45 * Sleep at most 200ms at a time in balance_dirty_pages().
  46 */
  47#define MAX_PAUSE		max(HZ/5, 1)
  48
  49/*
  50 * Try to keep balance_dirty_pages() call intervals higher than this many pages
   51 * by raising pause time to max_pause when the interval falls below it.
  52 */
  53#define DIRTY_POLL_THRESH	(128 >> (PAGE_SHIFT - 10))
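/*
 * Illustrative arithmetic (assuming 4KB pages, i.e. PAGE_SHIFT == 12):
 * DIRTY_POLL_THRESH = 128 >> (12 - 10) = 32 pages, i.e. the constant is
 * expressed in KB and the shift converts it to pages.
 */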
  54
  55/*
  56 * Estimate write bandwidth at 200ms intervals.
  57 */
  58#define BANDWIDTH_INTERVAL	max(HZ/5, 1)
  59
  60#define RATELIMIT_CALC_SHIFT	10
  61
  62/*
  63 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
  64 * will look to see if it needs to force writeback or throttling.
  65 */
  66static long ratelimit_pages = 32;
  67
  68/* The following parameters are exported via /proc/sys/vm */
  69
  70/*
  71 * Start background writeback (via writeback threads) at this percentage
  72 */
  73int dirty_background_ratio = 10;
  74
  75/*
  76 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
  77 * dirty_background_ratio * the amount of dirtyable memory
  78 */
  79unsigned long dirty_background_bytes;
  80
  81/*
  82 * free highmem will not be subtracted from the total free memory
  83 * for calculating free ratios if vm_highmem_is_dirtyable is true
  84 */
  85int vm_highmem_is_dirtyable;
  86
  87/*
  88 * The generator of dirty data starts writeback at this percentage
  89 */
  90int vm_dirty_ratio = 20;
  91
  92/*
  93 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
  94 * vm_dirty_ratio * the amount of dirtyable memory
  95 */
  96unsigned long vm_dirty_bytes;
  97
  98/*
  99 * The interval between `kupdate'-style writebacks
 100 */
 101unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
 102
 103EXPORT_SYMBOL_GPL(dirty_writeback_interval);
 104
 105/*
 106 * The longest time for which data is allowed to remain dirty
 107 */
 108unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
 109
 110/*
 111 * Flag that makes the machine dump writes/reads and block dirtyings.
 112 */
 113int block_dump;
 114
 115/*
 116 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 117 * a full sync is triggered after this time elapses without any disk activity.
 118 */
 119int laptop_mode;
 120
 121EXPORT_SYMBOL(laptop_mode);
 122
 123/* End of sysctl-exported parameters */
 124
 125struct wb_domain global_wb_domain;
 126
 127/* consolidated parameters for balance_dirty_pages() and its subroutines */
 128struct dirty_throttle_control {
 129#ifdef CONFIG_CGROUP_WRITEBACK
 130	struct wb_domain	*dom;
 131	struct dirty_throttle_control *gdtc;	/* only set in memcg dtc's */
 132#endif
 133	struct bdi_writeback	*wb;
 134	struct fprop_local_percpu *wb_completions;
 135
 136	unsigned long		avail;		/* dirtyable */
 137	unsigned long		dirty;		/* file_dirty + write + nfs */
 138	unsigned long		thresh;		/* dirty threshold */
 139	unsigned long		bg_thresh;	/* dirty background threshold */
 140
 141	unsigned long		wb_dirty;	/* per-wb counterparts */
 142	unsigned long		wb_thresh;
 143	unsigned long		wb_bg_thresh;
 144
 145	unsigned long		pos_ratio;
 146};
 147
 148/*
 149 * Length of period for aging writeout fractions of bdis. This is an
 150 * arbitrarily chosen number. The longer the period, the slower fractions will
 151 * reflect changes in current writeout rate.
 152 */
 153#define VM_COMPLETIONS_PERIOD_LEN (3*HZ)
 154
 155#ifdef CONFIG_CGROUP_WRITEBACK
 156
 157#define GDTC_INIT(__wb)		.wb = (__wb),				\
 158				.dom = &global_wb_domain,		\
 159				.wb_completions = &(__wb)->completions
 160
 161#define GDTC_INIT_NO_WB		.dom = &global_wb_domain
 162
 163#define MDTC_INIT(__wb, __gdtc)	.wb = (__wb),				\
 164				.dom = mem_cgroup_wb_domain(__wb),	\
 165				.wb_completions = &(__wb)->memcg_completions, \
 166				.gdtc = __gdtc
 167
 168static bool mdtc_valid(struct dirty_throttle_control *dtc)
 169{
 170	return dtc->dom;
 171}
 172
 173static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
 174{
 175	return dtc->dom;
 176}
 177
 178static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
 179{
 180	return mdtc->gdtc;
 181}
 182
 183static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
 184{
 185	return &wb->memcg_completions;
 186}
 187
 188static void wb_min_max_ratio(struct bdi_writeback *wb,
 189			     unsigned long *minp, unsigned long *maxp)
 190{
 191	unsigned long this_bw = wb->avg_write_bandwidth;
 192	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
 193	unsigned long long min = wb->bdi->min_ratio;
 194	unsigned long long max = wb->bdi->max_ratio;
 195
 196	/*
 197	 * @wb may already be clean by the time control reaches here and
 198	 * the total may not include its bw.
 199	 */
 200	if (this_bw < tot_bw) {
 201		if (min) {
 202			min *= this_bw;
 203			do_div(min, tot_bw);
 204		}
 205		if (max < 100) {
 206			max *= this_bw;
 207			do_div(max, tot_bw);
 208		}
 209	}
 210
 211	*minp = min;
 212	*maxp = max;
 213}
 214
 215#else	/* CONFIG_CGROUP_WRITEBACK */
 216
 217#define GDTC_INIT(__wb)		.wb = (__wb),                           \
 218				.wb_completions = &(__wb)->completions
 219#define GDTC_INIT_NO_WB
 220#define MDTC_INIT(__wb, __gdtc)
 221
 222static bool mdtc_valid(struct dirty_throttle_control *dtc)
 223{
 224	return false;
 225}
 226
 227static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
 228{
 229	return &global_wb_domain;
 230}
 231
 232static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
 233{
 234	return NULL;
 235}
 236
 237static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
 238{
 239	return NULL;
 240}
 241
 242static void wb_min_max_ratio(struct bdi_writeback *wb,
 243			     unsigned long *minp, unsigned long *maxp)
 244{
 245	*minp = wb->bdi->min_ratio;
 246	*maxp = wb->bdi->max_ratio;
 247}
 248
 249#endif	/* CONFIG_CGROUP_WRITEBACK */
 250
 251/*
 252 * In a memory zone, there is a certain amount of pages we consider
 253 * available for the page cache, which is essentially the number of
 254 * free and reclaimable pages, minus some zone reserves to protect
 255 * lowmem and the ability to uphold the zone's watermarks without
 256 * requiring writeback.
 257 *
  258 * This number of dirtyable pages is the base value to which the
  259 * user-configurable dirty ratio is applied to get the effective number of
  260 * pages that are allowed to be actually dirtied.  Per individual zone, or
 261 * globally by using the sum of dirtyable pages over all zones.
 262 *
 263 * Because the user is allowed to specify the dirty limit globally as
 264 * absolute number of bytes, calculating the per-zone dirty limit can
 265 * require translating the configured limit into a percentage of
 266 * global dirtyable memory first.
 267 */
 268
 269/**
 270 * zone_dirtyable_memory - number of dirtyable pages in a zone
 271 * @zone: the zone
 272 *
 273 * Returns the zone's number of pages potentially available for dirty
 274 * page cache.  This is the base value for the per-zone dirty limits.
 275 */
 276static unsigned long zone_dirtyable_memory(struct zone *zone)
 277{
 278	unsigned long nr_pages;
 279
 280	nr_pages = zone_page_state(zone, NR_FREE_PAGES);
 281	/*
 282	 * Pages reserved for the kernel should not be considered
 283	 * dirtyable, to prevent a situation where reclaim has to
 284	 * clean pages in order to balance the zones.
 285	 */
 286	nr_pages -= min(nr_pages, zone->totalreserve_pages);
 287
 288	nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
 289	nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);
 290
 291	return nr_pages;
 292}
 293
 294static unsigned long highmem_dirtyable_memory(unsigned long total)
 295{
 296#ifdef CONFIG_HIGHMEM
 297	int node;
 298	unsigned long x = 0;
 299
 300	for_each_node_state(node, N_HIGH_MEMORY) {
 301		struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
 302
 303		x += zone_dirtyable_memory(z);
 304	}
 305	/*
 306	 * Unreclaimable memory (kernel memory or anonymous memory
 307	 * without swap) can bring down the dirtyable pages below
 308	 * the zone's dirty balance reserve and the above calculation
 309	 * will underflow.  However we still want to add in nodes
 310	 * which are below threshold (negative values) to get a more
 311	 * accurate calculation but make sure that the total never
 312	 * underflows.
 313	 */
 314	if ((long)x < 0)
 315		x = 0;
 316
 317	/*
 318	 * Make sure that the number of highmem pages is never larger
 319	 * than the number of the total dirtyable memory. This can only
 320	 * occur in very strange VM situations but we want to make sure
 321	 * that this does not occur.
 322	 */
 323	return min(x, total);
 324#else
 325	return 0;
 326#endif
 327}
 328
 329/**
 330 * global_dirtyable_memory - number of globally dirtyable pages
 331 *
 332 * Returns the global number of pages potentially available for dirty
 333 * page cache.  This is the base value for the global dirty limits.
 334 */
 335static unsigned long global_dirtyable_memory(void)
 336{
 337	unsigned long x;
 338
 339	x = global_page_state(NR_FREE_PAGES);
 340	/*
 341	 * Pages reserved for the kernel should not be considered
 342	 * dirtyable, to prevent a situation where reclaim has to
 343	 * clean pages in order to balance the zones.
 344	 */
 345	x -= min(x, totalreserve_pages);
 346
 347	x += global_page_state(NR_INACTIVE_FILE);
 348	x += global_page_state(NR_ACTIVE_FILE);
 349
 350	if (!vm_highmem_is_dirtyable)
 351		x -= highmem_dirtyable_memory(x);
 352
 353	return x + 1;	/* Ensure that we never return 0 */
 354}
 355
 356/**
 357 * domain_dirty_limits - calculate thresh and bg_thresh for a wb_domain
 358 * @dtc: dirty_throttle_control of interest
 359 *
 360 * Calculate @dtc->thresh and ->bg_thresh considering
 361 * vm_dirty_{bytes|ratio} and dirty_background_{bytes|ratio}.  The caller
 362 * must ensure that @dtc->avail is set before calling this function.  The
 363 * dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
 364 * real-time tasks.
 365 */
 366static void domain_dirty_limits(struct dirty_throttle_control *dtc)
 367{
 368	const unsigned long available_memory = dtc->avail;
 369	struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
 370	unsigned long bytes = vm_dirty_bytes;
 371	unsigned long bg_bytes = dirty_background_bytes;
 372	unsigned long ratio = vm_dirty_ratio;
 373	unsigned long bg_ratio = dirty_background_ratio;
 374	unsigned long thresh;
 375	unsigned long bg_thresh;
 376	struct task_struct *tsk;
 377
 378	/* gdtc is !NULL iff @dtc is for memcg domain */
 379	if (gdtc) {
 380		unsigned long global_avail = gdtc->avail;
 381
 382		/*
 383		 * The byte settings can't be applied directly to memcg
 384		 * domains.  Convert them to ratios by scaling against
 385		 * globally available memory.
 386		 */
 387		if (bytes)
 388			ratio = min(DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 /
 389				    global_avail, 100UL);
 390		if (bg_bytes)
 391			bg_ratio = min(DIV_ROUND_UP(bg_bytes, PAGE_SIZE) * 100 /
 392				       global_avail, 100UL);
 393		bytes = bg_bytes = 0;
 394	}
 395
 396	if (bytes)
 397		thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
 398	else
 399		thresh = (ratio * available_memory) / 100;
 400
 401	if (bg_bytes)
 402		bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
 403	else
 404		bg_thresh = (bg_ratio * available_memory) / 100;
 405
 406	if (bg_thresh >= thresh)
 407		bg_thresh = thresh / 2;
 408	tsk = current;
 409	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
 410		bg_thresh += bg_thresh / 4;
 411		thresh += thresh / 4;
 412	}
 413	dtc->thresh = thresh;
 414	dtc->bg_thresh = bg_thresh;
 415
 416	/* we should eventually report the domain in the TP */
 417	if (!gdtc)
 418		trace_global_dirty_state(bg_thresh, thresh);
 419}
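/*
 * Worked example with made-up numbers (assuming the byte interfaces are
 * unset): with vm_dirty_bytes == 0, dirty_background_bytes == 0,
 * vm_dirty_ratio == 20, dirty_background_ratio == 10 and
 * dtc->avail == 1,000,000 dirtyable pages:
 *
 *	thresh    = 20 * 1,000,000 / 100 = 200,000 pages
 *	bg_thresh = 10 * 1,000,000 / 100 = 100,000 pages
 *
 * A PF_LESS_THROTTLE or real-time caller would see both values lifted by
 * 1/4, to 250,000 and 125,000 pages respectively.
 */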
 420
 421/**
 422 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 423 * @pbackground: out parameter for bg_thresh
 424 * @pdirty: out parameter for thresh
 425 *
 426 * Calculate bg_thresh and thresh for global_wb_domain.  See
 427 * domain_dirty_limits() for details.
 428 */
 429void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 430{
 431	struct dirty_throttle_control gdtc = { GDTC_INIT_NO_WB };
 432
 433	gdtc.avail = global_dirtyable_memory();
 434	domain_dirty_limits(&gdtc);
 435
 436	*pbackground = gdtc.bg_thresh;
 437	*pdirty = gdtc.thresh;
 438}
 439
 440/**
 441 * zone_dirty_limit - maximum number of dirty pages allowed in a zone
 442 * @zone: the zone
 443 *
 444 * Returns the maximum number of dirty pages allowed in a zone, based
 445 * on the zone's dirtyable memory.
 446 */
 447static unsigned long zone_dirty_limit(struct zone *zone)
 448{
 449	unsigned long zone_memory = zone_dirtyable_memory(zone);
 450	struct task_struct *tsk = current;
 451	unsigned long dirty;
 452
 453	if (vm_dirty_bytes)
 454		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
 455			zone_memory / global_dirtyable_memory();
 456	else
 457		dirty = vm_dirty_ratio * zone_memory / 100;
 458
 459	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk))
 460		dirty += dirty / 4;
 461
 462	return dirty;
 463}
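/*
 * Worked example with made-up numbers: with vm_dirty_bytes == 0,
 * vm_dirty_ratio == 20 and zone_dirtyable_memory() == 100,000 pages,
 * the zone may hold up to 20 * 100,000 / 100 = 20,000 dirty pages
 * (25,000 for PF_LESS_THROTTLE or real-time tasks).
 */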
 464
 465/**
 466 * zone_dirty_ok - tells whether a zone is within its dirty limits
 467 * @zone: the zone to check
 468 *
 469 * Returns %true when the dirty pages in @zone are within the zone's
 470 * dirty limit, %false if the limit is exceeded.
 471 */
 472bool zone_dirty_ok(struct zone *zone)
 473{
 474	unsigned long limit = zone_dirty_limit(zone);
 475
 476	return zone_page_state(zone, NR_FILE_DIRTY) +
 477	       zone_page_state(zone, NR_UNSTABLE_NFS) +
 478	       zone_page_state(zone, NR_WRITEBACK) <= limit;
 479}
 480
 481int dirty_background_ratio_handler(struct ctl_table *table, int write,
 482		void __user *buffer, size_t *lenp,
 483		loff_t *ppos)
 484{
 485	int ret;
 486
 487	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 488	if (ret == 0 && write)
 489		dirty_background_bytes = 0;
 490	return ret;
 491}
 492
 493int dirty_background_bytes_handler(struct ctl_table *table, int write,
 494		void __user *buffer, size_t *lenp,
 495		loff_t *ppos)
 496{
 497	int ret;
 498
 499	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 500	if (ret == 0 && write)
 501		dirty_background_ratio = 0;
 502	return ret;
 503}
 504
 505int dirty_ratio_handler(struct ctl_table *table, int write,
 506		void __user *buffer, size_t *lenp,
 507		loff_t *ppos)
 508{
 509	int old_ratio = vm_dirty_ratio;
 510	int ret;
 511
 512	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 513	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
 514		writeback_set_ratelimit();
 515		vm_dirty_bytes = 0;
 516	}
 517	return ret;
 518}
 519
 520int dirty_bytes_handler(struct ctl_table *table, int write,
 521		void __user *buffer, size_t *lenp,
 522		loff_t *ppos)
 523{
 524	unsigned long old_bytes = vm_dirty_bytes;
 525	int ret;
 526
 527	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 528	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
 529		writeback_set_ratelimit();
 530		vm_dirty_ratio = 0;
 531	}
 532	return ret;
 533}
 534
 535static unsigned long wp_next_time(unsigned long cur_time)
 536{
 537	cur_time += VM_COMPLETIONS_PERIOD_LEN;
 538	/* 0 has a special meaning... */
 539	if (!cur_time)
 540		return 1;
 541	return cur_time;
 542}
 543
 544static void wb_domain_writeout_inc(struct wb_domain *dom,
 545				   struct fprop_local_percpu *completions,
 546				   unsigned int max_prop_frac)
 547{
 548	__fprop_inc_percpu_max(&dom->completions, completions,
 549			       max_prop_frac);
 550	/* First event after period switching was turned off? */
 551	if (!unlikely(dom->period_time)) {
 552		/*
 553		 * We can race with other __bdi_writeout_inc calls here but
 554		 * it does not cause any harm since the resulting time when
 555		 * timer will fire and what is in writeout_period_time will be
 556		 * roughly the same.
 557		 */
 558		dom->period_time = wp_next_time(jiffies);
 559		mod_timer(&dom->period_timer, dom->period_time);
 560	}
 561}
 562
 563/*
 564 * Increment @wb's writeout completion count and the global writeout
 565 * completion count. Called from test_clear_page_writeback().
 566 */
 567static inline void __wb_writeout_inc(struct bdi_writeback *wb)
 568{
 569	struct wb_domain *cgdom;
 570
 571	__inc_wb_stat(wb, WB_WRITTEN);
 572	wb_domain_writeout_inc(&global_wb_domain, &wb->completions,
 573			       wb->bdi->max_prop_frac);
 574
 575	cgdom = mem_cgroup_wb_domain(wb);
 576	if (cgdom)
 577		wb_domain_writeout_inc(cgdom, wb_memcg_completions(wb),
 578				       wb->bdi->max_prop_frac);
 579}
 580
 581void wb_writeout_inc(struct bdi_writeback *wb)
 582{
 583	unsigned long flags;
 584
 585	local_irq_save(flags);
 586	__wb_writeout_inc(wb);
 587	local_irq_restore(flags);
 588}
 589EXPORT_SYMBOL_GPL(wb_writeout_inc);
 590
 591/*
  592 * On an idle system we can be called long after we were scheduled because we
  593 * use deferred timers, so account for the missed periods.
 594 */
 595static void writeout_period(unsigned long t)
 596{
 597	struct wb_domain *dom = (void *)t;
 598	int miss_periods = (jiffies - dom->period_time) /
 599						 VM_COMPLETIONS_PERIOD_LEN;
 600
 601	if (fprop_new_period(&dom->completions, miss_periods + 1)) {
 602		dom->period_time = wp_next_time(dom->period_time +
 603				miss_periods * VM_COMPLETIONS_PERIOD_LEN);
 604		mod_timer(&dom->period_timer, dom->period_time);
 605	} else {
 606		/*
 607		 * Aging has zeroed all fractions. Stop wasting CPU on period
 608		 * updates.
 609		 */
 610		dom->period_time = 0;
 611	}
 612}
 613
 614int wb_domain_init(struct wb_domain *dom, gfp_t gfp)
 615{
 616	memset(dom, 0, sizeof(*dom));
 617
 618	spin_lock_init(&dom->lock);
 619
 620	init_timer_deferrable(&dom->period_timer);
 621	dom->period_timer.function = writeout_period;
 622	dom->period_timer.data = (unsigned long)dom;
 623
 624	dom->dirty_limit_tstamp = jiffies;
 625
 626	return fprop_global_init(&dom->completions, gfp);
 627}
 628
 629#ifdef CONFIG_CGROUP_WRITEBACK
 630void wb_domain_exit(struct wb_domain *dom)
 631{
 632	del_timer_sync(&dom->period_timer);
 633	fprop_global_destroy(&dom->completions);
 634}
 635#endif
 636
 637/*
 638 * bdi_min_ratio keeps the sum of the minimum dirty shares of all
 639 * registered backing devices, which, for obvious reasons, can not
 640 * exceed 100%.
 641 */
 642static unsigned int bdi_min_ratio;
 643
 644int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
 645{
 646	int ret = 0;
 647
 648	spin_lock_bh(&bdi_lock);
 649	if (min_ratio > bdi->max_ratio) {
 650		ret = -EINVAL;
 651	} else {
 652		min_ratio -= bdi->min_ratio;
 653		if (bdi_min_ratio + min_ratio < 100) {
 654			bdi_min_ratio += min_ratio;
 655			bdi->min_ratio += min_ratio;
 656		} else {
 657			ret = -EINVAL;
 658		}
 659	}
 660	spin_unlock_bh(&bdi_lock);
 661
 662	return ret;
 663}
 664
 665int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
 666{
 667	int ret = 0;
 668
 669	if (max_ratio > 100)
 670		return -EINVAL;
 671
 672	spin_lock_bh(&bdi_lock);
 673	if (bdi->min_ratio > max_ratio) {
 674		ret = -EINVAL;
 675	} else {
 676		bdi->max_ratio = max_ratio;
 677		bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100;
 678	}
 679	spin_unlock_bh(&bdi_lock);
 680
 681	return ret;
 682}
 683EXPORT_SYMBOL(bdi_set_max_ratio);
 684
 685static unsigned long dirty_freerun_ceiling(unsigned long thresh,
 686					   unsigned long bg_thresh)
 687{
 688	return (thresh + bg_thresh) / 2;
 689}
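/*
 * E.g. with thresh == 200,000 and bg_thresh == 100,000 pages (made-up
 * numbers), the freerun ceiling is 150,000 pages: below that,
 * balance_dirty_pages() lets tasks run without any throttling.
 */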
 690
 691static unsigned long hard_dirty_limit(struct wb_domain *dom,
 692				      unsigned long thresh)
 693{
 694	return max(thresh, dom->dirty_limit);
 695}
 696
 697/*
 698 * Memory which can be further allocated to a memcg domain is capped by
 699 * system-wide clean memory excluding the amount being used in the domain.
 700 */
 701static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
 702			    unsigned long filepages, unsigned long headroom)
 703{
 704	struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc);
 705	unsigned long clean = filepages - min(filepages, mdtc->dirty);
 706	unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty);
 707	unsigned long other_clean = global_clean - min(global_clean, clean);
 708
 709	mdtc->avail = filepages + min(headroom, other_clean);
 710}
 711
 712/**
 713 * __wb_calc_thresh - @wb's share of dirty throttling threshold
 714 * @dtc: dirty_throttle_context of interest
 715 *
 716 * Returns @wb's dirty limit in pages. The term "dirty" in the context of
 717 * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
 718 *
 719 * Note that balance_dirty_pages() will only seriously take it as a hard limit
 720 * when sleeping max_pause per page is not enough to keep the dirty pages under
 721 * control. For example, when the device is completely stalled due to some error
 722 * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
  723 * In other normal situations, it acts more gently by throttling the tasks
  724 * more (rather than completely blocking them) when the wb dirty pages go high.
 725 *
 726 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
 727 * - starving fast devices
 728 * - piling up dirty pages (that will take long time to sync) on slow devices
 729 *
 730 * The wb's share of dirty limit will be adapting to its throughput and
 731 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 732 */
 733static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc)
 734{
 735	struct wb_domain *dom = dtc_dom(dtc);
 736	unsigned long thresh = dtc->thresh;
 737	u64 wb_thresh;
 738	long numerator, denominator;
 739	unsigned long wb_min_ratio, wb_max_ratio;
 740
 741	/*
 742	 * Calculate this BDI's share of the thresh ratio.
 743	 */
 744	fprop_fraction_percpu(&dom->completions, dtc->wb_completions,
 745			      &numerator, &denominator);
 746
 747	wb_thresh = (thresh * (100 - bdi_min_ratio)) / 100;
 748	wb_thresh *= numerator;
 749	do_div(wb_thresh, denominator);
 750
 751	wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio);
 752
 753	wb_thresh += (thresh * wb_min_ratio) / 100;
 754	if (wb_thresh > (thresh * wb_max_ratio) / 100)
 755		wb_thresh = thresh * wb_max_ratio / 100;
 756
 757	return wb_thresh;
 758}
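/*
 * Worked example with made-up numbers: with dtc->thresh == 200,000 pages,
 * bdi_min_ratio == 0, this wb recently responsible for roughly 1/4 of the
 * domain's writeout completions, and bdi min_ratio == 0 / max_ratio == 100:
 *
 *	wb_thresh = 200,000 * (100 - 0)/100 * 1/4 = 50,000 pages
 *
 * A non-zero bdi->min_ratio would add a fixed share of thresh on top,
 * while a smaller bdi->max_ratio would cap the result.
 */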
 759
 760unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
 761{
 762	struct dirty_throttle_control gdtc = { GDTC_INIT(wb),
 763					       .thresh = thresh };
 764	return __wb_calc_thresh(&gdtc);
 765}
 766
 767/*
 768 *                           setpoint - dirty 3
 769 *        f(dirty) := 1.0 + (----------------)
 770 *                           limit - setpoint
 771 *
 772 * it's a 3rd order polynomial that subjects to
 773 *
 774 * (1) f(freerun)  = 2.0 => rampup dirty_ratelimit reasonably fast
 775 * (2) f(setpoint) = 1.0 => the balance point
 776 * (3) f(limit)    = 0   => the hard limit
 777 * (4) df/dx      <= 0	 => negative feedback control
 778 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
 779 *     => fast response on large errors; small oscillation near setpoint
 780 */
 781static long long pos_ratio_polynom(unsigned long setpoint,
 782					  unsigned long dirty,
 783					  unsigned long limit)
 784{
 785	long long pos_ratio;
 786	long x;
 787
 788	x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
 789		      (limit - setpoint) | 1);
 790	pos_ratio = x;
 791	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
 792	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
 793	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;
 794
 795	return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT);
 796}
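/*
 * Numerical example (illustrative values, scaled down from real page
 * counts): with setpoint = 1000 and limit = 2000,
 *
 *	dirty =  500: x =  0.5, f = 1 + 0.125 = 1.125
 *	dirty = 1000: x =  0.0, f = 1.0   (the balance point)
 *	dirty = 2000: x = -1.0, f = 0.0   (the hard limit)
 *
 * The return value is scaled by 2^RATELIMIT_CALC_SHIFT, so f = 1.0
 * corresponds to 1024 and the result is clamped to [0, 2048].
 */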
 797
 798/*
 799 * Dirty position control.
 800 *
 801 * (o) global/bdi setpoints
 802 *
 803 * We want the dirty pages be balanced around the global/wb setpoints.
 804 * When the number of dirty pages is higher/lower than the setpoint, the
 805 * dirty position control ratio (and hence task dirty ratelimit) will be
 806 * decreased/increased to bring the dirty pages back to the setpoint.
 807 *
 808 *     pos_ratio = 1 << RATELIMIT_CALC_SHIFT
 809 *
 810 *     if (dirty < setpoint) scale up   pos_ratio
 811 *     if (dirty > setpoint) scale down pos_ratio
 812 *
 813 *     if (wb_dirty < wb_setpoint) scale up   pos_ratio
 814 *     if (wb_dirty > wb_setpoint) scale down pos_ratio
 815 *
 816 *     task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
 817 *
 818 * (o) global control line
 819 *
 820 *     ^ pos_ratio
 821 *     |
 822 *     |            |<===== global dirty control scope ======>|
 823 * 2.0 .............*
 824 *     |            .*
 825 *     |            . *
 826 *     |            .   *
 827 *     |            .     *
 828 *     |            .        *
 829 *     |            .            *
 830 * 1.0 ................................*
 831 *     |            .                  .     *
 832 *     |            .                  .          *
 833 *     |            .                  .              *
 834 *     |            .                  .                 *
 835 *     |            .                  .                    *
 836 *   0 +------------.------------------.----------------------*------------->
 837 *           freerun^          setpoint^                 limit^   dirty pages
 838 *
 839 * (o) wb control line
 840 *
 841 *     ^ pos_ratio
 842 *     |
 843 *     |            *
 844 *     |              *
 845 *     |                *
 846 *     |                  *
 847 *     |                    * |<=========== span ============>|
 848 * 1.0 .......................*
 849 *     |                      . *
 850 *     |                      .   *
 851 *     |                      .     *
 852 *     |                      .       *
 853 *     |                      .         *
 854 *     |                      .           *
 855 *     |                      .             *
 856 *     |                      .               *
 857 *     |                      .                 *
 858 *     |                      .                   *
 859 *     |                      .                     *
 860 * 1/4 ...............................................* * * * * * * * * * * *
 861 *     |                      .                         .
 862 *     |                      .                           .
 863 *     |                      .                             .
 864 *   0 +----------------------.-------------------------------.------------->
 865 *                wb_setpoint^                    x_intercept^
 866 *
 867 * The wb control line won't drop below pos_ratio=1/4, so that wb_dirty can
 868 * be smoothly throttled down to normal if it starts high in situations like
 869 * - start writing to a slow SD card and a fast disk at the same time. The SD
 870 *   card's wb_dirty may rush to many times higher than wb_setpoint.
 871 * - the wb dirty thresh drops quickly due to change of JBOD workload
 872 */
 873static void wb_position_ratio(struct dirty_throttle_control *dtc)
 874{
 875	struct bdi_writeback *wb = dtc->wb;
 876	unsigned long write_bw = wb->avg_write_bandwidth;
 877	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
 878	unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
 879	unsigned long wb_thresh = dtc->wb_thresh;
 880	unsigned long x_intercept;
 881	unsigned long setpoint;		/* dirty pages' target balance point */
 882	unsigned long wb_setpoint;
 883	unsigned long span;
 884	long long pos_ratio;		/* for scaling up/down the rate limit */
 885	long x;
 886
 887	dtc->pos_ratio = 0;
 888
 889	if (unlikely(dtc->dirty >= limit))
 890		return;
 891
 892	/*
 893	 * global setpoint
 894	 *
 895	 * See comment for pos_ratio_polynom().
 896	 */
 897	setpoint = (freerun + limit) / 2;
 898	pos_ratio = pos_ratio_polynom(setpoint, dtc->dirty, limit);
 899
 900	/*
 901	 * The strictlimit feature is a tool preventing mistrusted filesystems
 902	 * from growing a large number of dirty pages before throttling. For
 903	 * such filesystems balance_dirty_pages always checks wb counters
 904	 * against wb limits. Even if global "nr_dirty" is under "freerun".
 905	 * This is especially important for fuse which sets bdi->max_ratio to
 906	 * 1% by default. Without strictlimit feature, fuse writeback may
 907	 * consume arbitrary amount of RAM because it is accounted in
 908	 * NR_WRITEBACK_TEMP which is not involved in calculating "nr_dirty".
 909	 *
 910	 * Here, in wb_position_ratio(), we calculate pos_ratio based on
 911	 * two values: wb_dirty and wb_thresh. Let's consider an example:
 912	 * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global
 913	 * limits are set by default to 10% and 20% (background and throttle).
 914	 * Then wb_thresh is 1% of 20% of 16GB. This amounts to ~8K pages.
 915	 * wb_calc_thresh(wb, bg_thresh) is about ~4K pages. wb_setpoint is
 916	 * about ~6K pages (as the average of background and throttle wb
 917	 * limits). The 3rd order polynomial will provide positive feedback if
 918	 * wb_dirty is under wb_setpoint and vice versa.
 919	 *
 920	 * Note, that we cannot use global counters in these calculations
 921	 * because we want to throttle process writing to a strictlimit wb
 922	 * much earlier than global "freerun" is reached (~23MB vs. ~2.3GB
 923	 * in the example above).
 924	 */
 925	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
 926		long long wb_pos_ratio;
 927
 928		if (dtc->wb_dirty < 8) {
 929			dtc->pos_ratio = min_t(long long, pos_ratio * 2,
 930					   2 << RATELIMIT_CALC_SHIFT);
 931			return;
 932		}
 933
 934		if (dtc->wb_dirty >= wb_thresh)
 935			return;
 936
 937		wb_setpoint = dirty_freerun_ceiling(wb_thresh,
 938						    dtc->wb_bg_thresh);
 939
 940		if (wb_setpoint == 0 || wb_setpoint == wb_thresh)
 941			return;
 942
 943		wb_pos_ratio = pos_ratio_polynom(wb_setpoint, dtc->wb_dirty,
 944						 wb_thresh);
 945
 946		/*
 947		 * Typically, for strictlimit case, wb_setpoint << setpoint
  948		 * and pos_ratio >> wb_pos_ratio. In other words, the global
  949		 * state ("dirty") is not the limiting factor and we have to
 950		 * make decision based on wb counters. But there is an
 951		 * important case when global pos_ratio should get precedence:
 952		 * global limits are exceeded (e.g. due to activities on other
 953		 * wb's) while given strictlimit wb is below limit.
 954		 *
 955		 * "pos_ratio * wb_pos_ratio" would work for the case above,
 956		 * but it would look too non-natural for the case of all
 957		 * activity in the system coming from a single strictlimit wb
 958		 * with bdi->max_ratio == 100%.
 959		 *
 960		 * Note that min() below somewhat changes the dynamics of the
 961		 * control system. Normally, pos_ratio value can be well over 3
 962		 * (when globally we are at freerun and wb is well below wb
 963		 * setpoint). Now the maximum pos_ratio in the same situation
 964		 * is 2. We might want to tweak this if we observe the control
 965		 * system is too slow to adapt.
 966		 */
 967		dtc->pos_ratio = min(pos_ratio, wb_pos_ratio);
 968		return;
 969	}
 970
 971	/*
 972	 * We have computed basic pos_ratio above based on global situation. If
 973	 * the wb is over/under its share of dirty pages, we want to scale
 974	 * pos_ratio further down/up. That is done by the following mechanism.
 975	 */
 976
 977	/*
 978	 * wb setpoint
 979	 *
 980	 *        f(wb_dirty) := 1.0 + k * (wb_dirty - wb_setpoint)
 981	 *
 982	 *                        x_intercept - wb_dirty
 983	 *                     := --------------------------
 984	 *                        x_intercept - wb_setpoint
 985	 *
 986	 * The main wb control line is a linear function that subjects to
 987	 *
 988	 * (1) f(wb_setpoint) = 1.0
 989	 * (2) k = - 1 / (8 * write_bw)  (in single wb case)
 990	 *     or equally: x_intercept = wb_setpoint + 8 * write_bw
 991	 *
 992	 * For single wb case, the dirty pages are observed to fluctuate
 993	 * regularly within range
 994	 *        [wb_setpoint - write_bw/2, wb_setpoint + write_bw/2]
  995	 * for various filesystems, where (2) can yield a reasonable 12.5%
 996	 * fluctuation range for pos_ratio.
 997	 *
 998	 * For JBOD case, wb_thresh (not wb_dirty!) could fluctuate up to its
 999	 * own size, so move the slope over accordingly and choose a slope that
1000	 * yields 100% pos_ratio fluctuation on suddenly doubled wb_thresh.
1001	 */
1002	if (unlikely(wb_thresh > dtc->thresh))
1003		wb_thresh = dtc->thresh;
1004	/*
1005	 * It's very possible that wb_thresh is close to 0 not because the
 1006	 * device is slow, but because it has remained inactive for a long time.
 1007	 * Honour such devices with a reasonably good (hopefully IO efficient)
1008	 * threshold, so that the occasional writes won't be blocked and active
1009	 * writes can rampup the threshold quickly.
1010	 */
1011	wb_thresh = max(wb_thresh, (limit - dtc->dirty) / 8);
1012	/*
1013	 * scale global setpoint to wb's:
1014	 *	wb_setpoint = setpoint * wb_thresh / thresh
1015	 */
1016	x = div_u64((u64)wb_thresh << 16, dtc->thresh | 1);
1017	wb_setpoint = setpoint * (u64)x >> 16;
1018	/*
1019	 * Use span=(8*write_bw) in single wb case as indicated by
1020	 * (thresh - wb_thresh ~= 0) and transit to wb_thresh in JBOD case.
1021	 *
1022	 *        wb_thresh                    thresh - wb_thresh
1023	 * span = --------- * (8 * write_bw) + ------------------ * wb_thresh
1024	 *         thresh                           thresh
1025	 */
1026	span = (dtc->thresh - wb_thresh + 8 * write_bw) * (u64)x >> 16;
1027	x_intercept = wb_setpoint + span;
1028
1029	if (dtc->wb_dirty < x_intercept - span / 4) {
1030		pos_ratio = div64_u64(pos_ratio * (x_intercept - dtc->wb_dirty),
1031				      (x_intercept - wb_setpoint) | 1);
1032	} else
1033		pos_ratio /= 4;
1034
1035	/*
1036	 * wb reserve area, safeguard against dirty pool underrun and disk idle
1037	 * It may push the desired control point of global dirty pages higher
1038	 * than setpoint.
1039	 */
1040	x_intercept = wb_thresh / 2;
1041	if (dtc->wb_dirty < x_intercept) {
1042		if (dtc->wb_dirty > x_intercept / 8)
1043			pos_ratio = div_u64(pos_ratio * x_intercept,
1044					    dtc->wb_dirty);
1045		else
1046			pos_ratio *= 8;
1047	}
1048
1049	dtc->pos_ratio = pos_ratio;
1050}
1051
1052static void wb_update_write_bandwidth(struct bdi_writeback *wb,
1053				      unsigned long elapsed,
1054				      unsigned long written)
1055{
1056	const unsigned long period = roundup_pow_of_two(3 * HZ);
1057	unsigned long avg = wb->avg_write_bandwidth;
1058	unsigned long old = wb->write_bandwidth;
1059	u64 bw;
1060
1061	/*
1062	 * bw = written * HZ / elapsed
1063	 *
1064	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
1065	 * write_bandwidth = ---------------------------------------------------
1066	 *                                          period
1067	 *
1068	 * @written may have decreased due to account_page_redirty().
1069	 * Avoid underflowing @bw calculation.
1070	 */
1071	bw = written - min(written, wb->written_stamp);
1072	bw *= HZ;
1073	if (unlikely(elapsed > period)) {
1074		do_div(bw, elapsed);
1075		avg = bw;
1076		goto out;
1077	}
1078	bw += (u64)wb->write_bandwidth * (period - elapsed);
1079	bw >>= ilog2(period);
1080
1081	/*
1082	 * one more level of smoothing, for filtering out sudden spikes
1083	 */
1084	if (avg > old && old >= (unsigned long)bw)
1085		avg -= (avg - old) >> 3;
1086
1087	if (avg < old && old <= (unsigned long)bw)
1088		avg += (old - avg) >> 3;
1089
1090out:
1091	/* keep avg > 0 to guarantee that tot > 0 if there are dirty wbs */
1092	avg = max(avg, 1LU);
1093	if (wb_has_dirty_io(wb)) {
1094		long delta = avg - wb->avg_write_bandwidth;
1095		WARN_ON_ONCE(atomic_long_add_return(delta,
1096					&wb->bdi->tot_write_bandwidth) <= 0);
1097	}
1098	wb->write_bandwidth = bw;
1099	wb->avg_write_bandwidth = avg;
1100}
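/*
 * Worked example with made-up numbers (assuming HZ == 1000, so period =
 * roundup_pow_of_two(3 * HZ) = 4096 jiffies): with elapsed == 200 jiffies,
 * 3000 pages written in that window and a previous write_bandwidth of
 * 10,000 pages/s,
 *
 *	bw = (3000 * 1000 + 10,000 * (4096 - 200)) / 4096 ~= 10,244 pages/s
 *
 * i.e. each 200ms sample moves the estimate only a few percent towards the
 * instantaneous rate (15,000 pages/s here); avg_write_bandwidth then
 * smooths it once more.
 */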
1101
1102static void update_dirty_limit(struct dirty_throttle_control *dtc)
1103{
1104	struct wb_domain *dom = dtc_dom(dtc);
1105	unsigned long thresh = dtc->thresh;
1106	unsigned long limit = dom->dirty_limit;
1107
1108	/*
1109	 * Follow up in one step.
1110	 */
1111	if (limit < thresh) {
1112		limit = thresh;
1113		goto update;
1114	}
1115
1116	/*
1117	 * Follow down slowly. Use the higher one as the target, because thresh
1118	 * may drop below dirty. This is exactly the reason to introduce
1119	 * dom->dirty_limit which is guaranteed to lie above the dirty pages.
1120	 */
1121	thresh = max(thresh, dtc->dirty);
1122	if (limit > thresh) {
1123		limit -= (limit - thresh) >> 5;
1124		goto update;
1125	}
1126	return;
1127update:
1128	dom->dirty_limit = limit;
1129}
1130
1131static void domain_update_bandwidth(struct dirty_throttle_control *dtc,
1132				    unsigned long now)
1133{
1134	struct wb_domain *dom = dtc_dom(dtc);
1135
1136	/*
1137	 * check locklessly first to optimize away locking for the most time
1138	 */
1139	if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL))
1140		return;
1141
1142	spin_lock(&dom->lock);
1143	if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) {
1144		update_dirty_limit(dtc);
1145		dom->dirty_limit_tstamp = now;
1146	}
1147	spin_unlock(&dom->lock);
1148}
1149
1150/*
1151 * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
1152 *
1153 * Normal wb tasks will be curbed at or below it in long term.
1154 * Obviously it should be around (write_bw / N) when there are N dd tasks.
1155 */
1156static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
1157				      unsigned long dirtied,
1158				      unsigned long elapsed)
1159{
1160	struct bdi_writeback *wb = dtc->wb;
1161	unsigned long dirty = dtc->dirty;
1162	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
1163	unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
1164	unsigned long setpoint = (freerun + limit) / 2;
1165	unsigned long write_bw = wb->avg_write_bandwidth;
1166	unsigned long dirty_ratelimit = wb->dirty_ratelimit;
1167	unsigned long dirty_rate;
1168	unsigned long task_ratelimit;
1169	unsigned long balanced_dirty_ratelimit;
1170	unsigned long step;
1171	unsigned long x;
1172	unsigned long shift;
1173
1174	/*
1175	 * The dirty rate will match the writeout rate in long term, except
1176	 * when dirty pages are truncated by userspace or re-dirtied by FS.
1177	 */
1178	dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed;
1179
1180	/*
1181	 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
1182	 */
1183	task_ratelimit = (u64)dirty_ratelimit *
1184					dtc->pos_ratio >> RATELIMIT_CALC_SHIFT;
1185	task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */
1186
1187	/*
1188	 * A linear estimation of the "balanced" throttle rate. The theory is,
1189	 * if there are N dd tasks, each throttled at task_ratelimit, the wb's
1190	 * dirty_rate will be measured to be (N * task_ratelimit). So the below
1191	 * formula will yield the balanced rate limit (write_bw / N).
1192	 *
1193	 * Note that the expanded form is not a pure rate feedback:
1194	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate)		     (1)
1195	 * but also takes pos_ratio into account:
1196	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio  (2)
1197	 *
1198	 * (1) is not realistic because pos_ratio also takes part in balancing
1199	 * the dirty rate.  Consider the state
1200	 *	pos_ratio = 0.5						     (3)
1201	 *	rate = 2 * (write_bw / N)				     (4)
 1202	 * If (1) is used, it will get stuck in that state, because each dd will
1203	 * be throttled at
1204	 *	task_ratelimit = pos_ratio * rate = (write_bw / N)	     (5)
1205	 * yielding
1206	 *	dirty_rate = N * task_ratelimit = write_bw		     (6)
1207	 * put (6) into (1) we get
1208	 *	rate_(i+1) = rate_(i)					     (7)
1209	 *
1210	 * So we end up using (2) to always keep
1211	 *	rate_(i+1) ~= (write_bw / N)				     (8)
1212	 * regardless of the value of pos_ratio. As long as (8) is satisfied,
1213	 * pos_ratio is able to drive itself to 1.0, which is not only where
 1214	 * the dirty count meets the setpoint, but also where the slope of
1215	 * pos_ratio is most flat and hence task_ratelimit is least fluctuated.
1216	 */
1217	balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
1218					   dirty_rate | 1);
1219	/*
1220	 * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
1221	 */
1222	if (unlikely(balanced_dirty_ratelimit > write_bw))
1223		balanced_dirty_ratelimit = write_bw;
1224
1225	/*
1226	 * We could safely do this and return immediately:
1227	 *
1228	 *	wb->dirty_ratelimit = balanced_dirty_ratelimit;
1229	 *
1230	 * However to get a more stable dirty_ratelimit, the below elaborated
1231	 * code makes use of task_ratelimit to filter out singular points and
1232	 * limit the step size.
1233	 *
1234	 * The below code essentially only uses the relative value of
1235	 *
1236	 *	task_ratelimit - dirty_ratelimit
1237	 *	= (pos_ratio - 1) * dirty_ratelimit
1238	 *
1239	 * which reflects the direction and size of dirty position error.
1240	 */
1241
1242	/*
1243	 * dirty_ratelimit will follow balanced_dirty_ratelimit iff
1244	 * task_ratelimit is on the same side of dirty_ratelimit, too.
1245	 * For example, when
1246	 * - dirty_ratelimit > balanced_dirty_ratelimit
1247	 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
1248	 * lowering dirty_ratelimit will help meet both the position and rate
1249	 * control targets. Otherwise, don't update dirty_ratelimit if it will
1250	 * only help meet the rate target. After all, what the users ultimately
1251	 * feel and care are stable dirty rate and small position error.
1252	 *
1253	 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
 1254	 * and filter out the singular points of balanced_dirty_ratelimit, which
1255	 * keeps jumping around randomly and can even leap far away at times
1256	 * due to the small 200ms estimation period of dirty_rate (we want to
1257	 * keep that period small to reduce time lags).
1258	 */
1259	step = 0;
1260
1261	/*
1262	 * For strictlimit case, calculations above were based on wb counters
1263	 * and limits (starting from pos_ratio = wb_position_ratio() and up to
1264	 * balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate).
1265	 * Hence, to calculate "step" properly, we have to use wb_dirty as
1266	 * "dirty" and wb_setpoint as "setpoint".
1267	 *
1268	 * We rampup dirty_ratelimit forcibly if wb_dirty is low because
1269	 * it's possible that wb_thresh is close to zero due to inactivity
1270	 * of backing device.
1271	 */
1272	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
1273		dirty = dtc->wb_dirty;
1274		if (dtc->wb_dirty < 8)
1275			setpoint = dtc->wb_dirty + 1;
1276		else
1277			setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2;
1278	}
1279
1280	if (dirty < setpoint) {
1281		x = min3(wb->balanced_dirty_ratelimit,
1282			 balanced_dirty_ratelimit, task_ratelimit);
1283		if (dirty_ratelimit < x)
1284			step = x - dirty_ratelimit;
1285	} else {
1286		x = max3(wb->balanced_dirty_ratelimit,
1287			 balanced_dirty_ratelimit, task_ratelimit);
1288		if (dirty_ratelimit > x)
1289			step = dirty_ratelimit - x;
1290	}
1291
1292	/*
1293	 * Don't pursue 100% rate matching. It's impossible since the balanced
1294	 * rate itself is constantly fluctuating. So decrease the track speed
1295	 * when it gets close to the target. Helps eliminate pointless tremors.
1296	 */
1297	shift = dirty_ratelimit / (2 * step + 1);
1298	if (shift < BITS_PER_LONG)
1299		step = DIV_ROUND_UP(step >> shift, 8);
1300	else
1301		step = 0;
1302
1303	if (dirty_ratelimit < balanced_dirty_ratelimit)
1304		dirty_ratelimit += step;
1305	else
1306		dirty_ratelimit -= step;
1307
1308	wb->dirty_ratelimit = max(dirty_ratelimit, 1UL);
1309	wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit;
1310
1311	trace_bdi_dirty_ratelimit(wb, dirty_rate, task_ratelimit);
1312}
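/*
 * Worked example with made-up numbers: suppose write_bw == 25,600 pages/s
 * (~100MB/s with 4KB pages) and 4 dd tasks are each throttled at
 * task_ratelimit == 12,800 pages/s, so the measured dirty_rate is about
 * 51,200 pages/s.  Then
 *
 *	balanced_dirty_ratelimit = 12,800 * 25,600 / 51,200
 *	                         = 6,400 pages/s = write_bw / 4
 *
 * which is exactly the per-task rate that matches the device throughput.
 */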
1313
1314static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc,
1315				  struct dirty_throttle_control *mdtc,
1316				  unsigned long start_time,
1317				  bool update_ratelimit)
1318{
1319	struct bdi_writeback *wb = gdtc->wb;
1320	unsigned long now = jiffies;
1321	unsigned long elapsed = now - wb->bw_time_stamp;
1322	unsigned long dirtied;
1323	unsigned long written;
1324
1325	lockdep_assert_held(&wb->list_lock);
1326
1327	/*
1328	 * rate-limit, only update once every 200ms.
1329	 */
1330	if (elapsed < BANDWIDTH_INTERVAL)
1331		return;
1332
1333	dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]);
1334	written = percpu_counter_read(&wb->stat[WB_WRITTEN]);
1335
1336	/*
1337	 * Skip quiet periods when disk bandwidth is under-utilized.
1338	 * (at least 1s idle time between two flusher runs)
1339	 */
1340	if (elapsed > HZ && time_before(wb->bw_time_stamp, start_time))
1341		goto snapshot;
1342
1343	if (update_ratelimit) {
1344		domain_update_bandwidth(gdtc, now);
1345		wb_update_dirty_ratelimit(gdtc, dirtied, elapsed);
1346
1347		/*
1348		 * @mdtc is always NULL if !CGROUP_WRITEBACK but the
1349		 * compiler has no way to figure that out.  Help it.
1350		 */
1351		if (IS_ENABLED(CONFIG_CGROUP_WRITEBACK) && mdtc) {
1352			domain_update_bandwidth(mdtc, now);
1353			wb_update_dirty_ratelimit(mdtc, dirtied, elapsed);
1354		}
1355	}
1356	wb_update_write_bandwidth(wb, elapsed, written);
1357
1358snapshot:
1359	wb->dirtied_stamp = dirtied;
1360	wb->written_stamp = written;
1361	wb->bw_time_stamp = now;
1362}
1363
1364void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time)
1365{
1366	struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };
1367
1368	__wb_update_bandwidth(&gdtc, NULL, start_time, false);
1369}
1370
1371/*
1372 * After a task dirtied this many pages, balance_dirty_pages_ratelimited()
1373 * will look to see if it needs to start dirty throttling.
1374 *
1375 * If dirty_poll_interval is too low, big NUMA machines will call the expensive
1376 * global_page_state() too often. So scale it near-sqrt to the safety margin
1377 * (the number of pages we may dirty without exceeding the dirty limits).
1378 */
1379static unsigned long dirty_poll_interval(unsigned long dirty,
1380					 unsigned long thresh)
1381{
1382	if (thresh > dirty)
1383		return 1UL << (ilog2(thresh - dirty) >> 1);
1384
1385	return 1;
1386}
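/*
 * E.g. with a safety margin of thresh - dirty == 4096 pages (made-up
 * number), ilog2(4096) == 12, so the poll interval is 1 << (12 >> 1) == 64
 * pages; a margin of 1024 pages yields 32 pages.
 */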
1387
1388static unsigned long wb_max_pause(struct bdi_writeback *wb,
1389				  unsigned long wb_dirty)
1390{
1391	unsigned long bw = wb->avg_write_bandwidth;
1392	unsigned long t;
1393
1394	/*
1395	 * Limit pause time for small memory systems. If sleeping for too long
1396	 * time, a small pool of dirty/writeback pages may go empty and disk go
1397	 * idle.
1398	 *
1399	 * 8 serves as the safety ratio.
1400	 */
1401	t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
1402	t++;
1403
1404	return min_t(unsigned long, t, MAX_PAUSE);
1405}
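/*
 * Worked example with made-up numbers (assuming HZ == 1000, so
 * roundup_pow_of_two(1 + HZ / 8) == 128): with avg_write_bandwidth ==
 * 25,600 pages/s and wb_dirty == 10,000 pages,
 *
 *	t = 10,000 / (1 + 25,600 / 128) + 1 = 50 jiffies (50ms)
 *
 * well under the MAX_PAUSE cap of HZ / 5 == 200 jiffies.
 */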
1406
1407static long wb_min_pause(struct bdi_writeback *wb,
1408			 long max_pause,
1409			 unsigned long task_ratelimit,
1410			 unsigned long dirty_ratelimit,
1411			 int *nr_dirtied_pause)
1412{
1413	long hi = ilog2(wb->avg_write_bandwidth);
1414	long lo = ilog2(wb->dirty_ratelimit);
1415	long t;		/* target pause */
1416	long pause;	/* estimated next pause */
1417	int pages;	/* target nr_dirtied_pause */
1418
1419	/* target for 10ms pause on 1-dd case */
1420	t = max(1, HZ / 100);
1421
1422	/*
1423	 * Scale up pause time for concurrent dirtiers in order to reduce CPU
1424	 * overheads.
1425	 *
1426	 * (N * 10ms) on 2^N concurrent tasks.
1427	 */
1428	if (hi > lo)
1429		t += (hi - lo) * (10 * HZ) / 1024;
1430
1431	/*
1432	 * This is a bit convoluted. We try to base the next nr_dirtied_pause
1433	 * on the much more stable dirty_ratelimit. However the next pause time
1434	 * will be computed based on task_ratelimit and the two rate limits may
1435	 * depart considerably at some time. Especially if task_ratelimit goes
1436	 * below dirty_ratelimit/2 and the target pause is max_pause, the next
1437	 * pause time will be max_pause*2 _trimmed down_ to max_pause.  As a
1438	 * result task_ratelimit won't be executed faithfully, which could
1439	 * eventually bring down dirty_ratelimit.
1440	 *
1441	 * We apply two rules to fix it up:
1442	 * 1) try to estimate the next pause time and if necessary, use a lower
1443	 *    nr_dirtied_pause so as not to exceed max_pause. When this happens,
1444	 *    nr_dirtied_pause will be "dancing" with task_ratelimit.
1445	 * 2) limit the target pause time to max_pause/2, so that the normal
1446	 *    small fluctuations of task_ratelimit won't trigger rule (1) and
1447	 *    nr_dirtied_pause will remain as stable as dirty_ratelimit.
1448	 */
1449	t = min(t, 1 + max_pause / 2);
1450	pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
1451
1452	/*
1453	 * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
1454	 * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
1455	 * When the 16 consecutive reads are often interrupted by some dirty
1456	 * throttling pause during the async writes, cfq will go into idles
1457	 * (deadline is fine). So push nr_dirtied_pause as high as possible
 1458	 * until it reaches DIRTY_POLL_THRESH=32 pages.
1459	 */
1460	if (pages < DIRTY_POLL_THRESH) {
1461		t = max_pause;
1462		pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
1463		if (pages > DIRTY_POLL_THRESH) {
1464			pages = DIRTY_POLL_THRESH;
1465			t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
1466		}
1467	}
1468
1469	pause = HZ * pages / (task_ratelimit + 1);
1470	if (pause > max_pause) {
1471		t = max_pause;
1472		pages = task_ratelimit * t / roundup_pow_of_two(HZ);
1473	}
1474
1475	*nr_dirtied_pause = pages;
1476	/*
1477	 * The minimal pause time will normally be half the target pause time.
1478	 */
1479	return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
1480}
1481
1482static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
1483{
1484	struct bdi_writeback *wb = dtc->wb;
1485	unsigned long wb_reclaimable;
1486
1487	/*
1488	 * wb_thresh is not treated as some limiting factor as
1489	 * dirty_thresh, due to reasons
1490	 * - in JBOD setup, wb_thresh can fluctuate a lot
1491	 * - in a system with HDD and USB key, the USB key may somehow
1492	 *   go into state (wb_dirty >> wb_thresh) either because
1493	 *   wb_dirty starts high, or because wb_thresh drops low.
1494	 *   In this case we don't want to hard throttle the USB key
1495	 *   dirtiers for 100 seconds until wb_dirty drops under
1496	 *   wb_thresh. Instead the auxiliary wb control line in
1497	 *   wb_position_ratio() will let the dirtier task progress
1498	 *   at some rate <= (write_bw / 2) for bringing down wb_dirty.
1499	 */
1500	dtc->wb_thresh = __wb_calc_thresh(dtc);
1501	dtc->wb_bg_thresh = dtc->thresh ?
1502		div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
1503
1504	/*
1505	 * In order to avoid the stacked BDI deadlock we need
1506	 * to ensure we accurately count the 'dirty' pages when
1507	 * the threshold is low.
1508	 *
1509	 * Otherwise it would be possible to get thresh+n pages
1510	 * reported dirty, even though there are thresh-m pages
1511	 * actually dirty; with m+n sitting in the percpu
1512	 * deltas.
1513	 */
1514	if (dtc->wb_thresh < 2 * wb_stat_error(wb)) {
1515		wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
1516		dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK);
1517	} else {
1518		wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE);
1519		dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK);
1520	}
1521}
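/*
 * E.g. with dtc->thresh == 200,000, dtc->bg_thresh == 100,000 and
 * __wb_calc_thresh() returning 50,000 (made-up numbers),
 * wb_bg_thresh = 50,000 * 100,000 / 200,000 = 25,000 pages, keeping the
 * same bg_thresh : thresh proportion at the per-wb level.
 */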
1522
1523/*
1524 * balance_dirty_pages() must be called by processes which are generating dirty
1525 * data.  It looks at the number of dirty pages in the machine and will force
1526 * the caller to wait once crossing the (background_thresh + dirty_thresh) / 2.
1527 * If we're over `background_thresh' then the writeback threads are woken to
1528 * perform some writeout.
1529 */
1530static void balance_dirty_pages(struct address_space *mapping,
1531				struct bdi_writeback *wb,
1532				unsigned long pages_dirtied)
1533{
1534	struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
1535	struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
1536	struct dirty_throttle_control * const gdtc = &gdtc_stor;
1537	struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
1538						     &mdtc_stor : NULL;
1539	struct dirty_throttle_control *sdtc;
1540	unsigned long nr_reclaimable;	/* = file_dirty + unstable_nfs */
1541	long period;
1542	long pause;
1543	long max_pause;
1544	long min_pause;
1545	int nr_dirtied_pause;
1546	bool dirty_exceeded = false;
1547	unsigned long task_ratelimit;
1548	unsigned long dirty_ratelimit;
1549	struct backing_dev_info *bdi = wb->bdi;
1550	bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
1551	unsigned long start_time = jiffies;
1552
1553	for (;;) {
1554		unsigned long now = jiffies;
1555		unsigned long dirty, thresh, bg_thresh;
1556		unsigned long m_dirty = 0;	/* stop bogus uninit warnings */
1557		unsigned long m_thresh = 0;
1558		unsigned long m_bg_thresh = 0;
1559
1560		/*
1561		 * Unstable writes are a feature of certain networked
1562		 * filesystems (i.e. NFS) in which data may have been
1563		 * written to the server's write cache, but has not yet
1564		 * been flushed to permanent storage.
1565		 */
1566		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
1567					global_page_state(NR_UNSTABLE_NFS);
1568		gdtc->avail = global_dirtyable_memory();
1569		gdtc->dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);
1570
1571		domain_dirty_limits(gdtc);
1572
1573		if (unlikely(strictlimit)) {
1574			wb_dirty_limits(gdtc);
1575
1576			dirty = gdtc->wb_dirty;
1577			thresh = gdtc->wb_thresh;
1578			bg_thresh = gdtc->wb_bg_thresh;
1579		} else {
1580			dirty = gdtc->dirty;
1581			thresh = gdtc->thresh;
1582			bg_thresh = gdtc->bg_thresh;
1583		}
1584
1585		if (mdtc) {
1586			unsigned long filepages, headroom, writeback;
1587
1588			/*
1589			 * If @wb belongs to !root memcg, repeat the same
1590			 * basic calculations for the memcg domain.
1591			 */
1592			mem_cgroup_wb_stats(wb, &filepages, &headroom,
1593					    &mdtc->dirty, &writeback);
1594			mdtc->dirty += writeback;
1595			mdtc_calc_avail(mdtc, filepages, headroom);
1596
1597			domain_dirty_limits(mdtc);
1598
1599			if (unlikely(strictlimit)) {
1600				wb_dirty_limits(mdtc);
1601				m_dirty = mdtc->wb_dirty;
1602				m_thresh = mdtc->wb_thresh;
1603				m_bg_thresh = mdtc->wb_bg_thresh;
1604			} else {
1605				m_dirty = mdtc->dirty;
1606				m_thresh = mdtc->thresh;
1607				m_bg_thresh = mdtc->bg_thresh;
1608			}
1609		}
1610
1611		/*
1612		 * Throttle it only when the background writeback cannot
1613		 * catch-up. This avoids (excessively) small writeouts
1614		 * when the wb limits are ramping up in case of !strictlimit.
1615		 *
1616		 * In strictlimit case make decision based on the wb counters
1617		 * and limits. Small writeouts when the wb limits are ramping
1618		 * up are the price we consciously pay for strictlimit-ing.
1619		 *
1620		 * If memcg domain is in effect, @dirty should be under
1621		 * both global and memcg freerun ceilings.
1622		 */
1623		if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh) &&
1624		    (!mdtc ||
1625		     m_dirty <= dirty_freerun_ceiling(m_thresh, m_bg_thresh))) {
1626			unsigned long intv = dirty_poll_interval(dirty, thresh);
1627			unsigned long m_intv = ULONG_MAX;
1628
1629			current->dirty_paused_when = now;
1630			current->nr_dirtied = 0;
1631			if (mdtc)
1632				m_intv = dirty_poll_interval(m_dirty, m_thresh);
1633			current->nr_dirtied_pause = min(intv, m_intv);
1634			break;
1635		}
1636
 
1637		if (unlikely(!writeback_in_progress(wb)))
1638			wb_start_background_writeback(wb);
1639
 
 
1640		/*
1641		 * Calculate global domain's pos_ratio and select the
1642		 * global dtc by default.
1643		 */
1644		if (!strictlimit)
1645			wb_dirty_limits(gdtc);
1646
1647		dirty_exceeded = (gdtc->wb_dirty > gdtc->wb_thresh) &&
1648			((gdtc->dirty > gdtc->thresh) || strictlimit);
1649
1650		wb_position_ratio(gdtc);
1651		sdtc = gdtc;
1652
1653		if (mdtc) {
1654			/*
1655			 * If memcg domain is in effect, calculate its
1656			 * pos_ratio.  @wb should satisfy constraints from
1657			 * both global and memcg domains.  Choose the one
1658			 * w/ lower pos_ratio.
1659			 */
1660			if (!strictlimit)
1661				wb_dirty_limits(mdtc);
1662
1663			dirty_exceeded |= (mdtc->wb_dirty > mdtc->wb_thresh) &&
1664				((mdtc->dirty > mdtc->thresh) || strictlimit);
1665
1666			wb_position_ratio(mdtc);
1667			if (mdtc->pos_ratio < gdtc->pos_ratio)
1668				sdtc = mdtc;
1669		}
1670
1671		if (dirty_exceeded && !wb->dirty_exceeded)
1672			wb->dirty_exceeded = 1;
1673
1674		if (time_is_before_jiffies(wb->bw_time_stamp +
1675					   BANDWIDTH_INTERVAL)) {
1676			spin_lock(&wb->list_lock);
1677			__wb_update_bandwidth(gdtc, mdtc, start_time, true);
1678			spin_unlock(&wb->list_lock);
1679		}
1680
1681		/* throttle according to the chosen dtc */
1682		dirty_ratelimit = wb->dirty_ratelimit;
1683		task_ratelimit = ((u64)dirty_ratelimit * sdtc->pos_ratio) >>
1684							RATELIMIT_CALC_SHIFT;
1685		max_pause = wb_max_pause(wb, sdtc->wb_dirty);
1686		min_pause = wb_min_pause(wb, max_pause,
1687					 task_ratelimit, dirty_ratelimit,
1688					 &nr_dirtied_pause);
1689
1690		if (unlikely(task_ratelimit == 0)) {
1691			period = max_pause;
1692			pause = max_pause;
1693			goto pause;
1694		}
1695		period = HZ * pages_dirtied / task_ratelimit;
1696		pause = period;
1697		if (current->dirty_paused_when)
1698			pause -= now - current->dirty_paused_when;
1699		/*
1700		 * For less than 1s think time (ext3/4 may block the dirtier
1701		 * for up to 800ms from time to time on 1-HDD; so does xfs,
1702		 * however at a much lower frequency), try to compensate for it in
1703		 * future periods by updating the virtual time; otherwise just
1704		 * reset it, as it may be a light dirtier.
1705		 */
1706		if (pause < min_pause) {
1707			trace_balance_dirty_pages(wb,
1708						  sdtc->thresh,
1709						  sdtc->bg_thresh,
1710						  sdtc->dirty,
1711						  sdtc->wb_thresh,
1712						  sdtc->wb_dirty,
1713						  dirty_ratelimit,
1714						  task_ratelimit,
1715						  pages_dirtied,
1716						  period,
1717						  min(pause, 0L),
1718						  start_time);
1719			if (pause < -HZ) {
1720				current->dirty_paused_when = now;
1721				current->nr_dirtied = 0;
1722			} else if (period) {
1723				current->dirty_paused_when += period;
1724				current->nr_dirtied = 0;
1725			} else if (current->nr_dirtied_pause <= pages_dirtied)
1726				current->nr_dirtied_pause += pages_dirtied;
1727			break;
1728		}
1729		if (unlikely(pause > max_pause)) {
1730			/* for occasional dropped task_ratelimit */
1731			now += min(pause - max_pause, max_pause);
1732			pause = max_pause;
1733		}
1734
1735pause:
1736		trace_balance_dirty_pages(wb,
1737					  sdtc->thresh,
1738					  sdtc->bg_thresh,
1739					  sdtc->dirty,
1740					  sdtc->wb_thresh,
1741					  sdtc->wb_dirty,
1742					  dirty_ratelimit,
1743					  task_ratelimit,
1744					  pages_dirtied,
1745					  period,
1746					  pause,
1747					  start_time);
1748		__set_current_state(TASK_KILLABLE);
 
1749		io_schedule_timeout(pause);
1750
1751		current->dirty_paused_when = now + pause;
1752		current->nr_dirtied = 0;
1753		current->nr_dirtied_pause = nr_dirtied_pause;
1754
1755		/*
1756		 * This is typically equal to (dirty < thresh) and can also
1757		 * keep "1000+ dd on a slow USB stick" under control.
1758		 */
1759		if (task_ratelimit)
1760			break;
1761
1762		/*
1763		 * In the case of an unresponsive NFS server whose NFS dirty
1764		 * pages exceed dirty_thresh, give the other good wb's a pipe
1765		 * to go through, so that tasks on them still remain responsive.
1766		 *
1767		 * In theory 1 page is enough to keep the consumer-producer
1768		 * pipe going: the flusher cleans 1 page => the task dirties 1
1769		 * more page. However wb_dirty has accounting errors.  So use
1770		 * the larger and more IO friendly wb_stat_error.
1771		 */
1772		if (sdtc->wb_dirty <= wb_stat_error(wb))
1773			break;
1774
1775		if (fatal_signal_pending(current))
1776			break;
1777	}
1778
1779	if (!dirty_exceeded && wb->dirty_exceeded)
1780		wb->dirty_exceeded = 0;
1781
1782	if (writeback_in_progress(wb))
1783		return;
1784
1785	/*
1786	 * In laptop mode, we wait until hitting the higher threshold before
1787	 * starting background writeout, and then write out all the way down
1788	 * to the lower threshold.  So slow writers cause minimal disk activity.
1789	 *
1790	 * In normal mode, we start background writeout at the lower
1791	 * background_thresh, to keep the amount of dirty memory low.
1792	 */
1793	if (laptop_mode)
1794		return;
1795
1796	if (nr_reclaimable > gdtc->bg_thresh)
1797		wb_start_background_writeback(wb);
1798}
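/*
 * Worked example of the pause arithmetic above, with made-up numbers: with
 * HZ = 1000, if a task has dirtied pages_dirtied = 32 pages and its
 * task_ratelimit works out to 16000 pages/s, then
 * period = HZ * pages_dirtied / task_ratelimit = 1000 * 32 / 16000 = 2
 * jiffies, i.e. the task is asked to pause for roughly 2ms, minus any think
 * time credited via dirty_paused_when and clamped to [min_pause, max_pause].
 */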
1799
1800static DEFINE_PER_CPU(int, bdp_ratelimits);
1801
1802/*
1803 * Normal tasks are throttled by
1804 *	loop {
1805 *		dirty tsk->nr_dirtied_pause pages;
1806 *		take a snap in balance_dirty_pages();
1807 *	}
1808 * However there is a worst case: if every task exits immediately after dirtying
1809 * (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never be
1810 * called to throttle the page dirties. The solution is to save the not yet
1811 * throttled page dirties in dirty_throttle_leaks on task exit and charge them
1812 * randomly into the running tasks. This works well for the above worst case,
1813 * as the new task will pick up and accumulate the old task's leaked dirty
1814 * count and eventually get throttled.
1815 */
1816DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
1817
1818/**
1819 * balance_dirty_pages_ratelimited - balance dirty memory state
1820 * @mapping: address_space which was dirtied
 
1821 *
1822 * Processes which are dirtying memory should call in here once for each page
1823 * which was newly dirtied.  The function will periodically check the system's
1824 * dirty state and will initiate writeback if needed.
1825 *
1826 * On really big machines, get_writeback_state is expensive, so try to avoid
1827 * calling it too often (ratelimiting).  But once we're over the dirty memory
1828 * limit we decrease the ratelimiting by a lot, to prevent individual processes
1829 * from overshooting the limit by (ratelimit_pages) each.
1830 */
1831void balance_dirty_pages_ratelimited(struct address_space *mapping)
 
1832{
1833	struct inode *inode = mapping->host;
1834	struct backing_dev_info *bdi = inode_to_bdi(inode);
1835	struct bdi_writeback *wb = NULL;
1836	int ratelimit;
 
1837	int *p;
1838
1839	if (!bdi_cap_account_dirty(bdi))
1840		return;
1841
1842	if (inode_cgwb_enabled(inode))
1843		wb = wb_get_create_current(bdi, GFP_KERNEL);
1844	if (!wb)
1845		wb = &bdi->wb;
1846
1847	ratelimit = current->nr_dirtied_pause;
1848	if (wb->dirty_exceeded)
1849		ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));
1850
1851	preempt_disable();
1852	/*
1853	 * This prevents one CPU from accumulating too many dirtied pages without
1854	 * calling into balance_dirty_pages(), which can happen when there are
1855	 * 1000+ tasks that all start dirtying pages at exactly the same
1856	 * time and hence all honour a too-large initial task->nr_dirtied_pause.
1857	 */
1858	p =  this_cpu_ptr(&bdp_ratelimits);
1859	if (unlikely(current->nr_dirtied >= ratelimit))
1860		*p = 0;
1861	else if (unlikely(*p >= ratelimit_pages)) {
1862		*p = 0;
1863		ratelimit = 0;
1864	}
1865	/*
1866	 * Pick up the dirtied pages left behind by exited tasks. This avoids lots of
1867	 * short-lived tasks (e.g. gcc invocations in a kernel build) escaping
1868	 * the dirty throttling and livelocking other long-running dirtiers.
1869	 */
1870	p = this_cpu_ptr(&dirty_throttle_leaks);
1871	if (*p > 0 && current->nr_dirtied < ratelimit) {
1872		unsigned long nr_pages_dirtied;
1873		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
1874		*p -= nr_pages_dirtied;
1875		current->nr_dirtied += nr_pages_dirtied;
1876	}
1877	preempt_enable();
1878
1879	if (unlikely(current->nr_dirtied >= ratelimit))
1880		balance_dirty_pages(mapping, wb, current->nr_dirtied);
1881
1882	wb_put(wb);
1883}
1884EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
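/*
 * Minimal usage sketch (illustrative, not code from this file): a
 * filesystem's buffered write path typically calls
 * balance_dirty_pages_ratelimited() once per page it dirties, roughly:
 *
 *	while (bytes_left) {
 *		copy_data_into_page(page, ...);		// hypothetical helper
 *		set_page_dirty(page);
 *		balance_dirty_pages_ratelimited(mapping);
 *	}
 *
 * The generic write path in mm/filemap.c follows this pattern; the helper
 * name above is made up for illustration.
 */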
1885
1886/**
1887 * wb_over_bg_thresh - does @wb need to be written back?
1888 * @wb: bdi_writeback of interest
1889 *
1890 * Determines whether background writeback should keep writing @wb or whether
1891 * it is clean enough.  Returns %true if writeback should continue.
 
 
1892 */
1893bool wb_over_bg_thresh(struct bdi_writeback *wb)
1894{
1895	struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
1896	struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
1897	struct dirty_throttle_control * const gdtc = &gdtc_stor;
1898	struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
1899						     &mdtc_stor : NULL;
 
 
1900
1901	/*
1902	 * Similar to balance_dirty_pages() but ignores pages being written
1903	 * as we're trying to decide whether to put more under writeback.
1904	 */
1905	gdtc->avail = global_dirtyable_memory();
1906	gdtc->dirty = global_page_state(NR_FILE_DIRTY) +
1907		      global_page_state(NR_UNSTABLE_NFS);
1908	domain_dirty_limits(gdtc);
1909
1910	if (gdtc->dirty > gdtc->bg_thresh)
1911		return true;
1912
1913	if (wb_stat(wb, WB_RECLAIMABLE) >
1914	    wb_calc_thresh(gdtc->wb, gdtc->bg_thresh))
1915		return true;
1916
1917	if (mdtc) {
1918		unsigned long filepages, headroom, writeback;
1919
1920		mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty,
1921				    &writeback);
1922		mdtc_calc_avail(mdtc, filepages, headroom);
1923		domain_dirty_limits(mdtc);	/* ditto, ignore writeback */
1924
1925		if (mdtc->dirty > mdtc->bg_thresh)
1926			return true;
1927
1928		if (wb_stat(wb, WB_RECLAIMABLE) >
1929		    wb_calc_thresh(mdtc->wb, mdtc->bg_thresh))
1930			return true;
1931	}
1932
1933	return false;
1934}
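/*
 * Sketch of the expected caller (an assumption based on the comment above,
 * not code in this file): the flusher's background work loop keeps writing
 * while this predicate holds, roughly:
 *
 *	while (wb_over_bg_thresh(wb))
 *		written += write_some_pages(wb);	// hypothetical helper
 */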
1935
1936void throttle_vm_writeout(gfp_t gfp_mask)
1937{
1938	unsigned long background_thresh;
1939	unsigned long dirty_thresh;
1940
1941        for ( ; ; ) {
1942		global_dirty_limits(&background_thresh, &dirty_thresh);
1943		dirty_thresh = hard_dirty_limit(&global_wb_domain, dirty_thresh);
1944
1945                /*
1946                 * Boost the allowable dirty threshold a bit for page
1947                 * allocators so they don't get DoS'ed by heavy writers
1948                 */
1949                dirty_thresh += dirty_thresh / 10;      /* wheeee... */
1950
1951                if (global_page_state(NR_UNSTABLE_NFS) +
1952			global_page_state(NR_WRITEBACK) <= dirty_thresh)
1953                        	break;
1954                congestion_wait(BLK_RW_ASYNC, HZ/10);
1955
1956		/*
1957		 * The caller might hold locks which can prevent IO completion
1958		 * or progress in the filesystem.  So we cannot just sit here
1959		 * waiting for IO to complete.
1960		 */
1961		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
1962			break;
1963        }
1964}
1965
1966/*
1967 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
1968 */
1969int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
1970	void __user *buffer, size_t *length, loff_t *ppos)
1971{
1972	proc_dointvec(table, write, buffer, length, ppos);
1973	return 0;
1974}
1975
1976#ifdef CONFIG_BLOCK
1977void laptop_mode_timer_fn(unsigned long data)
1978{
1979	struct request_queue *q = (struct request_queue *)data;
1980	int nr_pages = global_page_state(NR_FILE_DIRTY) +
1981		global_page_state(NR_UNSTABLE_NFS);
1982	struct bdi_writeback *wb;
1983
1984	/*
1985	 * We want to write everything out, not just down to the dirty
1986	 * threshold
1987	 */
1988	if (!bdi_has_dirty_io(&q->backing_dev_info))
1989		return;
1990
1991	rcu_read_lock();
1992	list_for_each_entry_rcu(wb, &q->backing_dev_info.wb_list, bdi_node)
1993		if (wb_has_dirty_io(wb))
1994			wb_start_writeback(wb, nr_pages, true,
1995					   WB_REASON_LAPTOP_TIMER);
1996	rcu_read_unlock();
1997}
1998
1999/*
2000 * We've spun up the disk and we're in laptop mode: schedule writeback
2001 * of all dirty data a few seconds from now.  If the flush is already scheduled
2002 * then push it back - the user is still using the disk.
2003 */
2004void laptop_io_completion(struct backing_dev_info *info)
2005{
2006	mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
2007}
2008
2009/*
2010 * We're in laptop mode and we've just synced. The sync's writes will have
2011 * caused another writeback to be scheduled by laptop_io_completion.
2012 * Nothing needs to be written back anymore, so we unschedule the writeback.
2013 */
2014void laptop_sync_completion(void)
2015{
2016	struct backing_dev_info *bdi;
2017
2018	rcu_read_lock();
2019
2020	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
2021		del_timer(&bdi->laptop_mode_wb_timer);
2022
2023	rcu_read_unlock();
2024}
2025#endif
2026
2027/*
2028 * If ratelimit_pages is too high then we can get into dirty-data overload
2029 * if a large number of processes all perform writes at the same time.
2030 * If it is too low then SMP machines will call the (expensive)
2031 * get_writeback_state too often.
2032 *
2033 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
2034 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
2035 * thresholds.
2036 */
2037
2038void writeback_set_ratelimit(void)
2039{
2040	struct wb_domain *dom = &global_wb_domain;
2041	unsigned long background_thresh;
2042	unsigned long dirty_thresh;
2043
2044	global_dirty_limits(&background_thresh, &dirty_thresh);
2045	dom->dirty_limit = dirty_thresh;
2046	ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
2047	if (ratelimit_pages < 16)
2048		ratelimit_pages = 16;
2049}
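/*
 * Worked example with made-up numbers: if dirty_thresh comes to 131072 pages
 * (512MB with 4K pages) on an 8-CPU machine, ratelimit_pages becomes
 * 131072 / (8 * 32) = 512, i.e. each CPU may dirty up to 512 pages between
 * balance_dirty_pages_ratelimited() checks, bounding the collective
 * overshoot to about 1/32 of the threshold.
 */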
2050
2051static int
2052ratelimit_handler(struct notifier_block *self, unsigned long action,
2053		  void *hcpu)
2054{
2055
2056	switch (action & ~CPU_TASKS_FROZEN) {
2057	case CPU_ONLINE:
2058	case CPU_DEAD:
2059		writeback_set_ratelimit();
2060		return NOTIFY_OK;
2061	default:
2062		return NOTIFY_DONE;
2063	}
2064}
2065
2066static struct notifier_block ratelimit_nb = {
2067	.notifier_call	= ratelimit_handler,
2068	.next		= NULL,
2069};
 
2070
2071/*
2072 * Called early on to tune the page writeback dirty limits.
2073 *
2074 * We used to scale dirty pages according to how total memory
2075 * related to pages that could be allocated for buffers (by
2076 * comparing nr_free_buffer_pages() to vm_total_pages).
2077 *
2078 * However, that was when we used "dirty_ratio" to scale with
2079 * all memory, and we don't do that any more. "dirty_ratio"
2080 * is now applied to total non-HIGHPAGE memory (by subtracting
2081 * totalhigh_pages from vm_total_pages), and as such we can't
2082 * get into the old insane situation any more where we had
2083 * large amounts of dirty pages compared to a small amount of
2084 * non-HIGHMEM memory.
2085 *
2086 * But we might still want to scale the dirty_ratio by how
2087 * much memory the box has..
2088 */
2089void __init page_writeback_init(void)
2090{
2091	BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
2092
2093	writeback_set_ratelimit();
2094	register_cpu_notifier(&ratelimit_nb);
2095}
2096
2097/**
2098 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
2099 * @mapping: address space structure to write
2100 * @start: starting page index
2101 * @end: ending page index (inclusive)
2102 *
2103 * This function scans the page range from @start to @end (inclusive) and tags
2104 * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
2105 * that write_cache_pages (or whoever calls this function) will then use
2106 * TOWRITE tag to identify pages eligible for writeback.  This mechanism is
2107 * used to avoid livelocking of writeback by a process steadily creating new
2108 * dirty pages in the file (thus it is important for this function to be quick
2109 * so that it can tag pages faster than a dirtying process can create them).
2110 */
2111/*
2112 * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency.
2113 */
2114void tag_pages_for_writeback(struct address_space *mapping,
2115			     pgoff_t start, pgoff_t end)
2116{
2117#define WRITEBACK_TAG_BATCH 4096
2118	unsigned long tagged;
2119
2120	do {
2121		spin_lock_irq(&mapping->tree_lock);
2122		tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
2123				&start, end, WRITEBACK_TAG_BATCH,
2124				PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
2125		spin_unlock_irq(&mapping->tree_lock);
2126		WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
2127		cond_resched();
2128		/* We check 'start' to handle wrapping when end == ~0UL */
2129	} while (tagged >= WRITEBACK_TAG_BATCH && start);
 
2130}
2131EXPORT_SYMBOL(tag_pages_for_writeback);
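/*
 * Illustrative only: a ->writepages() implementation doing a data-integrity
 * sync would first tag the range and then walk the TOWRITE tag, which is
 * what write_cache_pages() below does:
 *
 *	tag_pages_for_writeback(mapping, index, end);
 *	while ((nr = pagevec_lookup_tag(&pvec, mapping, &index,
 *					PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE)))
 *		write_out_pages(&pvec, nr);	// hypothetical helper
 */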
2132
2133/**
2134 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2135 * @mapping: address space structure to write
2136 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2137 * @writepage: function called for each page
2138 * @data: data passed to writepage function
2139 *
2140 * If a page is already under I/O, write_cache_pages() skips it, even
2141 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
2142 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
2143 * and msync() need to guarantee that all the data which was dirty at the time
2144 * the call was made get new I/O started against them.  If wbc->sync_mode is
2145 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2146 * existing IO to complete.
2147 *
2148 * To avoid livelocks (when other process dirties new pages), we first tag
2149 * pages which should be written back with TOWRITE tag and only then start
2150 * writing them. For data-integrity sync we have to be careful so that we do
2151 * not miss some pages (e.g., because some other process has cleared TOWRITE
2152 * tag we set). The rule we follow is that TOWRITE tag can be cleared only
2153 * by the process clearing the DIRTY tag (and submitting the page for IO).
2154 */
2155int write_cache_pages(struct address_space *mapping,
2156		      struct writeback_control *wbc, writepage_t writepage,
2157		      void *data)
2158{
2159	int ret = 0;
2160	int done = 0;
2161	struct pagevec pvec;
2162	int nr_pages;
2163	pgoff_t uninitialized_var(writeback_index);
2164	pgoff_t index;
2165	pgoff_t end;		/* Inclusive */
2166	pgoff_t done_index;
2167	int cycled;
2168	int range_whole = 0;
2169	int tag;
2170
2171	pagevec_init(&pvec, 0);
2172	if (wbc->range_cyclic) {
2173		writeback_index = mapping->writeback_index; /* prev offset */
2174		index = writeback_index;
2175		if (index == 0)
2176			cycled = 1;
2177		else
2178			cycled = 0;
2179		end = -1;
2180	} else {
2181		index = wbc->range_start >> PAGE_SHIFT;
2182		end = wbc->range_end >> PAGE_SHIFT;
2183		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2184			range_whole = 1;
2185		cycled = 1; /* ignore range_cyclic tests */
2186	}
2187	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
 
2188		tag = PAGECACHE_TAG_TOWRITE;
2189	else
2190		tag = PAGECACHE_TAG_DIRTY;
2191retry:
2192	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2193		tag_pages_for_writeback(mapping, index, end);
2194	done_index = index;
2195	while (!done && (index <= end)) {
2196		int i;
2197
2198		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
2199			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
2200		if (nr_pages == 0)
2201			break;
2202
2203		for (i = 0; i < nr_pages; i++) {
2204			struct page *page = pvec.pages[i];
2205
2206			/*
2207			 * At this point, the page may be truncated or
2208			 * invalidated (changing page->mapping to NULL), or
2209			 * even swizzled back from swapper_space to tmpfs file
2210			 * mapping. However, page->index will not change
2211			 * because we have a reference on the page.
2212			 */
2213			if (page->index > end) {
2214				/*
2215				 * can't be range_cyclic (1st pass) because
2216				 * end == -1 in that case.
2217				 */
2218				done = 1;
2219				break;
2220			}
2221
2222			done_index = page->index;
2223
2224			lock_page(page);
2225
2226			/*
2227			 * Page truncated or invalidated. We can freely skip it
2228			 * then, even for data integrity operations: the page
2229			 * has disappeared concurrently, so there could be no
2230			 * real expectation of this data integrity operation
2231			 * even if there is now a new, dirty page at the same
2232			 * pagecache address.
2233			 */
2234			if (unlikely(page->mapping != mapping)) {
2235continue_unlock:
2236				unlock_page(page);
2237				continue;
2238			}
2239
2240			if (!PageDirty(page)) {
2241				/* someone wrote it for us */
2242				goto continue_unlock;
2243			}
2244
2245			if (PageWriteback(page)) {
2246				if (wbc->sync_mode != WB_SYNC_NONE)
2247					wait_on_page_writeback(page);
2248				else
2249					goto continue_unlock;
2250			}
2251
2252			BUG_ON(PageWriteback(page));
2253			if (!clear_page_dirty_for_io(page))
2254				goto continue_unlock;
2255
2256			trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
2257			ret = (*writepage)(page, wbc, data);
2258			if (unlikely(ret)) {
2259				if (ret == AOP_WRITEPAGE_ACTIVATE) {
2260					unlock_page(page);
2261					ret = 0;
2262				} else {
2263					/*
2264					 * done_index is set past this page,
2265					 * so media errors will not choke
2266					 * background writeout for the entire
2267					 * file. This has consequences for
2268					 * range_cyclic semantics (ie. it may
2269					 * not be suitable for data integrity
2270					 * writeout).
2271					 */
2272					done_index = page->index + 1;
2273					done = 1;
2274					break;
2275				}
 
 
2276			}
2277
2278			/*
2279			 * We stop writing back only if we are not doing
2280			 * integrity sync. In case of integrity sync we have to
2281			 * keep going until we have written all the pages
2282			 * we tagged for writeback prior to entering this loop.
2283			 */
2284			if (--wbc->nr_to_write <= 0 &&
 
2285			    wbc->sync_mode == WB_SYNC_NONE) {
2286				done = 1;
2287				break;
2288			}
2289		}
2290		pagevec_release(&pvec);
2291		cond_resched();
2292	}
2293	if (!cycled && !done) {
2294		/*
2295		 * range_cyclic:
2296		 * We hit the last page and there is more work to be done: wrap
2297		 * back to the start of the file
2298		 */
2299		cycled = 1;
2300		index = 0;
2301		end = writeback_index - 1;
2302		goto retry;
2303	}
2304	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2305		mapping->writeback_index = done_index;
2306
2307	return ret;
2308}
2309EXPORT_SYMBOL(write_cache_pages);
2310
2311/*
2312 * Function used by generic_writepages to call the real writepage
2313 * function and set the mapping flags on error
2314 */
2315static int __writepage(struct page *page, struct writeback_control *wbc,
2316		       void *data)
2317{
2318	struct address_space *mapping = data;
2319	int ret = mapping->a_ops->writepage(page, wbc);
2320	mapping_set_error(mapping, ret);
2321	return ret;
2322}
2323
2324/**
2325 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
2326 * @mapping: address space structure to write
2327 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2328 *
2329 * This is a library function, which implements the writepages()
2330 * address_space_operation.
2331 */
2332int generic_writepages(struct address_space *mapping,
2333		       struct writeback_control *wbc)
2334{
2335	struct blk_plug plug;
2336	int ret;
2337
2338	/* deal with chardevs and other special file */
2339	if (!mapping->a_ops->writepage)
2340		return 0;
2341
2342	blk_start_plug(&plug);
2343	ret = write_cache_pages(mapping, wbc, __writepage, mapping);
2344	blk_finish_plug(&plug);
2345	return ret;
2346}
2347
2348EXPORT_SYMBOL(generic_writepages);
2349
2350int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
2351{
2352	int ret;
 
2353
2354	if (wbc->nr_to_write <= 0)
2355		return 0;
2356	if (mapping->a_ops->writepages)
2357		ret = mapping->a_ops->writepages(mapping, wbc);
2358	else
2359		ret = generic_writepages(mapping, wbc);
2360	return ret;
2361}
2362
2363/**
2364 * write_one_page - write out a single page and optionally wait on I/O
2365 * @page: the page to write
2366 * @wait: if true, wait on writeout
2367 *
2368 * The page must be locked by the caller and will be unlocked upon return.
2369 *
2370 * write_one_page() returns a negative error code if I/O failed.
2371 */
2372int write_one_page(struct page *page, int wait)
2373{
2374	struct address_space *mapping = page->mapping;
2375	int ret = 0;
2376	struct writeback_control wbc = {
2377		.sync_mode = WB_SYNC_ALL,
2378		.nr_to_write = 1,
2379	};
2380
2381	BUG_ON(!PageLocked(page));
2382
2383	if (wait)
2384		wait_on_page_writeback(page);
2385
2386	if (clear_page_dirty_for_io(page)) {
2387		get_page(page);
2388		ret = mapping->a_ops->writepage(page, &wbc);
2389		if (ret == 0 && wait) {
2390			wait_on_page_writeback(page);
2391			if (PageError(page))
2392				ret = -EIO;
2393		}
2394		put_page(page);
2395	} else {
2396		unlock_page(page);
2397	}
2398	return ret;
2399}
2400EXPORT_SYMBOL(write_one_page);
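/*
 * Minimal usage sketch (assumed caller, not from this file): synchronously
 * flush a single locked page, e.g. a directory or metadata page:
 *
 *	lock_page(page);
 *	err = write_one_page(page, 1);	// page is unlocked on return
 *	if (err)
 *		handle_write_error(inode, err);	// hypothetical helper
 */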
2401
2402/*
2403 * For address_spaces which do not use buffers nor write back.
2404 */
2405int __set_page_dirty_no_writeback(struct page *page)
2406{
2407	if (!PageDirty(page))
2408		return !TestSetPageDirty(page);
2409	return 0;
2410}
 
2411
2412/*
2413 * Helper function for set_page_dirty family.
2414 *
2415 * Caller must hold lock_page_memcg().
2416 *
2417 * NOTE: This relies on being atomic wrt interrupts.
2418 */
2419void account_page_dirtied(struct page *page, struct address_space *mapping)
 
2420{
2421	struct inode *inode = mapping->host;
2422
2423	trace_writeback_dirty_page(page, mapping);
2424
2425	if (mapping_cap_account_dirty(mapping)) {
2426		struct bdi_writeback *wb;
 
2427
2428		inode_attach_wb(inode, page);
2429		wb = inode_to_wb(inode);
2430
2431		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_DIRTY);
2432		__inc_zone_page_state(page, NR_FILE_DIRTY);
2433		__inc_zone_page_state(page, NR_DIRTIED);
2434		__inc_wb_stat(wb, WB_RECLAIMABLE);
2435		__inc_wb_stat(wb, WB_DIRTIED);
2436		task_io_account_write(PAGE_SIZE);
2437		current->nr_dirtied++;
2438		this_cpu_inc(bdp_ratelimits);
 
 
2439	}
2440}
2441EXPORT_SYMBOL(account_page_dirtied);
2442
2443/*
2444 * Helper function for deaccounting dirty page without writeback.
2445 *
2446 * Caller must hold lock_page_memcg().
2447 */
2448void account_page_cleaned(struct page *page, struct address_space *mapping,
2449			  struct bdi_writeback *wb)
2450{
2451	if (mapping_cap_account_dirty(mapping)) {
2452		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
2453		dec_zone_page_state(page, NR_FILE_DIRTY);
2454		dec_wb_stat(wb, WB_RECLAIMABLE);
2455		task_io_account_cancelled_write(PAGE_SIZE);
2456	}
2457}
2458
2459/*
2460 * For address_spaces which do not use buffers.  Just tag the page as dirty in
2461 * its radix tree.
2462 *
2463 * This is also used when a single buffer is being dirtied: we want to set the
2464 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
2465 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
2466 *
2467 * The caller must ensure this doesn't race with truncation.  Most will simply
2468 * hold the page lock, but e.g. zap_pte_range() calls with the page mapped and
2469 * the pte lock held, which also locks out truncation.
 
 
2470 */
2471int __set_page_dirty_nobuffers(struct page *page)
 
2472{
2473	lock_page_memcg(page);
2474	if (!TestSetPageDirty(page)) {
2475		struct address_space *mapping = page_mapping(page);
2476		unsigned long flags;
2477
2478		if (!mapping) {
2479			unlock_page_memcg(page);
2480			return 1;
2481		}
2482
2483		spin_lock_irqsave(&mapping->tree_lock, flags);
2484		BUG_ON(page_mapping(page) != mapping);
2485		WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
2486		account_page_dirtied(page, mapping);
2487		radix_tree_tag_set(&mapping->page_tree, page_index(page),
2488				   PAGECACHE_TAG_DIRTY);
2489		spin_unlock_irqrestore(&mapping->tree_lock, flags);
2490		unlock_page_memcg(page);
2491
2492		if (mapping->host) {
2493			/* !PageAnon && !swapper_space */
2494			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
2495		}
2496		return 1;
2497	}
2498	unlock_page_memcg(page);
2499	return 0;
2500}
2501EXPORT_SYMBOL(__set_page_dirty_nobuffers);
2502
2503/*
2504 * Call this whenever redirtying a page, to de-account the dirty counters
2505 * (NR_DIRTIED, BDI_DIRTIED, tsk->nr_dirtied), so that they match the written
2506 * counters (NR_WRITTEN, BDI_WRITTEN) in long term. The mismatches will lead to
2507 * systematic errors in balanced_dirty_ratelimit and the dirty pages position
2508 * control.
2509 */
2510void account_page_redirty(struct page *page)
2511{
2512	struct address_space *mapping = page->mapping;
2513
2514	if (mapping && mapping_cap_account_dirty(mapping)) {
2515		struct inode *inode = mapping->host;
2516		struct bdi_writeback *wb;
2517		bool locked;
2518
2519		wb = unlocked_inode_to_wb_begin(inode, &locked);
2520		current->nr_dirtied--;
2521		dec_zone_page_state(page, NR_DIRTIED);
2522		dec_wb_stat(wb, WB_DIRTIED);
2523		unlocked_inode_to_wb_end(inode, locked);
2524	}
 
2525}
2526EXPORT_SYMBOL(account_page_redirty);
2527
2528/*
2529 * When a writepage implementation decides that it doesn't want to write this
2530 * page for some reason, it should redirty the locked page via
2531 * redirty_page_for_writepage() and it should then unlock the page and return 0
2532 */
2533int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
2534{
2535	int ret;
2536
2537	wbc->pages_skipped++;
2538	ret = __set_page_dirty_nobuffers(page);
2539	account_page_redirty(page);
 
 
 
2540	return ret;
2541}
2542EXPORT_SYMBOL(redirty_page_for_writepage);
2543
2544/*
2545 * Dirty a page.
 
2546 *
2547 * For pages with a mapping this should be done under the page lock
2548 * for the benefit of asynchronous memory errors, which prefer a consistent
2549 * dirty state. This rule can be broken in some special cases,
2550 * but it is better not to.
 
 
2551 *
2552 * If the mapping doesn't provide a set_page_dirty a_op, then
2553 * just fall through and assume that it wants buffer_heads.
2554 */
2555int set_page_dirty(struct page *page)
2556{
2557	struct address_space *mapping = page_mapping(page);
2558
2559	if (likely(mapping)) {
2560		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
2561		/*
2562		 * readahead/lru_deactivate_page could leave PG_readahead/PG_reclaim
2563		 * set due to a race with end_page_writeback().
2564		 * For readahead, if the page is written, the flag is
2565		 * reset, so there is no problem.
2566		 * For lru_deactivate_page, if the page is redirtied, the flag is
2567		 * also reset; but if the page is then used by readahead
2568		 * it will confuse readahead and make it restart the size ramp-up
2569		 * process. That is only a minor problem, though.
 
2570		 */
2571		if (PageReclaim(page))
2572			ClearPageReclaim(page);
2573#ifdef CONFIG_BLOCK
2574		if (!spd)
2575			spd = __set_page_dirty_buffers;
2576#endif
2577		return (*spd)(page);
2578	}
2579	if (!PageDirty(page)) {
2580		if (!TestSetPageDirty(page))
2581			return 1;
2582	}
2583	return 0;
 
2584}
2585EXPORT_SYMBOL(set_page_dirty);
2586
2587/*
2588 * set_page_dirty() is racy if the caller has no reference against
2589 * page->mapping->host, and if the page is unlocked.  This is because another
2590 * CPU could truncate the page off the mapping and then free the mapping.
2591 *
2592 * Usually, the page _is_ locked, or the caller is a user-space process which
2593 * holds a reference on the inode by having an open file.
2594 *
2595 * In other cases, the page should be locked before running set_page_dirty().
2596 */
2597int set_page_dirty_lock(struct page *page)
2598{
2599	int ret;
2600
2601	lock_page(page);
2602	ret = set_page_dirty(page);
2603	unlock_page(page);
2604	return ret;
2605}
2606EXPORT_SYMBOL(set_page_dirty_lock);
2607
2608/*
2609 * This cancels just the dirty bit on the kernel page itself, it does NOT
2610 * actually remove dirty bits on any mmap's that may be around. It also
2611 * leaves the page tagged dirty, so any sync activity will still find it on
2612 * the dirty lists, and in particular, clear_page_dirty_for_io() will still
2613 * look at the dirty bits in the VM.
2614 *
2615 * Doing this should *normally* only ever be done when a page is truncated,
2616 * and is not actually mapped anywhere at all. However, fs/buffer.c does
2617 * this when it notices that somebody has cleaned out all the buffers on a
2618 * page without actually doing it through the VM. Can you say "ext3 is
2619 * horribly ugly"? Thought you could.
2620 */
2621void cancel_dirty_page(struct page *page)
2622{
2623	struct address_space *mapping = page_mapping(page);
2624
2625	if (mapping_cap_account_dirty(mapping)) {
2626		struct inode *inode = mapping->host;
2627		struct bdi_writeback *wb;
2628		bool locked;
2629
2630		lock_page_memcg(page);
2631		wb = unlocked_inode_to_wb_begin(inode, &locked);
2632
2633		if (TestClearPageDirty(page))
2634			account_page_cleaned(page, mapping, wb);
2635
2636		unlocked_inode_to_wb_end(inode, locked);
2637		unlock_page_memcg(page);
2638	} else {
2639		ClearPageDirty(page);
2640	}
2641}
2642EXPORT_SYMBOL(cancel_dirty_page);
2643
2644/*
2645 * Clear a page's dirty flag, while caring for dirty memory accounting.
2646 * Returns true if the page was previously dirty.
2647 *
2648 * This is for preparing to put the page under writeout.  We leave the page
2649 * tagged as dirty in the radix tree so that a concurrent write-for-sync
2650 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
2651 * implementation will run either set_page_writeback() or set_page_dirty(),
2652 * at which stage we bring the page's dirty flag and radix-tree dirty tag
2653 * back into sync.
2654 *
2655 * This incoherency between the page's dirty flag and radix-tree tag is
2656 * unfortunate, but it only exists while the page is locked.
2657 */
2658int clear_page_dirty_for_io(struct page *page)
2659{
2660	struct address_space *mapping = page_mapping(page);
2661	int ret = 0;
2662
2663	BUG_ON(!PageLocked(page));
2664
2665	if (mapping && mapping_cap_account_dirty(mapping)) {
2666		struct inode *inode = mapping->host;
2667		struct bdi_writeback *wb;
2668		bool locked;
2669
2670		/*
2671		 * Yes, Virginia, this is indeed insane.
2672		 *
2673		 * We use this sequence to make sure that
2674		 *  (a) we account for dirty stats properly
2675		 *  (b) we tell the low-level filesystem to
2676		 *      mark the whole page dirty if it was
2677		 *      dirty in a pagetable. Only to then
2678		 *  (c) clean the page again and return 1 to
2679		 *      cause the writeback.
2680		 *
2681		 * This way we avoid all nasty races with the
2682		 * dirty bit in multiple places and clearing
2683		 * them concurrently from different threads.
2684		 *
2685		 * Note! Normally the "set_page_dirty(page)"
2686		 * has no effect on the actual dirty bit - since
2687		 * that will already usually be set. But we
2688		 * need the side effects, and it can help us
2689		 * avoid races.
2690		 *
2691		 * We basically use the page "master dirty bit"
2692		 * as a serialization point for all the different
2693		 * threads doing their things.
2694		 */
2695		if (page_mkclean(page))
2696			set_page_dirty(page);
2697		/*
2698		 * We carefully synchronise fault handlers against
2699		 * installing a dirty pte and marking the page dirty
2700		 * at this point.  We do this by having them hold the
2701		 * page lock while dirtying the page, and pages are
2702		 * always locked coming in here, so we get the desired
2703		 * exclusion.
2704		 */
2705		wb = unlocked_inode_to_wb_begin(inode, &locked);
2706		if (TestClearPageDirty(page)) {
2707			mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
2708			dec_zone_page_state(page, NR_FILE_DIRTY);
2709			dec_wb_stat(wb, WB_RECLAIMABLE);
2710			ret = 1;
 
2711		}
2712		unlocked_inode_to_wb_end(inode, locked);
2713		return ret;
2714	}
2715	return TestClearPageDirty(page);
2716}
2717EXPORT_SYMBOL(clear_page_dirty_for_io);
2718
2719int test_clear_page_writeback(struct page *page)
2720{
2721	struct address_space *mapping = page_mapping(page);
2722	int ret;
2723
2724	lock_page_memcg(page);
2725	if (mapping) {
2726		struct inode *inode = mapping->host;
2727		struct backing_dev_info *bdi = inode_to_bdi(inode);
2728		unsigned long flags;
2729
2730		spin_lock_irqsave(&mapping->tree_lock, flags);
2731		ret = TestClearPageWriteback(page);
2732		if (ret) {
2733			radix_tree_tag_clear(&mapping->page_tree,
2734						page_index(page),
2735						PAGECACHE_TAG_WRITEBACK);
2736			if (bdi_cap_account_writeback(bdi)) {
2737				struct bdi_writeback *wb = inode_to_wb(inode);
2738
2739				__dec_wb_stat(wb, WB_WRITEBACK);
2740				__wb_writeout_inc(wb);
2741			}
2742		}
2743		spin_unlock_irqrestore(&mapping->tree_lock, flags);
2744	} else {
2745		ret = TestClearPageWriteback(page);
2746	}
2747	if (ret) {
2748		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
2749		dec_zone_page_state(page, NR_WRITEBACK);
2750		inc_zone_page_state(page, NR_WRITTEN);
2751	}
2752	unlock_page_memcg(page);
2753	return ret;
2754}
2755
2756int __test_set_page_writeback(struct page *page, bool keep_write)
2757{
2758	struct address_space *mapping = page_mapping(page);
2759	int ret;
 
2760
2761	lock_page_memcg(page);
2762	if (mapping) {
 
 
 
2763		struct inode *inode = mapping->host;
2764		struct backing_dev_info *bdi = inode_to_bdi(inode);
2765		unsigned long flags;
2766
2767		spin_lock_irqsave(&mapping->tree_lock, flags);
2768		ret = TestSetPageWriteback(page);
2769		if (!ret) {
2770			radix_tree_tag_set(&mapping->page_tree,
2771						page_index(page),
2772						PAGECACHE_TAG_WRITEBACK);
2773			if (bdi_cap_account_writeback(bdi))
2774				__inc_wb_stat(inode_to_wb(inode), WB_WRITEBACK);
2775		}
2776		if (!PageDirty(page))
2777			radix_tree_tag_clear(&mapping->page_tree,
2778						page_index(page),
2779						PAGECACHE_TAG_DIRTY);
2780		if (!keep_write)
2781			radix_tree_tag_clear(&mapping->page_tree,
2782						page_index(page),
2783						PAGECACHE_TAG_TOWRITE);
2784		spin_unlock_irqrestore(&mapping->tree_lock, flags);
2785	} else {
2786		ret = TestSetPageWriteback(page);
2787	}
2788	if (!ret) {
2789		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
2790		inc_zone_page_state(page, NR_WRITEBACK);
2791	}
2792	unlock_page_memcg(page);
2793	return ret;
2794
2795}
2796EXPORT_SYMBOL(__test_set_page_writeback);
2797
2798/*
2799 * Return true if any of the pages in the mapping are marked with the
2800 * passed tag.
2801 */
2802int mapping_tagged(struct address_space *mapping, int tag)
2803{
2804	return radix_tree_tagged(&mapping->page_tree, tag);
2805}
2806EXPORT_SYMBOL(mapping_tagged);
2807
2808/**
2809 * wait_for_stable_page() - wait for writeback to finish, if necessary.
2810 * @page:	The page to wait on.
2811 *
2812 * This function determines if the given page is related to a backing device
2813 * that requires page contents to be held stable during writeback.  If so, then
2814 * it will wait for any pending writeback to complete.
 
2815 */
2816void wait_for_stable_page(struct page *page)
2817{
2818	if (bdi_cap_stable_pages_required(inode_to_bdi(page->mapping->host)))
2819		wait_on_page_writeback(page);
2820}
2821EXPORT_SYMBOL_GPL(wait_for_stable_page);
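/*
 * Typical call site, sketched as an assumption rather than code from this
 * file: a ->page_mkwrite() handler waits for writeback to finish before
 * letting the page be redirtied, roughly:
 *
 *	lock_page(page);
 *	if (page->mapping != inode->i_mapping) {
 *		unlock_page(page);
 *		return VM_FAULT_NOPAGE;
 *	}
 *	wait_for_stable_page(page);
 *	return VM_FAULT_LOCKED;
 */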
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * mm/page-writeback.c
   4 *
   5 * Copyright (C) 2002, Linus Torvalds.
   6 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
   7 *
   8 * Contains functions related to writing back dirty pages at the
   9 * address_space level.
  10 *
  11 * 10Apr2002	Andrew Morton
  12 *		Initial version
  13 */
  14
  15#include <linux/kernel.h>
  16#include <linux/math64.h>
  17#include <linux/export.h>
  18#include <linux/spinlock.h>
  19#include <linux/fs.h>
  20#include <linux/mm.h>
  21#include <linux/swap.h>
  22#include <linux/slab.h>
  23#include <linux/pagemap.h>
  24#include <linux/writeback.h>
  25#include <linux/init.h>
  26#include <linux/backing-dev.h>
  27#include <linux/task_io_accounting_ops.h>
  28#include <linux/blkdev.h>
  29#include <linux/mpage.h>
  30#include <linux/rmap.h>
  31#include <linux/percpu.h>
 
  32#include <linux/smp.h>
  33#include <linux/sysctl.h>
  34#include <linux/cpu.h>
  35#include <linux/syscalls.h>
 
  36#include <linux/pagevec.h>
  37#include <linux/timer.h>
  38#include <linux/sched/rt.h>
  39#include <linux/sched/signal.h>
  40#include <linux/mm_inline.h>
  41#include <trace/events/writeback.h>
  42
  43#include "internal.h"
  44
  45/*
  46 * Sleep at most 200ms at a time in balance_dirty_pages().
  47 */
  48#define MAX_PAUSE		max(HZ/5, 1)
  49
  50/*
  51 * Try to keep balance_dirty_pages() call intervals higher than this many pages
  52 * by raising pause time to max_pause when falls below it.
  53 */
  54#define DIRTY_POLL_THRESH	(128 >> (PAGE_SHIFT - 10))
  55
  56/*
  57 * Estimate write bandwidth at 200ms intervals.
  58 */
  59#define BANDWIDTH_INTERVAL	max(HZ/5, 1)
  60
  61#define RATELIMIT_CALC_SHIFT	10
  62
  63/*
  64 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
  65 * will look to see if it needs to force writeback or throttling.
  66 */
  67static long ratelimit_pages = 32;
  68
  69/* The following parameters are exported via /proc/sys/vm */
  70
  71/*
  72 * Start background writeback (via writeback threads) at this percentage
  73 */
  74static int dirty_background_ratio = 10;
  75
  76/*
  77 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
  78 * dirty_background_ratio * the amount of dirtyable memory
  79 */
  80static unsigned long dirty_background_bytes;
  81
  82/*
  83 * free highmem will not be subtracted from the total free memory
  84 * for calculating free ratios if vm_highmem_is_dirtyable is true
  85 */
  86static int vm_highmem_is_dirtyable;
  87
  88/*
  89 * The generator of dirty data starts writeback at this percentage
  90 */
  91static int vm_dirty_ratio = 20;
  92
  93/*
  94 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
  95 * vm_dirty_ratio * the amount of dirtyable memory
  96 */
  97static unsigned long vm_dirty_bytes;
  98
  99/*
 100 * The interval between `kupdate'-style writebacks
 101 */
 102unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
 103
 104EXPORT_SYMBOL_GPL(dirty_writeback_interval);
 105
 106/*
 107 * The longest time for which data is allowed to remain dirty
 108 */
 109unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
 110
 111/*
 112 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 113 * a full sync is triggered after this time elapses without any disk activity.
 114 */
 115int laptop_mode;
 116
 117EXPORT_SYMBOL(laptop_mode);
 118
 119/* End of sysctl-exported parameters */
 120
 121struct wb_domain global_wb_domain;
 122
 123/* consolidated parameters for balance_dirty_pages() and its subroutines */
 124struct dirty_throttle_control {
 125#ifdef CONFIG_CGROUP_WRITEBACK
 126	struct wb_domain	*dom;
 127	struct dirty_throttle_control *gdtc;	/* only set in memcg dtc's */
 128#endif
 129	struct bdi_writeback	*wb;
 130	struct fprop_local_percpu *wb_completions;
 131
 132	unsigned long		avail;		/* dirtyable */
 133	unsigned long		dirty;		/* file_dirty + write + nfs */
 134	unsigned long		thresh;		/* dirty threshold */
 135	unsigned long		bg_thresh;	/* dirty background threshold */
 136
 137	unsigned long		wb_dirty;	/* per-wb counterparts */
 138	unsigned long		wb_thresh;
 139	unsigned long		wb_bg_thresh;
 140
 141	unsigned long		pos_ratio;
 142};
 143
 144/*
 145 * Length of period for aging writeout fractions of bdis. This is an
 146 * arbitrarily chosen number. The longer the period, the slower fractions will
 147 * reflect changes in current writeout rate.
 148 */
 149#define VM_COMPLETIONS_PERIOD_LEN (3*HZ)
 150
 151#ifdef CONFIG_CGROUP_WRITEBACK
 152
 153#define GDTC_INIT(__wb)		.wb = (__wb),				\
 154				.dom = &global_wb_domain,		\
 155				.wb_completions = &(__wb)->completions
 156
 157#define GDTC_INIT_NO_WB		.dom = &global_wb_domain
 158
 159#define MDTC_INIT(__wb, __gdtc)	.wb = (__wb),				\
 160				.dom = mem_cgroup_wb_domain(__wb),	\
 161				.wb_completions = &(__wb)->memcg_completions, \
 162				.gdtc = __gdtc
 163
 164static bool mdtc_valid(struct dirty_throttle_control *dtc)
 165{
 166	return dtc->dom;
 167}
 168
 169static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
 170{
 171	return dtc->dom;
 172}
 173
 174static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
 175{
 176	return mdtc->gdtc;
 177}
 178
 179static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
 180{
 181	return &wb->memcg_completions;
 182}
 183
 184static void wb_min_max_ratio(struct bdi_writeback *wb,
 185			     unsigned long *minp, unsigned long *maxp)
 186{
 187	unsigned long this_bw = READ_ONCE(wb->avg_write_bandwidth);
 188	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
 189	unsigned long long min = wb->bdi->min_ratio;
 190	unsigned long long max = wb->bdi->max_ratio;
 191
 192	/*
 193	 * @wb may already be clean by the time control reaches here and
 194	 * the total may not include its bw.
 195	 */
 196	if (this_bw < tot_bw) {
 197		if (min) {
 198			min *= this_bw;
 199			min = div64_ul(min, tot_bw);
 200		}
 201		if (max < 100 * BDI_RATIO_SCALE) {
 202			max *= this_bw;
 203			max = div64_ul(max, tot_bw);
 204		}
 205	}
 206
 207	*minp = min;
 208	*maxp = max;
 209}
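/*
 * Illustrative numbers (not taken from the code above): if a wb's bdi-wide
 * minimum works out to 10 units and the wb currently contributes
 * this_bw = 20 out of tot_bw = 100 units of write bandwidth, the effective
 * minimum becomes 10 * 20 / 100 = 2 units, i.e. the bdi-wide guarantee is
 * scaled by the wb's share of the bandwidth.
 */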
 210
 211#else	/* CONFIG_CGROUP_WRITEBACK */
 212
 213#define GDTC_INIT(__wb)		.wb = (__wb),                           \
 214				.wb_completions = &(__wb)->completions
 215#define GDTC_INIT_NO_WB
 216#define MDTC_INIT(__wb, __gdtc)
 217
 218static bool mdtc_valid(struct dirty_throttle_control *dtc)
 219{
 220	return false;
 221}
 222
 223static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
 224{
 225	return &global_wb_domain;
 226}
 227
 228static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
 229{
 230	return NULL;
 231}
 232
 233static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
 234{
 235	return NULL;
 236}
 237
 238static void wb_min_max_ratio(struct bdi_writeback *wb,
 239			     unsigned long *minp, unsigned long *maxp)
 240{
 241	*minp = wb->bdi->min_ratio;
 242	*maxp = wb->bdi->max_ratio;
 243}
 244
 245#endif	/* CONFIG_CGROUP_WRITEBACK */
 246
 247/*
 248 * In a memory zone, there is a certain amount of pages we consider
 249 * available for the page cache, which is essentially the number of
 250 * free and reclaimable pages, minus some zone reserves to protect
 251 * lowmem and the ability to uphold the zone's watermarks without
 252 * requiring writeback.
 253 *
 254 * This number of dirtyable pages is the base value of which the
 255 * user-configurable dirty ratio is the effective number of pages that
 256 * are allowed to be actually dirtied.  Per individual zone, or
 257 * globally by using the sum of dirtyable pages over all zones.
 258 *
 259 * Because the user is allowed to specify the dirty limit globally as
 260 * absolute number of bytes, calculating the per-zone dirty limit can
 261 * require translating the configured limit into a percentage of
 262 * global dirtyable memory first.
 263 */
 264
 265/**
 266 * node_dirtyable_memory - number of dirtyable pages in a node
 267 * @pgdat: the node
 268 *
 269 * Return: the node's number of pages potentially available for dirty
 270 * page cache.  This is the base value for the per-node dirty limits.
 271 */
 272static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)
 273{
 274	unsigned long nr_pages = 0;
 275	int z;
 276
 277	for (z = 0; z < MAX_NR_ZONES; z++) {
 278		struct zone *zone = pgdat->node_zones + z;
 279
 280		if (!populated_zone(zone))
 281			continue;
 282
 283		nr_pages += zone_page_state(zone, NR_FREE_PAGES);
 284	}
 285
 
 286	/*
 287	 * Pages reserved for the kernel should not be considered
 288	 * dirtyable, to prevent a situation where reclaim has to
 289	 * clean pages in order to balance the zones.
 290	 */
 291	nr_pages -= min(nr_pages, pgdat->totalreserve_pages);
 292
 293	nr_pages += node_page_state(pgdat, NR_INACTIVE_FILE);
 294	nr_pages += node_page_state(pgdat, NR_ACTIVE_FILE);
 295
 296	return nr_pages;
 297}
 298
 299static unsigned long highmem_dirtyable_memory(unsigned long total)
 300{
 301#ifdef CONFIG_HIGHMEM
 302	int node;
 303	unsigned long x = 0;
 304	int i;
 305
 306	for_each_node_state(node, N_HIGH_MEMORY) {
 307		for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
 308			struct zone *z;
 309			unsigned long nr_pages;
 310
 311			if (!is_highmem_idx(i))
 312				continue;
 313
 314			z = &NODE_DATA(node)->node_zones[i];
 315			if (!populated_zone(z))
 316				continue;
 317
 318			nr_pages = zone_page_state(z, NR_FREE_PAGES);
 319			/* watch for underflows */
 320			nr_pages -= min(nr_pages, high_wmark_pages(z));
 321			nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE);
 322			nr_pages += zone_page_state(z, NR_ZONE_ACTIVE_FILE);
 323			x += nr_pages;
 324		}
 325	}
 326
 327	/*
  328	 * Make sure that the number of highmem pages is never larger
  329	 * than the total amount of dirtyable memory. This can only
  330	 * occur in very strange VM situations, but we want to make sure
  331	 * that it does not happen.
 332	 */
 333	return min(x, total);
 334#else
 335	return 0;
 336#endif
 337}
 338
 339/**
 340 * global_dirtyable_memory - number of globally dirtyable pages
 341 *
 342 * Return: the global number of pages potentially available for dirty
 343 * page cache.  This is the base value for the global dirty limits.
 344 */
 345static unsigned long global_dirtyable_memory(void)
 346{
 347	unsigned long x;
 348
 349	x = global_zone_page_state(NR_FREE_PAGES);
 350	/*
 351	 * Pages reserved for the kernel should not be considered
 352	 * dirtyable, to prevent a situation where reclaim has to
 353	 * clean pages in order to balance the zones.
 354	 */
 355	x -= min(x, totalreserve_pages);
 356
 357	x += global_node_page_state(NR_INACTIVE_FILE);
 358	x += global_node_page_state(NR_ACTIVE_FILE);
 359
 360	if (!vm_highmem_is_dirtyable)
 361		x -= highmem_dirtyable_memory(x);
 362
 363	return x + 1;	/* Ensure that we never return 0 */
 364}
 365
 366/**
 367 * domain_dirty_limits - calculate thresh and bg_thresh for a wb_domain
 368 * @dtc: dirty_throttle_control of interest
 369 *
 370 * Calculate @dtc->thresh and ->bg_thresh considering
 371 * vm_dirty_{bytes|ratio} and dirty_background_{bytes|ratio}.  The caller
 372 * must ensure that @dtc->avail is set before calling this function.  The
 373 * dirty limits will be lifted by 1/4 for real-time tasks.
 
 374 */
 375static void domain_dirty_limits(struct dirty_throttle_control *dtc)
 376{
 377	const unsigned long available_memory = dtc->avail;
 378	struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
 379	unsigned long bytes = vm_dirty_bytes;
 380	unsigned long bg_bytes = dirty_background_bytes;
 381	/* convert ratios to per-PAGE_SIZE for higher precision */
 382	unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
 383	unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
 384	unsigned long thresh;
 385	unsigned long bg_thresh;
 386	struct task_struct *tsk;
 387
 388	/* gdtc is !NULL iff @dtc is for memcg domain */
 389	if (gdtc) {
 390		unsigned long global_avail = gdtc->avail;
 391
 392		/*
 393		 * The byte settings can't be applied directly to memcg
 394		 * domains.  Convert them to ratios by scaling against
 395		 * globally available memory.  As the ratios are in
 396		 * per-PAGE_SIZE, they can be obtained by dividing bytes by
 397		 * number of pages.
 398		 */
 399		if (bytes)
 400			ratio = min(DIV_ROUND_UP(bytes, global_avail),
 401				    PAGE_SIZE);
 402		if (bg_bytes)
 403			bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
 404				       PAGE_SIZE);
 405		bytes = bg_bytes = 0;
 406	}
 407
 408	if (bytes)
 409		thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
 410	else
 411		thresh = (ratio * available_memory) / PAGE_SIZE;
 412
 413	if (bg_bytes)
 414		bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
 415	else
 416		bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
 417
 418	if (bg_thresh >= thresh)
 419		bg_thresh = thresh / 2;
 420	tsk = current;
 421	if (rt_task(tsk)) {
 422		bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
 423		thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
 424	}
 425	dtc->thresh = thresh;
 426	dtc->bg_thresh = bg_thresh;
 427
 428	/* we should eventually report the domain in the TP */
 429	if (!gdtc)
 430		trace_global_dirty_state(bg_thresh, thresh);
 431}
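/*
 * Worked example with made-up numbers: with vm_dirty_ratio = 20,
 * PAGE_SIZE = 4096 and available_memory = 1000000 pages,
 * ratio = 20 * 4096 / 100 = 819 (per-PAGE_SIZE), so
 * thresh = 819 * 1000000 / 4096 = 199951 pages, i.e. just under 20% of
 * dirtyable memory (the shortfall comes from integer division).
 */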
 432
 433/**
 434 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 435 * @pbackground: out parameter for bg_thresh
 436 * @pdirty: out parameter for thresh
 437 *
 438 * Calculate bg_thresh and thresh for global_wb_domain.  See
 439 * domain_dirty_limits() for details.
 440 */
 441void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 442{
 443	struct dirty_throttle_control gdtc = { GDTC_INIT_NO_WB };
 444
 445	gdtc.avail = global_dirtyable_memory();
 446	domain_dirty_limits(&gdtc);
 447
 448	*pbackground = gdtc.bg_thresh;
 449	*pdirty = gdtc.thresh;
 450}
 451
 452/**
 453 * node_dirty_limit - maximum number of dirty pages allowed in a node
 454 * @pgdat: the node
 455 *
 456 * Return: the maximum number of dirty pages allowed in a node, based
 457 * on the node's dirtyable memory.
 458 */
 459static unsigned long node_dirty_limit(struct pglist_data *pgdat)
 460{
 461	unsigned long node_memory = node_dirtyable_memory(pgdat);
 462	struct task_struct *tsk = current;
 463	unsigned long dirty;
 464
 465	if (vm_dirty_bytes)
 466		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
 467			node_memory / global_dirtyable_memory();
 468	else
 469		dirty = vm_dirty_ratio * node_memory / 100;
 470
 471	if (rt_task(tsk))
 472		dirty += dirty / 4;
 473
 474	return dirty;
 475}
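/*
 * Worked example with made-up numbers: with vm_dirty_ratio = 20 and
 * node_memory = 100000 dirtyable pages, the limit is
 * 20 * 100000 / 100 = 20000 pages; for a real-time task it is raised by a
 * quarter to 25000 pages.
 */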
 476
 477/**
 478 * node_dirty_ok - tells whether a node is within its dirty limits
 479 * @pgdat: the node to check
 480 *
 481 * Return: %true when the dirty pages in @pgdat are within the node's
 482 * dirty limit, %false if the limit is exceeded.
 483 */
 484bool node_dirty_ok(struct pglist_data *pgdat)
 485{
 486	unsigned long limit = node_dirty_limit(pgdat);
 487	unsigned long nr_pages = 0;
 488
 489	nr_pages += node_page_state(pgdat, NR_FILE_DIRTY);
 490	nr_pages += node_page_state(pgdat, NR_WRITEBACK);
 491
 492	return nr_pages <= limit;
 493}
 494
 495#ifdef CONFIG_SYSCTL
 496static int dirty_background_ratio_handler(struct ctl_table *table, int write,
 497		void *buffer, size_t *lenp, loff_t *ppos)
 498{
 499	int ret;
 500
 501	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 502	if (ret == 0 && write)
 503		dirty_background_bytes = 0;
 504	return ret;
 505}
 506
 507static int dirty_background_bytes_handler(struct ctl_table *table, int write,
 508		void *buffer, size_t *lenp, loff_t *ppos)
 509{
 510	int ret;
 511
 512	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 513	if (ret == 0 && write)
 514		dirty_background_ratio = 0;
 515	return ret;
 516}
 517
 518static int dirty_ratio_handler(struct ctl_table *table, int write, void *buffer,
 519		size_t *lenp, loff_t *ppos)
 520{
 521	int old_ratio = vm_dirty_ratio;
 522	int ret;
 523
 524	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 525	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
 526		writeback_set_ratelimit();
 527		vm_dirty_bytes = 0;
 528	}
 529	return ret;
 530}
 531
 532static int dirty_bytes_handler(struct ctl_table *table, int write,
 533		void *buffer, size_t *lenp, loff_t *ppos)
 534{
 535	unsigned long old_bytes = vm_dirty_bytes;
 536	int ret;
 537
 538	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 539	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
 540		writeback_set_ratelimit();
 541		vm_dirty_ratio = 0;
 542	}
 543	return ret;
 544}
 545#endif
 546
 547static unsigned long wp_next_time(unsigned long cur_time)
 548{
 549	cur_time += VM_COMPLETIONS_PERIOD_LEN;
 550	/* 0 has a special meaning... */
 551	if (!cur_time)
 552		return 1;
 553	return cur_time;
 554}
 555
 556static void wb_domain_writeout_add(struct wb_domain *dom,
 557				   struct fprop_local_percpu *completions,
 558				   unsigned int max_prop_frac, long nr)
 559{
 560	__fprop_add_percpu_max(&dom->completions, completions,
 561			       max_prop_frac, nr);
 562	/* First event after period switching was turned off? */
 563	if (unlikely(!dom->period_time)) {
 564		/*
 565		 * We can race with other callers updating the completion count
 566		 * it does not cause any harm since the resulting time when
 567		 * timer will fire and what is in writeout_period_time will be
 568		 * roughly the same.
 569		 */
 570		dom->period_time = wp_next_time(jiffies);
 571		mod_timer(&dom->period_timer, dom->period_time);
 572	}
 573}
 574
 575/*
 576 * Increment @wb's writeout completion count and the global writeout
 577 * completion count. Called from __folio_end_writeback().
 578 */
 579static inline void __wb_writeout_add(struct bdi_writeback *wb, long nr)
 580{
 581	struct wb_domain *cgdom;
 582
 583	wb_stat_mod(wb, WB_WRITTEN, nr);
 584	wb_domain_writeout_add(&global_wb_domain, &wb->completions,
 585			       wb->bdi->max_prop_frac, nr);
 586
 587	cgdom = mem_cgroup_wb_domain(wb);
 588	if (cgdom)
 589		wb_domain_writeout_add(cgdom, wb_memcg_completions(wb),
 590				       wb->bdi->max_prop_frac, nr);
 591}
 592
 593void wb_writeout_inc(struct bdi_writeback *wb)
 594{
 595	unsigned long flags;
 596
 597	local_irq_save(flags);
 598	__wb_writeout_add(wb, 1);
 599	local_irq_restore(flags);
 600}
 601EXPORT_SYMBOL_GPL(wb_writeout_inc);
 602
 603/*
 604 * On an idle system, we can be called long after we were scheduled because
 605 * we use deferred timers, so account for missed periods.
 606 */
 607static void writeout_period(struct timer_list *t)
 608{
 609	struct wb_domain *dom = from_timer(dom, t, period_timer);
 610	int miss_periods = (jiffies - dom->period_time) /
 611						 VM_COMPLETIONS_PERIOD_LEN;
 612
 613	if (fprop_new_period(&dom->completions, miss_periods + 1)) {
 614		dom->period_time = wp_next_time(dom->period_time +
 615				miss_periods * VM_COMPLETIONS_PERIOD_LEN);
 616		mod_timer(&dom->period_timer, dom->period_time);
 617	} else {
 618		/*
 619		 * Aging has zeroed all fractions. Stop wasting CPU on period
 620		 * updates.
 621		 */
 622		dom->period_time = 0;
 623	}
 624}
 625
 626int wb_domain_init(struct wb_domain *dom, gfp_t gfp)
 627{
 628	memset(dom, 0, sizeof(*dom));
 629
 630	spin_lock_init(&dom->lock);
 631
 632	timer_setup(&dom->period_timer, writeout_period, TIMER_DEFERRABLE);
 633
 634	dom->dirty_limit_tstamp = jiffies;
 635
 636	return fprop_global_init(&dom->completions, gfp);
 637}
 638
 639#ifdef CONFIG_CGROUP_WRITEBACK
 640void wb_domain_exit(struct wb_domain *dom)
 641{
 642	del_timer_sync(&dom->period_timer);
 643	fprop_global_destroy(&dom->completions);
 644}
 645#endif
 646
 647/*
 648 * bdi_min_ratio keeps the sum of the minimum dirty shares of all
 649 * registered backing devices, which, for obvious reasons, can not
 650 * exceed 100%.
 651 */
 652static unsigned int bdi_min_ratio;
 653
 654static int bdi_check_pages_limit(unsigned long pages)
 655{
 656	unsigned long max_dirty_pages = global_dirtyable_memory();
 657
 658	if (pages > max_dirty_pages)
 659		return -EINVAL;
 660
 661	return 0;
 662}
 663
 664static unsigned long bdi_ratio_from_pages(unsigned long pages)
 665{
 666	unsigned long background_thresh;
 667	unsigned long dirty_thresh;
 668	unsigned long ratio;
 669
 670	global_dirty_limits(&background_thresh, &dirty_thresh);
 671	ratio = div64_u64(pages * 100ULL * BDI_RATIO_SCALE, dirty_thresh);
 672
 673	return ratio;
 674}
 675
 676static u64 bdi_get_bytes(unsigned int ratio)
 677{
 678	unsigned long background_thresh;
 679	unsigned long dirty_thresh;
 680	u64 bytes;
 681
 682	global_dirty_limits(&background_thresh, &dirty_thresh);
 683	bytes = (dirty_thresh * PAGE_SIZE * ratio) / BDI_RATIO_SCALE / 100;
 684
 685	return bytes;
 686}
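/*
 * Illustrative round trip between the two helpers above (assumed numbers;
 * BDI_RATIO_SCALE is assumed to be 10000 here): with a global dirty_thresh
 * of 200000 pages and a request for 20000 pages,
 *
 *	bdi_ratio_from_pages(20000) = 20000 * 100 * 10000 / 200000
 *	                            = 100000		(i.e. 10%)
 *
 * and converting that ratio back with PAGE_SIZE = 4096,
 *
 *	bdi_get_bytes(100000) = 200000 * 4096 * 100000 / 10000 / 100
 *	                      = 81920000 bytes = 20000 pages.
 *
 * Both directions are relative to the *current* dirty_thresh, so the
 * effective byte limit tracks changes in dirtyable memory.
 */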
 687
 688static int __bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
 689{
 690	unsigned int delta;
 691	int ret = 0;
 692
 693	if (min_ratio > 100 * BDI_RATIO_SCALE)
 694		return -EINVAL;
 695
 696	spin_lock_bh(&bdi_lock);
 697	if (min_ratio > bdi->max_ratio) {
 698		ret = -EINVAL;
 699	} else {
 700		if (min_ratio < bdi->min_ratio) {
 701			delta = bdi->min_ratio - min_ratio;
 702			bdi_min_ratio -= delta;
 703			bdi->min_ratio = min_ratio;
 704		} else {
 705			delta = min_ratio - bdi->min_ratio;
 706			if (bdi_min_ratio + delta < 100 * BDI_RATIO_SCALE) {
 707				bdi_min_ratio += delta;
 708				bdi->min_ratio = min_ratio;
 709			} else {
 710				ret = -EINVAL;
 711			}
 712		}
 713	}
 714	spin_unlock_bh(&bdi_lock);
 715
 716	return ret;
 717}
 718
 719static int __bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio)
 720{
 721	int ret = 0;
 722
 723	if (max_ratio > 100 * BDI_RATIO_SCALE)
 724		return -EINVAL;
 725
 726	spin_lock_bh(&bdi_lock);
 727	if (bdi->min_ratio > max_ratio) {
 728		ret = -EINVAL;
 729	} else {
 730		bdi->max_ratio = max_ratio;
 731		bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) /
 732						(100 * BDI_RATIO_SCALE);
 733	}
 734	spin_unlock_bh(&bdi_lock);
 735
 736	return ret;
 737}
 738
 739int bdi_set_min_ratio_no_scale(struct backing_dev_info *bdi, unsigned int min_ratio)
 740{
 741	return __bdi_set_min_ratio(bdi, min_ratio);
 742}
 743
 744int bdi_set_max_ratio_no_scale(struct backing_dev_info *bdi, unsigned int max_ratio)
 745{
 746	return __bdi_set_max_ratio(bdi, max_ratio);
 747}
 748
 749int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
 750{
 751	return __bdi_set_min_ratio(bdi, min_ratio * BDI_RATIO_SCALE);
 752}
 753
 754int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio)
 755{
 756	return __bdi_set_max_ratio(bdi, max_ratio * BDI_RATIO_SCALE);
 757}
 758EXPORT_SYMBOL(bdi_set_max_ratio);
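/*
 * Usage sketch (illustrative, not code from this file): these setters are
 * normally driven from the per-bdi sysfs knobs such as
 * /sys/class/bdi/<bdi>/min_ratio and /sys/class/bdi/<bdi>/max_ratio,
 * whose store handlers in mm/backing-dev.c roughly do
 *
 *	ret = kstrtouint(buf, 10, &ratio);
 *	if (!ret)
 *		ret = bdi_set_max_ratio(bdi, ratio);
 *
 * The *_no_scale() and *_bytes() variants take values already scaled by
 * BDI_RATIO_SCALE or expressed in bytes, respectively, for finer-grained
 * control.
 */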
 759
 760u64 bdi_get_min_bytes(struct backing_dev_info *bdi)
 761{
 762	return bdi_get_bytes(bdi->min_ratio);
 763}
 764
 765int bdi_set_min_bytes(struct backing_dev_info *bdi, u64 min_bytes)
 766{
 767	int ret;
 768	unsigned long pages = min_bytes >> PAGE_SHIFT;
 769	unsigned long min_ratio;
 770
 771	ret = bdi_check_pages_limit(pages);
 772	if (ret)
 773		return ret;
 774
 775	min_ratio = bdi_ratio_from_pages(pages);
 776	return __bdi_set_min_ratio(bdi, min_ratio);
 777}
 778
 779u64 bdi_get_max_bytes(struct backing_dev_info *bdi)
 780{
 781	return bdi_get_bytes(bdi->max_ratio);
 782}
 783
 784int bdi_set_max_bytes(struct backing_dev_info *bdi, u64 max_bytes)
 785{
 786	int ret;
 787	unsigned long pages = max_bytes >> PAGE_SHIFT;
 788	unsigned long max_ratio;
 789
 790	ret = bdi_check_pages_limit(pages);
 791	if (ret)
 792		return ret;
 793
 794	max_ratio = bdi_ratio_from_pages(pages);
 795	return __bdi_set_max_ratio(bdi, max_ratio);
 796}
 797
 798int bdi_set_strict_limit(struct backing_dev_info *bdi, unsigned int strict_limit)
 799{
 800	if (strict_limit > 1)
 801		return -EINVAL;
 802
 803	spin_lock_bh(&bdi_lock);
 804	if (strict_limit)
 805		bdi->capabilities |= BDI_CAP_STRICTLIMIT;
 806	else
 807		bdi->capabilities &= ~BDI_CAP_STRICTLIMIT;
 808	spin_unlock_bh(&bdi_lock);
 809
 810	return 0;
 811}
 812
 813static unsigned long dirty_freerun_ceiling(unsigned long thresh,
 814					   unsigned long bg_thresh)
 815{
 816	return (thresh + bg_thresh) / 2;
 817}
 818
 819static unsigned long hard_dirty_limit(struct wb_domain *dom,
 820				      unsigned long thresh)
 821{
 822	return max(thresh, dom->dirty_limit);
 823}
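/*
 * Example (assumed numbers): with thresh = 200000 and bg_thresh = 100000
 * pages, dirty_freerun_ceiling() is 150000 pages - below that point tasks
 * are not throttled at all.  hard_dirty_limit() never reports less than
 * dom->dirty_limit, which only follows a falling thresh slowly (see
 * update_dirty_limit()), so a sudden drop of thresh cannot instantly turn
 * the current dirty set into an over-limit condition.
 */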
 824
 825/*
 826 * Memory which can be further allocated to a memcg domain is capped by
 827 * system-wide clean memory excluding the amount being used in the domain.
 828 */
 829static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
 830			    unsigned long filepages, unsigned long headroom)
 831{
 832	struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc);
 833	unsigned long clean = filepages - min(filepages, mdtc->dirty);
 834	unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty);
 835	unsigned long other_clean = global_clean - min(global_clean, clean);
 836
 837	mdtc->avail = filepages + min(headroom, other_clean);
 838}
 839
 840/**
 841 * __wb_calc_thresh - @wb's share of dirty throttling threshold
 842 * @dtc: dirty_throttle_context of interest
 843 *
 844 * Note that balance_dirty_pages() will only seriously take it as a hard limit
 845 * when sleeping max_pause per page is not enough to keep the dirty pages under
 846 * control. For example, when the device is completely stalled due to some error
 847 * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
 848 * In other normal situations, it acts more gently by throttling the tasks
 849 * more (rather than completely blocking them) when the wb dirty pages go high.
 850 *
 851 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
 852 * - starving fast devices
 853 * - piling up dirty pages (that will take long time to sync) on slow devices
 854 *
 855 * The wb's share of dirty limit will be adapting to its throughput and
 856 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 857 *
 858 * Return: @wb's dirty limit in pages. The term "dirty" in the context of
 859 * dirty balancing includes all PG_dirty and PG_writeback pages.
 860 */
 861static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc)
 862{
 863	struct wb_domain *dom = dtc_dom(dtc);
 864	unsigned long thresh = dtc->thresh;
 865	u64 wb_thresh;
 866	unsigned long numerator, denominator;
 867	unsigned long wb_min_ratio, wb_max_ratio;
 868
 869	/*
 870	 * Calculate this BDI's share of the thresh ratio.
 871	 */
 872	fprop_fraction_percpu(&dom->completions, dtc->wb_completions,
 873			      &numerator, &denominator);
 874
 875	wb_thresh = (thresh * (100 * BDI_RATIO_SCALE - bdi_min_ratio)) / (100 * BDI_RATIO_SCALE);
 876	wb_thresh *= numerator;
 877	wb_thresh = div64_ul(wb_thresh, denominator);
 878
 879	wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio);
 880
 881	wb_thresh += (thresh * wb_min_ratio) / (100 * BDI_RATIO_SCALE);
 882	if (wb_thresh > (thresh * wb_max_ratio) / (100 * BDI_RATIO_SCALE))
 883		wb_thresh = thresh * wb_max_ratio / (100 * BDI_RATIO_SCALE);
 884
 885	return wb_thresh;
 886}
 887
 888unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
 889{
 890	struct dirty_throttle_control gdtc = { GDTC_INIT(wb),
 891					       .thresh = thresh };
 892	return __wb_calc_thresh(&gdtc);
 893}
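/*
 * Illustrative example (assumed numbers): if this wb has completed 1/4 of
 * the recent writeout events in the domain (fprop numerator/denominator =
 * 1/4), no bdi min/max ratio is configured and thresh = 200000 pages, then
 *
 *	wb_thresh ~= 200000 * 1/4 = 50000 pages,
 *
 * i.e. the device gets a share of the global limit proportional to how
 * fast it has recently been writing back, bounded by bdi->min_ratio and
 * bdi->max_ratio when those are set.
 */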
 894
 895/*
 896 *                           setpoint - dirty 3
 897 *        f(dirty) := 1.0 + (----------------)
 898 *                           limit - setpoint
 899 *
 900 * it's a 3rd order polynomial that subjects to
 901 *
 902 * (1) f(freerun)  = 2.0 => rampup dirty_ratelimit reasonably fast
 903 * (2) f(setpoint) = 1.0 => the balance point
 904 * (3) f(limit)    = 0   => the hard limit
 905 * (4) df/dx      <= 0	 => negative feedback control
 906 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
 907 *     => fast response on large errors; small oscillation near setpoint
 908 */
 909static long long pos_ratio_polynom(unsigned long setpoint,
 910					  unsigned long dirty,
 911					  unsigned long limit)
 912{
 913	long long pos_ratio;
 914	long x;
 915
 916	x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
 917		      (limit - setpoint) | 1);
 918	pos_ratio = x;
 919	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
 920	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
 921	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;
 922
 923	return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT);
 924}
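/*
 * Worked example for the polynomial above (assumed numbers, with the
 * RATELIMIT_CALC_SHIFT fixed-point scaling ignored for readability):
 * setpoint = 150000 and limit = 200000 give
 *
 *	dirty = 100000:  x =  1    -> pos_ratio = 2.0	(freerun edge)
 *	dirty = 150000:  x =  0    -> pos_ratio = 1.0	(balance point)
 *	dirty = 175000:  x = -0.5  -> pos_ratio = 0.875
 *	dirty = 200000:  x = -1    -> pos_ratio = 0.0	(hard limit)
 *
 * so throttling responds only mildly near the setpoint and sharply as
 * dirty approaches the limit.
 */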
 925
 926/*
 927 * Dirty position control.
 928 *
 929 * (o) global/bdi setpoints
 930 *
 931 * We want the dirty pages be balanced around the global/wb setpoints.
 932 * When the number of dirty pages is higher/lower than the setpoint, the
 933 * dirty position control ratio (and hence task dirty ratelimit) will be
 934 * decreased/increased to bring the dirty pages back to the setpoint.
 935 *
 936 *     pos_ratio = 1 << RATELIMIT_CALC_SHIFT
 937 *
 938 *     if (dirty < setpoint) scale up   pos_ratio
 939 *     if (dirty > setpoint) scale down pos_ratio
 940 *
 941 *     if (wb_dirty < wb_setpoint) scale up   pos_ratio
 942 *     if (wb_dirty > wb_setpoint) scale down pos_ratio
 943 *
 944 *     task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
 945 *
 946 * (o) global control line
 947 *
 948 *     ^ pos_ratio
 949 *     |
 950 *     |            |<===== global dirty control scope ======>|
 951 * 2.0  * * * * * * *
 952 *     |            .*
 953 *     |            . *
 954 *     |            .   *
 955 *     |            .     *
 956 *     |            .        *
 957 *     |            .            *
 958 * 1.0 ................................*
 959 *     |            .                  .     *
 960 *     |            .                  .          *
 961 *     |            .                  .              *
 962 *     |            .                  .                 *
 963 *     |            .                  .                    *
 964 *   0 +------------.------------------.----------------------*------------->
 965 *           freerun^          setpoint^                 limit^   dirty pages
 966 *
 967 * (o) wb control line
 968 *
 969 *     ^ pos_ratio
 970 *     |
 971 *     |            *
 972 *     |              *
 973 *     |                *
 974 *     |                  *
 975 *     |                    * |<=========== span ============>|
 976 * 1.0 .......................*
 977 *     |                      . *
 978 *     |                      .   *
 979 *     |                      .     *
 980 *     |                      .       *
 981 *     |                      .         *
 982 *     |                      .           *
 983 *     |                      .             *
 984 *     |                      .               *
 985 *     |                      .                 *
 986 *     |                      .                   *
 987 *     |                      .                     *
 988 * 1/4 ...............................................* * * * * * * * * * * *
 989 *     |                      .                         .
 990 *     |                      .                           .
 991 *     |                      .                             .
 992 *   0 +----------------------.-------------------------------.------------->
 993 *                wb_setpoint^                    x_intercept^
 994 *
 995 * The wb control line won't drop below pos_ratio=1/4, so that wb_dirty can
 996 * be smoothly throttled down to normal if it starts high in situations like
 997 * - start writing to a slow SD card and a fast disk at the same time. The SD
 998 *   card's wb_dirty may rush to many times higher than wb_setpoint.
 999 * - the wb dirty thresh drops quickly due to change of JBOD workload
1000 */
1001static void wb_position_ratio(struct dirty_throttle_control *dtc)
1002{
1003	struct bdi_writeback *wb = dtc->wb;
1004	unsigned long write_bw = READ_ONCE(wb->avg_write_bandwidth);
1005	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
1006	unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
1007	unsigned long wb_thresh = dtc->wb_thresh;
1008	unsigned long x_intercept;
1009	unsigned long setpoint;		/* dirty pages' target balance point */
1010	unsigned long wb_setpoint;
1011	unsigned long span;
1012	long long pos_ratio;		/* for scaling up/down the rate limit */
1013	long x;
1014
1015	dtc->pos_ratio = 0;
1016
1017	if (unlikely(dtc->dirty >= limit))
1018		return;
1019
1020	/*
1021	 * global setpoint
1022	 *
1023	 * See comment for pos_ratio_polynom().
1024	 */
1025	setpoint = (freerun + limit) / 2;
1026	pos_ratio = pos_ratio_polynom(setpoint, dtc->dirty, limit);
1027
1028	/*
1029	 * The strictlimit feature is a tool preventing mistrusted filesystems
1030	 * from growing a large number of dirty pages before throttling. For
 1031	 * such filesystems balance_dirty_pages always checks wb counters
 1032	 * against wb limits, even if global "nr_dirty" is under "freerun".
 1033	 * This is especially important for fuse, which sets bdi->max_ratio to
 1034	 * 1% by default. Without the strictlimit feature, fuse writeback may
 1035	 * consume an arbitrary amount of RAM because it is accounted in
 1036	 * NR_WRITEBACK_TEMP, which is not involved in calculating "nr_dirty".
1037	 *
1038	 * Here, in wb_position_ratio(), we calculate pos_ratio based on
1039	 * two values: wb_dirty and wb_thresh. Let's consider an example:
1040	 * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global
1041	 * limits are set by default to 10% and 20% (background and throttle).
1042	 * Then wb_thresh is 1% of 20% of 16GB. This amounts to ~8K pages.
1043	 * wb_calc_thresh(wb, bg_thresh) is about ~4K pages. wb_setpoint is
1044	 * about ~6K pages (as the average of background and throttle wb
1045	 * limits). The 3rd order polynomial will provide positive feedback if
1046	 * wb_dirty is under wb_setpoint and vice versa.
1047	 *
1048	 * Note, that we cannot use global counters in these calculations
 1049	 * because we want to throttle a process writing to a strictlimit wb
1050	 * much earlier than global "freerun" is reached (~23MB vs. ~2.3GB
1051	 * in the example above).
1052	 */
1053	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
1054		long long wb_pos_ratio;
1055
1056		if (dtc->wb_dirty < 8) {
1057			dtc->pos_ratio = min_t(long long, pos_ratio * 2,
1058					   2 << RATELIMIT_CALC_SHIFT);
1059			return;
1060		}
1061
1062		if (dtc->wb_dirty >= wb_thresh)
1063			return;
1064
1065		wb_setpoint = dirty_freerun_ceiling(wb_thresh,
1066						    dtc->wb_bg_thresh);
1067
1068		if (wb_setpoint == 0 || wb_setpoint == wb_thresh)
1069			return;
1070
1071		wb_pos_ratio = pos_ratio_polynom(wb_setpoint, dtc->wb_dirty,
1072						 wb_thresh);
1073
1074		/*
1075		 * Typically, for strictlimit case, wb_setpoint << setpoint
 1076		 * and pos_ratio >> wb_pos_ratio. In other words, the global
 1077		 * state ("dirty") is not the limiting factor and we have to
1078		 * make decision based on wb counters. But there is an
1079		 * important case when global pos_ratio should get precedence:
1080		 * global limits are exceeded (e.g. due to activities on other
1081		 * wb's) while given strictlimit wb is below limit.
1082		 *
1083		 * "pos_ratio * wb_pos_ratio" would work for the case above,
 1084		 * but it would look too unnatural for the case of all
1085		 * activity in the system coming from a single strictlimit wb
1086		 * with bdi->max_ratio == 100%.
1087		 *
1088		 * Note that min() below somewhat changes the dynamics of the
1089		 * control system. Normally, pos_ratio value can be well over 3
1090		 * (when globally we are at freerun and wb is well below wb
1091		 * setpoint). Now the maximum pos_ratio in the same situation
1092		 * is 2. We might want to tweak this if we observe the control
1093		 * system is too slow to adapt.
1094		 */
1095		dtc->pos_ratio = min(pos_ratio, wb_pos_ratio);
1096		return;
1097	}
1098
1099	/*
1100	 * We have computed basic pos_ratio above based on global situation. If
1101	 * the wb is over/under its share of dirty pages, we want to scale
1102	 * pos_ratio further down/up. That is done by the following mechanism.
1103	 */
1104
1105	/*
1106	 * wb setpoint
1107	 *
1108	 *        f(wb_dirty) := 1.0 + k * (wb_dirty - wb_setpoint)
1109	 *
1110	 *                        x_intercept - wb_dirty
1111	 *                     := --------------------------
1112	 *                        x_intercept - wb_setpoint
1113	 *
1114	 * The main wb control line is a linear function that subjects to
1115	 *
1116	 * (1) f(wb_setpoint) = 1.0
1117	 * (2) k = - 1 / (8 * write_bw)  (in single wb case)
1118	 *     or equally: x_intercept = wb_setpoint + 8 * write_bw
1119	 *
1120	 * For single wb case, the dirty pages are observed to fluctuate
1121	 * regularly within range
1122	 *        [wb_setpoint - write_bw/2, wb_setpoint + write_bw/2]
1123	 * for various filesystems, where (2) can yield in a reasonable 12.5%
1124	 * fluctuation range for pos_ratio.
1125	 *
1126	 * For JBOD case, wb_thresh (not wb_dirty!) could fluctuate up to its
1127	 * own size, so move the slope over accordingly and choose a slope that
1128	 * yields 100% pos_ratio fluctuation on suddenly doubled wb_thresh.
1129	 */
1130	if (unlikely(wb_thresh > dtc->thresh))
1131		wb_thresh = dtc->thresh;
1132	/*
1133	 * It's very possible that wb_thresh is close to 0 not because the
 1134	 * device is slow, but because it has remained inactive for a long time.
 1135	 * Honour such devices with a reasonably good (hopefully IO efficient)
 1136	 * threshold, so that occasional writes won't be blocked and active
 1137	 * writes can ramp up the threshold quickly.
1138	 */
1139	wb_thresh = max(wb_thresh, (limit - dtc->dirty) / 8);
1140	/*
1141	 * scale global setpoint to wb's:
1142	 *	wb_setpoint = setpoint * wb_thresh / thresh
1143	 */
1144	x = div_u64((u64)wb_thresh << 16, dtc->thresh | 1);
1145	wb_setpoint = setpoint * (u64)x >> 16;
1146	/*
1147	 * Use span=(8*write_bw) in single wb case as indicated by
1148	 * (thresh - wb_thresh ~= 0) and transit to wb_thresh in JBOD case.
1149	 *
1150	 *        wb_thresh                    thresh - wb_thresh
1151	 * span = --------- * (8 * write_bw) + ------------------ * wb_thresh
1152	 *         thresh                           thresh
1153	 */
1154	span = (dtc->thresh - wb_thresh + 8 * write_bw) * (u64)x >> 16;
1155	x_intercept = wb_setpoint + span;
1156
1157	if (dtc->wb_dirty < x_intercept - span / 4) {
1158		pos_ratio = div64_u64(pos_ratio * (x_intercept - dtc->wb_dirty),
1159				      (x_intercept - wb_setpoint) | 1);
1160	} else
1161		pos_ratio /= 4;
1162
1163	/*
1164	 * wb reserve area, safeguard against dirty pool underrun and disk idle
1165	 * It may push the desired control point of global dirty pages higher
1166	 * than setpoint.
1167	 */
1168	x_intercept = wb_thresh / 2;
1169	if (dtc->wb_dirty < x_intercept) {
1170		if (dtc->wb_dirty > x_intercept / 8)
1171			pos_ratio = div_u64(pos_ratio * x_intercept,
1172					    dtc->wb_dirty);
1173		else
1174			pos_ratio *= 8;
1175	}
1176
1177	dtc->pos_ratio = pos_ratio;
1178}
1179
1180static void wb_update_write_bandwidth(struct bdi_writeback *wb,
1181				      unsigned long elapsed,
1182				      unsigned long written)
1183{
1184	const unsigned long period = roundup_pow_of_two(3 * HZ);
1185	unsigned long avg = wb->avg_write_bandwidth;
1186	unsigned long old = wb->write_bandwidth;
1187	u64 bw;
1188
1189	/*
1190	 * bw = written * HZ / elapsed
1191	 *
1192	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
1193	 * write_bandwidth = ---------------------------------------------------
1194	 *                                          period
1195	 *
1196	 * @written may have decreased due to folio_redirty_for_writepage().
1197	 * Avoid underflowing @bw calculation.
1198	 */
1199	bw = written - min(written, wb->written_stamp);
1200	bw *= HZ;
1201	if (unlikely(elapsed > period)) {
1202		bw = div64_ul(bw, elapsed);
1203		avg = bw;
1204		goto out;
1205	}
1206	bw += (u64)wb->write_bandwidth * (period - elapsed);
1207	bw >>= ilog2(period);
1208
1209	/*
1210	 * one more level of smoothing, for filtering out sudden spikes
1211	 */
1212	if (avg > old && old >= (unsigned long)bw)
1213		avg -= (avg - old) >> 3;
1214
1215	if (avg < old && old <= (unsigned long)bw)
1216		avg += (old - avg) >> 3;
1217
1218out:
1219	/* keep avg > 0 to guarantee that tot > 0 if there are dirty wbs */
1220	avg = max(avg, 1LU);
1221	if (wb_has_dirty_io(wb)) {
1222		long delta = avg - wb->avg_write_bandwidth;
1223		WARN_ON_ONCE(atomic_long_add_return(delta,
1224					&wb->bdi->tot_write_bandwidth) <= 0);
1225	}
1226	wb->write_bandwidth = bw;
1227	WRITE_ONCE(wb->avg_write_bandwidth, avg);
1228}
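/*
 * Numerical sketch of the averaging above (assumed numbers, HZ = 1000):
 * period = roundup_pow_of_two(3 * HZ) = 4096 jiffies.  If 25600 pages were
 * written during elapsed = 200 jiffies, the instantaneous rate is
 *
 *	bw = 25600 * HZ / elapsed = 128000 pages/s (~500 MiB/s at 4 KiB pages),
 *
 * but it only contributes elapsed/period ~= 5% to the new write_bandwidth,
 * with the remaining ~95% carried over from the old value - a simple
 * exponentially weighted moving average, further smoothed into
 * avg_write_bandwidth by the +/- (delta >> 3) steps.
 */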
1229
1230static void update_dirty_limit(struct dirty_throttle_control *dtc)
1231{
1232	struct wb_domain *dom = dtc_dom(dtc);
1233	unsigned long thresh = dtc->thresh;
1234	unsigned long limit = dom->dirty_limit;
1235
1236	/*
1237	 * Follow up in one step.
1238	 */
1239	if (limit < thresh) {
1240		limit = thresh;
1241		goto update;
1242	}
1243
1244	/*
1245	 * Follow down slowly. Use the higher one as the target, because thresh
1246	 * may drop below dirty. This is exactly the reason to introduce
1247	 * dom->dirty_limit which is guaranteed to lie above the dirty pages.
1248	 */
1249	thresh = max(thresh, dtc->dirty);
1250	if (limit > thresh) {
1251		limit -= (limit - thresh) >> 5;
1252		goto update;
1253	}
1254	return;
1255update:
1256	dom->dirty_limit = limit;
1257}
1258
1259static void domain_update_dirty_limit(struct dirty_throttle_control *dtc,
1260				      unsigned long now)
1261{
1262	struct wb_domain *dom = dtc_dom(dtc);
1263
1264	/*
1265	 * check locklessly first to optimize away locking for the most time
1266	 */
1267	if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL))
1268		return;
1269
1270	spin_lock(&dom->lock);
1271	if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) {
1272		update_dirty_limit(dtc);
1273		dom->dirty_limit_tstamp = now;
1274	}
1275	spin_unlock(&dom->lock);
1276}
1277
1278/*
1279 * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
1280 *
1281 * Normal wb tasks will be curbed at or below it in long term.
1282 * Obviously it should be around (write_bw / N) when there are N dd tasks.
1283 */
1284static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
1285				      unsigned long dirtied,
1286				      unsigned long elapsed)
1287{
1288	struct bdi_writeback *wb = dtc->wb;
1289	unsigned long dirty = dtc->dirty;
1290	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
1291	unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
1292	unsigned long setpoint = (freerun + limit) / 2;
1293	unsigned long write_bw = wb->avg_write_bandwidth;
1294	unsigned long dirty_ratelimit = wb->dirty_ratelimit;
1295	unsigned long dirty_rate;
1296	unsigned long task_ratelimit;
1297	unsigned long balanced_dirty_ratelimit;
1298	unsigned long step;
1299	unsigned long x;
1300	unsigned long shift;
1301
1302	/*
1303	 * The dirty rate will match the writeout rate in long term, except
1304	 * when dirty pages are truncated by userspace or re-dirtied by FS.
1305	 */
1306	dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed;
1307
1308	/*
1309	 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
1310	 */
1311	task_ratelimit = (u64)dirty_ratelimit *
1312					dtc->pos_ratio >> RATELIMIT_CALC_SHIFT;
1313	task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */
1314
1315	/*
1316	 * A linear estimation of the "balanced" throttle rate. The theory is,
1317	 * if there are N dd tasks, each throttled at task_ratelimit, the wb's
1318	 * dirty_rate will be measured to be (N * task_ratelimit). So the below
1319	 * formula will yield the balanced rate limit (write_bw / N).
1320	 *
1321	 * Note that the expanded form is not a pure rate feedback:
1322	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate)		     (1)
1323	 * but also takes pos_ratio into account:
1324	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio  (2)
1325	 *
1326	 * (1) is not realistic because pos_ratio also takes part in balancing
1327	 * the dirty rate.  Consider the state
1328	 *	pos_ratio = 0.5						     (3)
1329	 *	rate = 2 * (write_bw / N)				     (4)
 1330	 * If (1) is used, it will get stuck in that state, because each dd will
1331	 * be throttled at
1332	 *	task_ratelimit = pos_ratio * rate = (write_bw / N)	     (5)
1333	 * yielding
1334	 *	dirty_rate = N * task_ratelimit = write_bw		     (6)
1335	 * put (6) into (1) we get
1336	 *	rate_(i+1) = rate_(i)					     (7)
1337	 *
1338	 * So we end up using (2) to always keep
1339	 *	rate_(i+1) ~= (write_bw / N)				     (8)
1340	 * regardless of the value of pos_ratio. As long as (8) is satisfied,
1341	 * pos_ratio is able to drive itself to 1.0, which is not only where
 1342	 * the dirty count meets the setpoint, but also where the slope of
1343	 * pos_ratio is most flat and hence task_ratelimit is least fluctuated.
1344	 */
1345	balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
1346					   dirty_rate | 1);
1347	/*
1348	 * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
1349	 */
1350	if (unlikely(balanced_dirty_ratelimit > write_bw))
1351		balanced_dirty_ratelimit = write_bw;
1352
1353	/*
1354	 * We could safely do this and return immediately:
1355	 *
1356	 *	wb->dirty_ratelimit = balanced_dirty_ratelimit;
1357	 *
1358	 * However to get a more stable dirty_ratelimit, the below elaborated
1359	 * code makes use of task_ratelimit to filter out singular points and
1360	 * limit the step size.
1361	 *
1362	 * The below code essentially only uses the relative value of
1363	 *
1364	 *	task_ratelimit - dirty_ratelimit
1365	 *	= (pos_ratio - 1) * dirty_ratelimit
1366	 *
1367	 * which reflects the direction and size of dirty position error.
1368	 */
1369
1370	/*
1371	 * dirty_ratelimit will follow balanced_dirty_ratelimit iff
1372	 * task_ratelimit is on the same side of dirty_ratelimit, too.
1373	 * For example, when
1374	 * - dirty_ratelimit > balanced_dirty_ratelimit
1375	 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
1376	 * lowering dirty_ratelimit will help meet both the position and rate
1377	 * control targets. Otherwise, don't update dirty_ratelimit if it will
1378	 * only help meet the rate target. After all, what the users ultimately
1379	 * feel and care are stable dirty rate and small position error.
1380	 *
1381	 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
 1382	 * and filter out the singular points of balanced_dirty_ratelimit, which
 1383	 * keeps jumping around randomly and can even leap far away at times
1384	 * due to the small 200ms estimation period of dirty_rate (we want to
1385	 * keep that period small to reduce time lags).
1386	 */
1387	step = 0;
1388
1389	/*
1390	 * For strictlimit case, calculations above were based on wb counters
1391	 * and limits (starting from pos_ratio = wb_position_ratio() and up to
1392	 * balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate).
1393	 * Hence, to calculate "step" properly, we have to use wb_dirty as
1394	 * "dirty" and wb_setpoint as "setpoint".
1395	 *
1396	 * We rampup dirty_ratelimit forcibly if wb_dirty is low because
1397	 * it's possible that wb_thresh is close to zero due to inactivity
1398	 * of backing device.
1399	 */
1400	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
1401		dirty = dtc->wb_dirty;
1402		if (dtc->wb_dirty < 8)
1403			setpoint = dtc->wb_dirty + 1;
1404		else
1405			setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2;
1406	}
1407
1408	if (dirty < setpoint) {
1409		x = min3(wb->balanced_dirty_ratelimit,
1410			 balanced_dirty_ratelimit, task_ratelimit);
1411		if (dirty_ratelimit < x)
1412			step = x - dirty_ratelimit;
1413	} else {
1414		x = max3(wb->balanced_dirty_ratelimit,
1415			 balanced_dirty_ratelimit, task_ratelimit);
1416		if (dirty_ratelimit > x)
1417			step = dirty_ratelimit - x;
1418	}
1419
1420	/*
1421	 * Don't pursue 100% rate matching. It's impossible since the balanced
1422	 * rate itself is constantly fluctuating. So decrease the track speed
1423	 * when it gets close to the target. Helps eliminate pointless tremors.
1424	 */
1425	shift = dirty_ratelimit / (2 * step + 1);
1426	if (shift < BITS_PER_LONG)
1427		step = DIV_ROUND_UP(step >> shift, 8);
1428	else
1429		step = 0;
1430
1431	if (dirty_ratelimit < balanced_dirty_ratelimit)
1432		dirty_ratelimit += step;
1433	else
1434		dirty_ratelimit -= step;
1435
1436	WRITE_ONCE(wb->dirty_ratelimit, max(dirty_ratelimit, 1UL));
1437	wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit;
1438
1439	trace_bdi_dirty_ratelimit(wb, dirty_rate, task_ratelimit);
1440}
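/*
 * Numerical sketch of the feedback above (assumed numbers): suppose
 * write_bw = 25000 pages/s, there are N = 4 dirtier tasks, each was
 * throttled at task_ratelimit = 12500 pages/s, and the measured dirty_rate
 * over the last interval is therefore about 50000 pages/s.  Then
 *
 *	balanced_dirty_ratelimit = 12500 * 25000 / 50000 = 6250 pages/s,
 *
 * which is exactly write_bw / N - the rate at which the 4 tasks together
 * dirty no faster than the device writes back.  dirty_ratelimit then moves
 * toward that value in damped steps rather than jumping to it.
 */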
1441
1442static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc,
1443				  struct dirty_throttle_control *mdtc,
1444				  bool update_ratelimit)
1445{
1446	struct bdi_writeback *wb = gdtc->wb;
1447	unsigned long now = jiffies;
1448	unsigned long elapsed;
1449	unsigned long dirtied;
1450	unsigned long written;
1451
1452	spin_lock(&wb->list_lock);
1453
1454	/*
1455	 * Lockless checks for elapsed time are racy and delayed update after
1456	 * IO completion doesn't do it at all (to make sure written pages are
1457	 * accounted reasonably quickly). Make sure elapsed >= 1 to avoid
1458	 * division errors.
1459	 */
1460	elapsed = max(now - wb->bw_time_stamp, 1UL);
1461	dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]);
1462	written = percpu_counter_read(&wb->stat[WB_WRITTEN]);
1463
1464	if (update_ratelimit) {
1465		domain_update_dirty_limit(gdtc, now);
1466		wb_update_dirty_ratelimit(gdtc, dirtied, elapsed);
1467
1468		/*
1469		 * @mdtc is always NULL if !CGROUP_WRITEBACK but the
1470		 * compiler has no way to figure that out.  Help it.
1471		 */
1472		if (IS_ENABLED(CONFIG_CGROUP_WRITEBACK) && mdtc) {
1473			domain_update_dirty_limit(mdtc, now);
1474			wb_update_dirty_ratelimit(mdtc, dirtied, elapsed);
1475		}
1476	}
1477	wb_update_write_bandwidth(wb, elapsed, written);
1478
1479	wb->dirtied_stamp = dirtied;
1480	wb->written_stamp = written;
1481	WRITE_ONCE(wb->bw_time_stamp, now);
1482	spin_unlock(&wb->list_lock);
1483}
1484
1485void wb_update_bandwidth(struct bdi_writeback *wb)
1486{
1487	struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };
1488
1489	__wb_update_bandwidth(&gdtc, NULL, false);
1490}
1491
1492/* Interval after which we consider wb idle and don't estimate bandwidth */
1493#define WB_BANDWIDTH_IDLE_JIF (HZ)
1494
1495static void wb_bandwidth_estimate_start(struct bdi_writeback *wb)
1496{
1497	unsigned long now = jiffies;
1498	unsigned long elapsed = now - READ_ONCE(wb->bw_time_stamp);
1499
1500	if (elapsed > WB_BANDWIDTH_IDLE_JIF &&
1501	    !atomic_read(&wb->writeback_inodes)) {
1502		spin_lock(&wb->list_lock);
1503		wb->dirtied_stamp = wb_stat(wb, WB_DIRTIED);
1504		wb->written_stamp = wb_stat(wb, WB_WRITTEN);
1505		WRITE_ONCE(wb->bw_time_stamp, now);
1506		spin_unlock(&wb->list_lock);
1507	}
1508}
1509
1510/*
1511 * After a task dirtied this many pages, balance_dirty_pages_ratelimited()
1512 * will look to see if it needs to start dirty throttling.
1513 *
1514 * If dirty_poll_interval is too low, big NUMA machines will call the expensive
1515 * global_zone_page_state() too often. So scale it near-sqrt to the safety margin
1516 * (the number of pages we may dirty without exceeding the dirty limits).
1517 */
1518static unsigned long dirty_poll_interval(unsigned long dirty,
1519					 unsigned long thresh)
1520{
1521	if (thresh > dirty)
1522		return 1UL << (ilog2(thresh - dirty) >> 1);
1523
1524	return 1;
1525}
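/*
 * Example (assumed numbers): with thresh - dirty = 16384 pages of safety
 * margin, dirty_poll_interval() returns 1 << (ilog2(16384) / 2) =
 * 1 << 7 = 128 pages, so a task re-enters balance_dirty_pages() roughly
 * every 128 dirtied pages; as the margin shrinks toward zero the poll
 * interval drops toward a single page.
 */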
1526
1527static unsigned long wb_max_pause(struct bdi_writeback *wb,
1528				  unsigned long wb_dirty)
1529{
1530	unsigned long bw = READ_ONCE(wb->avg_write_bandwidth);
1531	unsigned long t;
1532
1533	/*
1534	 * Limit pause time for small memory systems. If sleeping for too long
1535	 * time, a small pool of dirty/writeback pages may go empty and disk go
1536	 * idle.
1537	 *
1538	 * 8 serves as the safety ratio.
1539	 */
1540	t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
1541	t++;
1542
1543	return min_t(unsigned long, t, MAX_PAUSE);
1544}
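/*
 * Example (assumed numbers, HZ = 1000): with avg_write_bandwidth
 * bw = 25000 pages/s and wb_dirty = 2000 pages,
 *
 *	t = 2000 / (1 + 25000 / roundup_pow_of_two(1 + HZ / 8)) + 1
 *	  = 2000 / (1 + 25000 / 128) + 1 ~= 11 jiffies,
 *
 * well under MAX_PAUSE, so a sleeping dirtier cannot drain the small
 * dirty pool and leave the disk idle before it wakes up again.
 */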
1545
1546static long wb_min_pause(struct bdi_writeback *wb,
1547			 long max_pause,
1548			 unsigned long task_ratelimit,
1549			 unsigned long dirty_ratelimit,
1550			 int *nr_dirtied_pause)
1551{
1552	long hi = ilog2(READ_ONCE(wb->avg_write_bandwidth));
1553	long lo = ilog2(READ_ONCE(wb->dirty_ratelimit));
1554	long t;		/* target pause */
1555	long pause;	/* estimated next pause */
1556	int pages;	/* target nr_dirtied_pause */
1557
1558	/* target for 10ms pause on 1-dd case */
1559	t = max(1, HZ / 100);
1560
1561	/*
1562	 * Scale up pause time for concurrent dirtiers in order to reduce CPU
1563	 * overheads.
1564	 *
1565	 * (N * 10ms) on 2^N concurrent tasks.
1566	 */
1567	if (hi > lo)
1568		t += (hi - lo) * (10 * HZ) / 1024;
1569
1570	/*
1571	 * This is a bit convoluted. We try to base the next nr_dirtied_pause
1572	 * on the much more stable dirty_ratelimit. However the next pause time
1573	 * will be computed based on task_ratelimit and the two rate limits may
1574	 * depart considerably at some time. Especially if task_ratelimit goes
1575	 * below dirty_ratelimit/2 and the target pause is max_pause, the next
1576	 * pause time will be max_pause*2 _trimmed down_ to max_pause.  As a
1577	 * result task_ratelimit won't be executed faithfully, which could
1578	 * eventually bring down dirty_ratelimit.
1579	 *
1580	 * We apply two rules to fix it up:
1581	 * 1) try to estimate the next pause time and if necessary, use a lower
1582	 *    nr_dirtied_pause so as not to exceed max_pause. When this happens,
1583	 *    nr_dirtied_pause will be "dancing" with task_ratelimit.
1584	 * 2) limit the target pause time to max_pause/2, so that the normal
1585	 *    small fluctuations of task_ratelimit won't trigger rule (1) and
1586	 *    nr_dirtied_pause will remain as stable as dirty_ratelimit.
1587	 */
1588	t = min(t, 1 + max_pause / 2);
1589	pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
1590
1591	/*
1592	 * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
1593	 * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
1594	 * When the 16 consecutive reads are often interrupted by some dirty
1595	 * throttling pause during the async writes, cfq will go into idles
1596	 * (deadline is fine). So push nr_dirtied_pause as high as possible
 1597	 * until it reaches DIRTY_POLL_THRESH=32 pages.
1598	 */
1599	if (pages < DIRTY_POLL_THRESH) {
1600		t = max_pause;
1601		pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
1602		if (pages > DIRTY_POLL_THRESH) {
1603			pages = DIRTY_POLL_THRESH;
1604			t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
1605		}
1606	}
1607
1608	pause = HZ * pages / (task_ratelimit + 1);
1609	if (pause > max_pause) {
1610		t = max_pause;
1611		pages = task_ratelimit * t / roundup_pow_of_two(HZ);
1612	}
1613
1614	*nr_dirtied_pause = pages;
1615	/*
1616	 * The minimal pause time will normally be half the target pause time.
1617	 */
1618	return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
1619}
1620
1621static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
1622{
1623	struct bdi_writeback *wb = dtc->wb;
1624	unsigned long wb_reclaimable;
1625
1626	/*
1627	 * wb_thresh is not treated as some limiting factor as
1628	 * dirty_thresh, due to reasons
1629	 * - in JBOD setup, wb_thresh can fluctuate a lot
1630	 * - in a system with HDD and USB key, the USB key may somehow
1631	 *   go into state (wb_dirty >> wb_thresh) either because
1632	 *   wb_dirty starts high, or because wb_thresh drops low.
1633	 *   In this case we don't want to hard throttle the USB key
1634	 *   dirtiers for 100 seconds until wb_dirty drops under
1635	 *   wb_thresh. Instead the auxiliary wb control line in
1636	 *   wb_position_ratio() will let the dirtier task progress
1637	 *   at some rate <= (write_bw / 2) for bringing down wb_dirty.
1638	 */
1639	dtc->wb_thresh = __wb_calc_thresh(dtc);
1640	dtc->wb_bg_thresh = dtc->thresh ?
1641		div64_u64(dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
1642
1643	/*
1644	 * In order to avoid the stacked BDI deadlock we need
1645	 * to ensure we accurately count the 'dirty' pages when
1646	 * the threshold is low.
1647	 *
1648	 * Otherwise it would be possible to get thresh+n pages
1649	 * reported dirty, even though there are thresh-m pages
1650	 * actually dirty; with m+n sitting in the percpu
1651	 * deltas.
1652	 */
1653	if (dtc->wb_thresh < 2 * wb_stat_error()) {
1654		wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
1655		dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK);
1656	} else {
1657		wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE);
1658		dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK);
1659	}
1660}
1661
1662/*
1663 * balance_dirty_pages() must be called by processes which are generating dirty
1664 * data.  It looks at the number of dirty pages in the machine and will force
1665 * the caller to wait once crossing the (background_thresh + dirty_thresh) / 2.
1666 * If we're over `background_thresh' then the writeback threads are woken to
1667 * perform some writeout.
1668 */
1669static int balance_dirty_pages(struct bdi_writeback *wb,
1670			       unsigned long pages_dirtied, unsigned int flags)
1671{
1672	struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
1673	struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
1674	struct dirty_throttle_control * const gdtc = &gdtc_stor;
1675	struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
1676						     &mdtc_stor : NULL;
1677	struct dirty_throttle_control *sdtc;
1678	unsigned long nr_reclaimable;	/* = file_dirty */
1679	long period;
1680	long pause;
1681	long max_pause;
1682	long min_pause;
1683	int nr_dirtied_pause;
1684	bool dirty_exceeded = false;
1685	unsigned long task_ratelimit;
1686	unsigned long dirty_ratelimit;
1687	struct backing_dev_info *bdi = wb->bdi;
1688	bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
1689	unsigned long start_time = jiffies;
1690	int ret = 0;
1691
1692	for (;;) {
1693		unsigned long now = jiffies;
1694		unsigned long dirty, thresh, bg_thresh;
1695		unsigned long m_dirty = 0;	/* stop bogus uninit warnings */
1696		unsigned long m_thresh = 0;
1697		unsigned long m_bg_thresh = 0;
1698
1699		nr_reclaimable = global_node_page_state(NR_FILE_DIRTY);
1700		gdtc->avail = global_dirtyable_memory();
1701		gdtc->dirty = nr_reclaimable + global_node_page_state(NR_WRITEBACK);
1702
1703		domain_dirty_limits(gdtc);
1704
1705		if (unlikely(strictlimit)) {
1706			wb_dirty_limits(gdtc);
1707
1708			dirty = gdtc->wb_dirty;
1709			thresh = gdtc->wb_thresh;
1710			bg_thresh = gdtc->wb_bg_thresh;
1711		} else {
1712			dirty = gdtc->dirty;
1713			thresh = gdtc->thresh;
1714			bg_thresh = gdtc->bg_thresh;
1715		}
1716
1717		if (mdtc) {
1718			unsigned long filepages, headroom, writeback;
1719
1720			/*
1721			 * If @wb belongs to !root memcg, repeat the same
1722			 * basic calculations for the memcg domain.
1723			 */
1724			mem_cgroup_wb_stats(wb, &filepages, &headroom,
1725					    &mdtc->dirty, &writeback);
1726			mdtc->dirty += writeback;
1727			mdtc_calc_avail(mdtc, filepages, headroom);
1728
1729			domain_dirty_limits(mdtc);
1730
1731			if (unlikely(strictlimit)) {
1732				wb_dirty_limits(mdtc);
1733				m_dirty = mdtc->wb_dirty;
1734				m_thresh = mdtc->wb_thresh;
1735				m_bg_thresh = mdtc->wb_bg_thresh;
1736			} else {
1737				m_dirty = mdtc->dirty;
1738				m_thresh = mdtc->thresh;
1739				m_bg_thresh = mdtc->bg_thresh;
1740			}
1741		}
1742
1743		/*
1744		 * In laptop mode, we wait until hitting the higher threshold
1745		 * before starting background writeout, and then write out all
1746		 * the way down to the lower threshold.  So slow writers cause
1747		 * minimal disk activity.
1748		 *
1749		 * In normal mode, we start background writeout at the lower
1750		 * background_thresh, to keep the amount of dirty memory low.
1751		 */
1752		if (!laptop_mode && nr_reclaimable > gdtc->bg_thresh &&
1753		    !writeback_in_progress(wb))
1754			wb_start_background_writeback(wb);
1755
1756		/*
1757		 * Throttle it only when the background writeback cannot
1758		 * catch-up. This avoids (excessively) small writeouts
1759		 * when the wb limits are ramping up in case of !strictlimit.
1760		 *
1761		 * In strictlimit case make decision based on the wb counters
1762		 * and limits. Small writeouts when the wb limits are ramping
1763		 * up are the price we consciously pay for strictlimit-ing.
1764		 *
1765		 * If memcg domain is in effect, @dirty should be under
1766		 * both global and memcg freerun ceilings.
1767		 */
1768		if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh) &&
1769		    (!mdtc ||
1770		     m_dirty <= dirty_freerun_ceiling(m_thresh, m_bg_thresh))) {
1771			unsigned long intv;
1772			unsigned long m_intv;
1773
1774free_running:
1775			intv = dirty_poll_interval(dirty, thresh);
1776			m_intv = ULONG_MAX;
1777
1778			current->dirty_paused_when = now;
1779			current->nr_dirtied = 0;
1780			if (mdtc)
1781				m_intv = dirty_poll_interval(m_dirty, m_thresh);
1782			current->nr_dirtied_pause = min(intv, m_intv);
1783			break;
1784		}
1785
1786		/* Start writeback even when in laptop mode */
1787		if (unlikely(!writeback_in_progress(wb)))
1788			wb_start_background_writeback(wb);
1789
1790		mem_cgroup_flush_foreign(wb);
1791
1792		/*
1793		 * Calculate global domain's pos_ratio and select the
1794		 * global dtc by default.
1795		 */
1796		if (!strictlimit) {
1797			wb_dirty_limits(gdtc);
1798
1799			if ((current->flags & PF_LOCAL_THROTTLE) &&
1800			    gdtc->wb_dirty <
1801			    dirty_freerun_ceiling(gdtc->wb_thresh,
1802						  gdtc->wb_bg_thresh))
1803				/*
1804				 * LOCAL_THROTTLE tasks must not be throttled
1805				 * when below the per-wb freerun ceiling.
1806				 */
1807				goto free_running;
1808		}
1809
1810		dirty_exceeded = (gdtc->wb_dirty > gdtc->wb_thresh) &&
1811			((gdtc->dirty > gdtc->thresh) || strictlimit);
1812
1813		wb_position_ratio(gdtc);
1814		sdtc = gdtc;
1815
1816		if (mdtc) {
1817			/*
1818			 * If memcg domain is in effect, calculate its
1819			 * pos_ratio.  @wb should satisfy constraints from
1820			 * both global and memcg domains.  Choose the one
1821			 * w/ lower pos_ratio.
1822			 */
1823			if (!strictlimit) {
1824				wb_dirty_limits(mdtc);
1825
1826				if ((current->flags & PF_LOCAL_THROTTLE) &&
1827				    mdtc->wb_dirty <
1828				    dirty_freerun_ceiling(mdtc->wb_thresh,
1829							  mdtc->wb_bg_thresh))
1830					/*
1831					 * LOCAL_THROTTLE tasks must not be
1832					 * throttled when below the per-wb
1833					 * freerun ceiling.
1834					 */
1835					goto free_running;
1836			}
1837			dirty_exceeded |= (mdtc->wb_dirty > mdtc->wb_thresh) &&
1838				((mdtc->dirty > mdtc->thresh) || strictlimit);
1839
1840			wb_position_ratio(mdtc);
1841			if (mdtc->pos_ratio < gdtc->pos_ratio)
1842				sdtc = mdtc;
1843		}
1844
1845		if (dirty_exceeded != wb->dirty_exceeded)
1846			wb->dirty_exceeded = dirty_exceeded;
1847
1848		if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
1849					   BANDWIDTH_INTERVAL))
1850			__wb_update_bandwidth(gdtc, mdtc, true);
1851
1852		/* throttle according to the chosen dtc */
1853		dirty_ratelimit = READ_ONCE(wb->dirty_ratelimit);
1854		task_ratelimit = ((u64)dirty_ratelimit * sdtc->pos_ratio) >>
1855							RATELIMIT_CALC_SHIFT;
1856		max_pause = wb_max_pause(wb, sdtc->wb_dirty);
1857		min_pause = wb_min_pause(wb, max_pause,
1858					 task_ratelimit, dirty_ratelimit,
1859					 &nr_dirtied_pause);
1860
1861		if (unlikely(task_ratelimit == 0)) {
1862			period = max_pause;
1863			pause = max_pause;
1864			goto pause;
1865		}
1866		period = HZ * pages_dirtied / task_ratelimit;
1867		pause = period;
1868		if (current->dirty_paused_when)
1869			pause -= now - current->dirty_paused_when;
1870		/*
1871		 * For less than 1s think time (ext3/4 may block the dirtier
1872		 * for up to 800ms from time to time on 1-HDD; so does xfs,
 1873		 * though much less frequently), try to compensate for it in
1874		 * future periods by updating the virtual time; otherwise just
1875		 * do a reset, as it may be a light dirtier.
1876		 */
1877		if (pause < min_pause) {
1878			trace_balance_dirty_pages(wb,
1879						  sdtc->thresh,
1880						  sdtc->bg_thresh,
1881						  sdtc->dirty,
1882						  sdtc->wb_thresh,
1883						  sdtc->wb_dirty,
1884						  dirty_ratelimit,
1885						  task_ratelimit,
1886						  pages_dirtied,
1887						  period,
1888						  min(pause, 0L),
1889						  start_time);
1890			if (pause < -HZ) {
1891				current->dirty_paused_when = now;
1892				current->nr_dirtied = 0;
1893			} else if (period) {
1894				current->dirty_paused_when += period;
1895				current->nr_dirtied = 0;
1896			} else if (current->nr_dirtied_pause <= pages_dirtied)
1897				current->nr_dirtied_pause += pages_dirtied;
1898			break;
1899		}
1900		if (unlikely(pause > max_pause)) {
1901			/* for occasional dropped task_ratelimit */
1902			now += min(pause - max_pause, max_pause);
1903			pause = max_pause;
1904		}
1905
1906pause:
1907		trace_balance_dirty_pages(wb,
1908					  sdtc->thresh,
1909					  sdtc->bg_thresh,
1910					  sdtc->dirty,
1911					  sdtc->wb_thresh,
1912					  sdtc->wb_dirty,
1913					  dirty_ratelimit,
1914					  task_ratelimit,
1915					  pages_dirtied,
1916					  period,
1917					  pause,
1918					  start_time);
1919		if (flags & BDP_ASYNC) {
1920			ret = -EAGAIN;
1921			break;
1922		}
1923		__set_current_state(TASK_KILLABLE);
1924		bdi->last_bdp_sleep = jiffies;
1925		io_schedule_timeout(pause);
1926
1927		current->dirty_paused_when = now + pause;
1928		current->nr_dirtied = 0;
1929		current->nr_dirtied_pause = nr_dirtied_pause;
1930
1931		/*
1932		 * This is typically equal to (dirty < thresh) and can also
1933		 * keep "1000+ dd on a slow USB stick" under control.
1934		 */
1935		if (task_ratelimit)
1936			break;
1937
1938		/*
 1939		 * In the case of an unresponsive NFS server whose dirty
 1940		 * pages exceed dirty_thresh, give the other good wbs a pipe
 1941		 * to go through, so that tasks on them still remain responsive.
1942		 *
1943		 * In theory 1 page is enough to keep the consumer-producer
1944		 * pipe going: the flusher cleans 1 page => the task dirties 1
1945		 * more page. However wb_dirty has accounting errors.  So use
1946		 * the larger and more IO friendly wb_stat_error.
1947		 */
1948		if (sdtc->wb_dirty <= wb_stat_error())
1949			break;
1950
1951		if (fatal_signal_pending(current))
1952			break;
1953	}
1954	return ret;
1955}
1956
1957static DEFINE_PER_CPU(int, bdp_ratelimits);
1958
1959/*
1960 * Normal tasks are throttled by
1961 *	loop {
1962 *		dirty tsk->nr_dirtied_pause pages;
1963 *		take a snap in balance_dirty_pages();
1964 *	}
1965 * However there is a worst case. If every task exits immediately after dirtying
1966 * (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never be
1967 * called to throttle the page dirties. The solution is to save the not yet
1968 * throttled page dirties in dirty_throttle_leaks on task exit and charge them
1969 * randomly into the running tasks. This works well for the above worst case,
1970 * as the new task will pick up and accumulate the old task's leaked dirty
1971 * count and eventually get throttled.
1972 */
1973DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
1974
1975/**
1976 * balance_dirty_pages_ratelimited_flags - Balance dirty memory state.
1977 * @mapping: address_space which was dirtied.
1978 * @flags: BDP flags.
1979 *
1980 * Processes which are dirtying memory should call in here once for each page
1981 * which was newly dirtied.  The function will periodically check the system's
1982 * dirty state and will initiate writeback if needed.
1983 *
1984 * See balance_dirty_pages_ratelimited() for details.
1985 *
1986 * Return: If @flags contains BDP_ASYNC, it may return -EAGAIN to
1987 * indicate that memory is out of balance and the caller must wait
1988 * for I/O to complete.  Otherwise, it will return 0 to indicate
1989 * that either memory was already in balance, or it was able to sleep
1990 * until the amount of dirty memory returned to balance.
1991 */
1992int balance_dirty_pages_ratelimited_flags(struct address_space *mapping,
1993					unsigned int flags)
1994{
1995	struct inode *inode = mapping->host;
1996	struct backing_dev_info *bdi = inode_to_bdi(inode);
1997	struct bdi_writeback *wb = NULL;
1998	int ratelimit;
1999	int ret = 0;
2000	int *p;
2001
2002	if (!(bdi->capabilities & BDI_CAP_WRITEBACK))
2003		return ret;
2004
2005	if (inode_cgwb_enabled(inode))
2006		wb = wb_get_create_current(bdi, GFP_KERNEL);
2007	if (!wb)
2008		wb = &bdi->wb;
2009
2010	ratelimit = current->nr_dirtied_pause;
2011	if (wb->dirty_exceeded)
2012		ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));
2013
2014	preempt_disable();
2015	/*
 2016	 * This prevents one CPU from accumulating too many dirtied pages without
 2017	 * calling into balance_dirty_pages(), which can happen when there are
 2018	 * 1000+ tasks that all start dirtying pages at exactly the same
 2019	 * time, each honouring a too-large initial task->nr_dirtied_pause.
2020	 */
2021	p =  this_cpu_ptr(&bdp_ratelimits);
2022	if (unlikely(current->nr_dirtied >= ratelimit))
2023		*p = 0;
2024	else if (unlikely(*p >= ratelimit_pages)) {
2025		*p = 0;
2026		ratelimit = 0;
2027	}
2028	/*
2029	 * Pick up the dirtied pages by the exited tasks. This avoids lots of
 2030	 * short-lived tasks (e.g. gcc invocations in a kernel build) escaping
 2031	 * the dirty throttling and livelocking other long-running dirtiers.
2032	 */
2033	p = this_cpu_ptr(&dirty_throttle_leaks);
2034	if (*p > 0 && current->nr_dirtied < ratelimit) {
2035		unsigned long nr_pages_dirtied;
2036		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
2037		*p -= nr_pages_dirtied;
2038		current->nr_dirtied += nr_pages_dirtied;
2039	}
2040	preempt_enable();
2041
2042	if (unlikely(current->nr_dirtied >= ratelimit))
2043		ret = balance_dirty_pages(wb, current->nr_dirtied, flags);
2044
2045	wb_put(wb);
2046	return ret;
2047}
2048EXPORT_SYMBOL_GPL(balance_dirty_pages_ratelimited_flags);
2049
2050/**
2051 * balance_dirty_pages_ratelimited - balance dirty memory state.
2052 * @mapping: address_space which was dirtied.
2053 *
2054 * Processes which are dirtying memory should call in here once for each page
2055 * which was newly dirtied.  The function will periodically check the system's
2056 * dirty state and will initiate writeback if needed.
2057 *
2058 * Once we're over the dirty memory limit we decrease the ratelimiting
2059 * by a lot, to prevent individual processes from overshooting the limit
2060 * by (ratelimit_pages) each.
2061 */
2062void balance_dirty_pages_ratelimited(struct address_space *mapping)
2063{
2064	balance_dirty_pages_ratelimited_flags(mapping, 0);
2065}
2066EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
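/*
 * Usage sketch (illustrative; a simplified view of a typical buffered
 * write path, not code from this file): filesystems call the ratelimited
 * entry point once per newly dirtied page/folio, e.g.
 *
 *	->write_begin(...);		get and prepare the folio
 *	copy data from the iov_iter into the folio
 *	->write_end(...);		marks the folio dirty
 *	balance_dirty_pages_ratelimited(mapping);
 *
 * The call is cheap while current->nr_dirtied stays below
 * current->nr_dirtied_pause; only when that budget is exhausted does it
 * enter balance_dirty_pages() and possibly sleep.
 */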
2067
2068/**
2069 * wb_over_bg_thresh - does @wb need to be written back?
2070 * @wb: bdi_writeback of interest
2071 *
2072 * Determines whether background writeback should keep writing @wb or it's
2073 * clean enough.
2074 *
2075 * Return: %true if writeback should continue.
2076 */
2077bool wb_over_bg_thresh(struct bdi_writeback *wb)
2078{
2079	struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
2080	struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
2081	struct dirty_throttle_control * const gdtc = &gdtc_stor;
2082	struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
2083						     &mdtc_stor : NULL;
2084	unsigned long reclaimable;
2085	unsigned long thresh;
2086
2087	/*
2088	 * Similar to balance_dirty_pages() but ignores pages being written
2089	 * as we're trying to decide whether to put more under writeback.
2090	 */
2091	gdtc->avail = global_dirtyable_memory();
2092	gdtc->dirty = global_node_page_state(NR_FILE_DIRTY);
2093	domain_dirty_limits(gdtc);
2094
2095	if (gdtc->dirty > gdtc->bg_thresh)
2096		return true;
2097
2098	thresh = wb_calc_thresh(gdtc->wb, gdtc->bg_thresh);
2099	if (thresh < 2 * wb_stat_error())
2100		reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
2101	else
2102		reclaimable = wb_stat(wb, WB_RECLAIMABLE);
2103
2104	if (reclaimable > thresh)
2105		return true;
2106
2107	if (mdtc) {
2108		unsigned long filepages, headroom, writeback;
2109
2110		mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty,
2111				    &writeback);
2112		mdtc_calc_avail(mdtc, filepages, headroom);
2113		domain_dirty_limits(mdtc);	/* ditto, ignore writeback */
2114
2115		if (mdtc->dirty > mdtc->bg_thresh)
2116			return true;
2117
2118		thresh = wb_calc_thresh(mdtc->wb, mdtc->bg_thresh);
2119		if (thresh < 2 * wb_stat_error())
2120			reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
2121		else
2122			reclaimable = wb_stat(wb, WB_RECLAIMABLE);
2123
2124		if (reclaimable > thresh)
2125			return true;
2126	}
2127
2128	return false;
2129}
2130
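/*
 * Illustrative sketch (not part of this file): the background flusher
 * conceptually keeps issuing writeback for a bdi_writeback as long as
 * wb_over_bg_thresh() reports it is over its background threshold.
 * example_write_some_pages() is a hypothetical stand-in for the real
 * writeback machinery in fs/fs-writeback.c.
 */
static void example_background_flush(struct bdi_writeback *wb)
{
	while (wb_over_bg_thresh(wb)) {
		long wrote = example_write_some_pages(wb, 1024);

		if (wrote <= 0)
			break;	/* nothing more to clean right now */
	}
}
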
2131#ifdef CONFIG_SYSCTL
2132/*
2133 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
2134 */
2135static int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
2136		void *buffer, size_t *length, loff_t *ppos)
2137{
2138	unsigned int old_interval = dirty_writeback_interval;
2139	int ret;
2140
2141	ret = proc_dointvec(table, write, buffer, length, ppos);
2142
2143	/*
2144	 * Writing 0 to dirty_writeback_interval will disable periodic writeback
2145	 * and a different non-zero value will wake up the writeback threads.
2146	 * wb_wakeup_delayed() would be more appropriate, but it's a pain to
2147	 * iterate over all bdis and wbs.
2148	 * The reason we do this is to make the change take effect immediately.
2149	 */
2150	if (!ret && write && dirty_writeback_interval &&
2151		dirty_writeback_interval != old_interval)
2152		wakeup_flusher_threads(WB_REASON_PERIODIC);
2153
2154	return ret;
2155}
2156#endif
2157
2158void laptop_mode_timer_fn(struct timer_list *t)
2159{
2160	struct backing_dev_info *backing_dev_info =
2161		from_timer(backing_dev_info, t, laptop_mode_wb_timer);
2162
2163	wakeup_flusher_threads_bdi(backing_dev_info, WB_REASON_LAPTOP_TIMER);
2164}
2165
2166/*
2167 * We've spun up the disk and we're in laptop mode: schedule writeback
2168 * of all dirty data a few seconds from now.  If the flush is already scheduled
2169 * then push it back - the user is still using the disk.
2170 */
2171void laptop_io_completion(struct backing_dev_info *info)
2172{
2173	mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
2174}
2175
2176/*
2177 * We're in laptop mode and we've just synced. The sync's writes will have
2178 * caused another writeback to be scheduled by laptop_io_completion.
2179 * Nothing needs to be written back anymore, so we unschedule the writeback.
2180 */
2181void laptop_sync_completion(void)
2182{
2183	struct backing_dev_info *bdi;
2184
2185	rcu_read_lock();
2186
2187	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
2188		del_timer(&bdi->laptop_mode_wb_timer);
2189
2190	rcu_read_unlock();
2191}
2192
2193/*
2194 * If ratelimit_pages is too high then we can get into dirty-data overload
2195 * if a large number of processes all perform writes at the same time.
2196 *
2197 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
2198 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
2199 * thresholds.
2200 */
2201
2202void writeback_set_ratelimit(void)
2203{
2204	struct wb_domain *dom = &global_wb_domain;
2205	unsigned long background_thresh;
2206	unsigned long dirty_thresh;
2207
2208	global_dirty_limits(&background_thresh, &dirty_thresh);
2209	dom->dirty_limit = dirty_thresh;
2210	ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
2211	if (ratelimit_pages < 16)
2212		ratelimit_pages = 16;
2213}
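
/*
 * Worked example (illustrative, values assumed): with a dirty threshold of
 * 200000 pages (~780 MiB at 4 KiB pages) and 8 online CPUs,
 *
 *	ratelimit_pages = 200000 / (8 * 32) = 781 pages (~3 MiB)
 *
 * so each CPU re-enters balance_dirty_pages() after dirtying at most ~781
 * pages, and all CPUs together can overshoot the threshold by at most
 * 8 * 781 = 6248 pages, i.e. roughly 1/32 (~3%) of the dirty threshold.
 */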
2214
2215static int page_writeback_cpu_online(unsigned int cpu)
2216{
2217	writeback_set_ratelimit();
2218	return 0;
2219}
2220
2221#ifdef CONFIG_SYSCTL
2222
2223/* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
2224static const unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
2225
2226static struct ctl_table vm_page_writeback_sysctls[] = {
2227	{
2228		.procname   = "dirty_background_ratio",
2229		.data       = &dirty_background_ratio,
2230		.maxlen     = sizeof(dirty_background_ratio),
2231		.mode       = 0644,
2232		.proc_handler   = dirty_background_ratio_handler,
2233		.extra1     = SYSCTL_ZERO,
2234		.extra2     = SYSCTL_ONE_HUNDRED,
2235	},
2236	{
2237		.procname   = "dirty_background_bytes",
2238		.data       = &dirty_background_bytes,
2239		.maxlen     = sizeof(dirty_background_bytes),
2240		.mode       = 0644,
2241		.proc_handler   = dirty_background_bytes_handler,
2242		.extra1     = SYSCTL_LONG_ONE,
2243	},
2244	{
2245		.procname   = "dirty_ratio",
2246		.data       = &vm_dirty_ratio,
2247		.maxlen     = sizeof(vm_dirty_ratio),
2248		.mode       = 0644,
2249		.proc_handler   = dirty_ratio_handler,
2250		.extra1     = SYSCTL_ZERO,
2251		.extra2     = SYSCTL_ONE_HUNDRED,
2252	},
2253	{
2254		.procname   = "dirty_bytes",
2255		.data       = &vm_dirty_bytes,
2256		.maxlen     = sizeof(vm_dirty_bytes),
2257		.mode       = 0644,
2258		.proc_handler   = dirty_bytes_handler,
2259		.extra1     = (void *)&dirty_bytes_min,
2260	},
2261	{
2262		.procname   = "dirty_writeback_centisecs",
2263		.data       = &dirty_writeback_interval,
2264		.maxlen     = sizeof(dirty_writeback_interval),
2265		.mode       = 0644,
2266		.proc_handler   = dirty_writeback_centisecs_handler,
2267	},
2268	{
2269		.procname   = "dirty_expire_centisecs",
2270		.data       = &dirty_expire_interval,
2271		.maxlen     = sizeof(dirty_expire_interval),
2272		.mode       = 0644,
2273		.proc_handler   = proc_dointvec_minmax,
2274		.extra1     = SYSCTL_ZERO,
2275	},
2276#ifdef CONFIG_HIGHMEM
2277	{
2278		.procname	= "highmem_is_dirtyable",
2279		.data		= &vm_highmem_is_dirtyable,
2280		.maxlen		= sizeof(vm_highmem_is_dirtyable),
2281		.mode		= 0644,
2282		.proc_handler	= proc_dointvec_minmax,
2283		.extra1		= SYSCTL_ZERO,
2284		.extra2		= SYSCTL_ONE,
2285	},
2286#endif
2287	{
2288		.procname	= "laptop_mode",
2289		.data		= &laptop_mode,
2290		.maxlen		= sizeof(laptop_mode),
2291		.mode		= 0644,
2292		.proc_handler	= proc_dointvec_jiffies,
2293	},
2294	{}
2295};
2296#endif
2297
2298/*
2299 * Called early on to tune the page writeback dirty limits.
2300 *
2301 * We used to scale dirty pages according to how total memory
2302 * related to pages that could be allocated for buffers.
2303 *
2304 * However, that was when we used "dirty_ratio" to scale with
2305 * all memory, and we don't do that any more. "dirty_ratio"
2306 * is now applied to total non-HIGHPAGE memory, and as such we can't
2307 * get into the old insane situation any more where we had
2308 * large amounts of dirty pages compared to a small amount of
2309 * non-HIGHMEM memory.
2310 *
2311 * But we might still want to scale the dirty_ratio by how
2312 * much memory the box has..
2313 */
2314void __init page_writeback_init(void)
2315{
2316	BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
2317
2318	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/writeback:online",
2319			  page_writeback_cpu_online, NULL);
2320	cpuhp_setup_state(CPUHP_MM_WRITEBACK_DEAD, "mm/writeback:dead", NULL,
2321			  page_writeback_cpu_online);
2322#ifdef CONFIG_SYSCTL
2323	register_sysctl_init("vm", vm_page_writeback_sysctls);
2324#endif
2325}
2326
2327/**
2328 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
2329 * @mapping: address space structure to write
2330 * @start: starting page index
2331 * @end: ending page index (inclusive)
2332 *
2333 * This function scans the page range from @start to @end (inclusive) and tags
2334 * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
2335 * that write_cache_pages (or whoever calls this function) will then use
2336 * TOWRITE tag to identify pages eligible for writeback.  This mechanism is
2337 * used to avoid livelocking of writeback by a process steadily creating new
2338 * dirty pages in the file (thus it is important for this function to be quick
2339 * so that it can tag pages faster than a dirtying process can create them).
2340 */
2341void tag_pages_for_writeback(struct address_space *mapping,
2342			     pgoff_t start, pgoff_t end)
2343{
2344	XA_STATE(xas, &mapping->i_pages, start);
2345	unsigned int tagged = 0;
2346	void *page;
2347
2348	xas_lock_irq(&xas);
2349	xas_for_each_marked(&xas, page, end, PAGECACHE_TAG_DIRTY) {
2350		xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE);
2351		if (++tagged % XA_CHECK_SCHED)
2352			continue;
2353
2354		xas_pause(&xas);
2355		xas_unlock_irq(&xas);
2356		cond_resched();
2357		xas_lock_irq(&xas);
2358	}
2359	xas_unlock_irq(&xas);
2360}
2361EXPORT_SYMBOL(tag_pages_for_writeback);
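
/*
 * Illustrative sketch (not part of this file): a data-integrity writer
 * first tags the dirty range with TOWRITE and then writes only folios
 * carrying that tag, so pages dirtied after the tagging pass cannot
 * livelock it.  example_write_folio() is a hypothetical helper.
 */
static void example_sync_range(struct address_space *mapping,
			       pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	pgoff_t index = start;
	unsigned int i;

	tag_pages_for_writeback(mapping, start, end);

	folio_batch_init(&fbatch);
	while (filemap_get_folios_tag(mapping, &index, end,
				      PAGECACHE_TAG_TOWRITE, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++)
			example_write_folio(fbatch.folios[i]);
		folio_batch_release(&fbatch);
		cond_resched();
	}
}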
2362
2363/**
2364 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2365 * @mapping: address space structure to write
2366 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2367 * @writepage: function called for each page
2368 * @data: data passed to writepage function
2369 *
2370 * If a page is already under I/O, write_cache_pages() skips it, even
2371 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
2372 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
2373 * and msync() need to guarantee that all the data which was dirty at the time
2374 * the call was made get new I/O started against them.  If wbc->sync_mode is
2375 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2376 * existing IO to complete.
2377 *
2378 * To avoid livelocks (when other process dirties new pages), we first tag
2379 * pages which should be written back with TOWRITE tag and only then start
2380 * writing them. For data-integrity sync we have to be careful so that we do
2381 * not miss some pages (e.g., because some other process has cleared TOWRITE
2382 * tag we set). The rule we follow is that TOWRITE tag can be cleared only
2383 * by the process clearing the DIRTY tag (and submitting the page for IO).
2384 *
2385 * To avoid deadlocks between range_cyclic writeback and callers that hold
2386 * pages in PageWriteback to aggregate IO until write_cache_pages() returns,
2387 * we do not loop back to the start of the file. Doing so causes a page
2388 * lock/page writeback access order inversion - we should only ever lock
2389 * multiple pages in ascending page->index order, and looping back to the start
2390 * of the file violates that rule and causes deadlocks.
2391 *
2392 * Return: %0 on success, negative error code otherwise
2393 */
2394int write_cache_pages(struct address_space *mapping,
2395		      struct writeback_control *wbc, writepage_t writepage,
2396		      void *data)
2397{
2398	int ret = 0;
2399	int done = 0;
2400	int error;
2401	struct folio_batch fbatch;
2402	int nr_folios;
2403	pgoff_t index;
2404	pgoff_t end;		/* Inclusive */
2405	pgoff_t done_index;
2406	int range_whole = 0;
2407	xa_mark_t tag;
2408
2409	folio_batch_init(&fbatch);
2410	if (wbc->range_cyclic) {
2411		index = mapping->writeback_index; /* prev offset */
2412		end = -1;
2413	} else {
2414		index = wbc->range_start >> PAGE_SHIFT;
2415		end = wbc->range_end >> PAGE_SHIFT;
2416		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2417			range_whole = 1;
2418	}
2419	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) {
2420		tag_pages_for_writeback(mapping, index, end);
2421		tag = PAGECACHE_TAG_TOWRITE;
2422	} else {
2423		tag = PAGECACHE_TAG_DIRTY;
2424	}
2425	done_index = index;
2426	while (!done && (index <= end)) {
2427		int i;
2428
2429		nr_folios = filemap_get_folios_tag(mapping, &index, end,
2430				tag, &fbatch);
2431
2432		if (nr_folios == 0)
2433			break;
2434
2435		for (i = 0; i < nr_folios; i++) {
2436			struct folio *folio = fbatch.folios[i];
2437			unsigned long nr;
2438
2439			done_index = folio->index;
2440
2441			folio_lock(folio);
2442
2443			/*
2444			 * Page truncated or invalidated. We can freely skip it
2445			 * then, even for data integrity operations: the page
2446			 * has disappeared concurrently, so there could be no
2447			 * real expectation of this data integrity operation
2448			 * even if there is now a new, dirty page at the same
2449			 * pagecache address.
2450			 */
2451			if (unlikely(folio->mapping != mapping)) {
2452continue_unlock:
2453				folio_unlock(folio);
2454				continue;
2455			}
2456
2457			if (!folio_test_dirty(folio)) {
2458				/* someone wrote it for us */
2459				goto continue_unlock;
2460			}
2461
2462			if (folio_test_writeback(folio)) {
2463				if (wbc->sync_mode != WB_SYNC_NONE)
2464					folio_wait_writeback(folio);
2465				else
2466					goto continue_unlock;
2467			}
2468
2469			BUG_ON(folio_test_writeback(folio));
2470			if (!folio_clear_dirty_for_io(folio))
2471				goto continue_unlock;
2472
2473			trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
2474			error = writepage(folio, wbc, data);
2475			nr = folio_nr_pages(folio);
2476			if (unlikely(error)) {
2477				/*
2478				 * Handle errors according to the type of
2479				 * writeback. There's no need to continue for
2480				 * background writeback. Just push done_index
2481				 * past this page so media errors won't choke
2482				 * writeout for the entire file. For integrity
2483				 * writeback, we must process the entire dirty
2484				 * set regardless of errors because the fs may
2485				 * still have state to clear for each page. In
2486				 * that case we continue processing and return
2487				 * the first error.
2488				 */
2489				if (error == AOP_WRITEPAGE_ACTIVATE) {
2490					folio_unlock(folio);
2491					error = 0;
2492				} else if (wbc->sync_mode != WB_SYNC_ALL) {
2493					ret = error;
2494					done_index = folio->index + nr;
2495					done = 1;
2496					break;
2497				}
2498				if (!ret)
2499					ret = error;
2500			}
2501
2502			/*
2503			 * We stop writing back only if we are not doing
2504			 * integrity sync. In case of integrity sync we have to
2505			 * keep going until we have written all the pages
2506			 * we tagged for writeback prior to entering this loop.
2507			 */
2508			wbc->nr_to_write -= nr;
2509			if (wbc->nr_to_write <= 0 &&
2510			    wbc->sync_mode == WB_SYNC_NONE) {
2511				done = 1;
2512				break;
2513			}
2514		}
2515		folio_batch_release(&fbatch);
2516		cond_resched();
2517	}
2518
2519	/*
2520	 * If we hit the last page and there is more work to be done: wrap
2521	 * the index back to the start of the file for the next
2522	 * time we are called.
2523	 */
2524	if (wbc->range_cyclic && !done)
2525		done_index = 0;
2526	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2527		mapping->writeback_index = done_index;
2528
2529	return ret;
2530}
2531EXPORT_SYMBOL(write_cache_pages);
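
/*
 * Illustrative sketch (not part of this file): a filesystem can implement
 * its ->writepages() method on top of write_cache_pages() by supplying a
 * writepage_t callback.  example_write_folio() and example_fs_writepages()
 * are hypothetical names.
 */
static int example_writepage_cb(struct folio *folio,
				struct writeback_control *wbc, void *data)
{
	struct address_space *mapping = data;

	/* submit the folio for I/O; it was locked and cleaned for us */
	return example_write_folio(mapping, folio, wbc);
}

static int example_fs_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	return write_cache_pages(mapping, wbc, example_writepage_cb, mapping);
}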
2532
2533static int writepage_cb(struct folio *folio, struct writeback_control *wbc,
2534		void *data)
2535{
2536	struct address_space *mapping = data;
2537	int ret = mapping->a_ops->writepage(&folio->page, wbc);
2538	mapping_set_error(mapping, ret);
2539	return ret;
2540}
2541
2542int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
2543{
2544	int ret;
2545	struct bdi_writeback *wb;
2546
2547	if (wbc->nr_to_write <= 0)
2548		return 0;
2549	wb = inode_to_wb_wbc(mapping->host, wbc);
2550	wb_bandwidth_estimate_start(wb);
2551	while (1) {
2552		if (mapping->a_ops->writepages) {
2553			ret = mapping->a_ops->writepages(mapping, wbc);
2554		} else if (mapping->a_ops->writepage) {
2555			struct blk_plug plug;
2556
2557			blk_start_plug(&plug);
2558			ret = write_cache_pages(mapping, wbc, writepage_cb,
2559						mapping);
2560			blk_finish_plug(&plug);
2561		} else {
2562			/* deal with chardevs and other special files */
2563			ret = 0;
2564		}
2565		if (ret != -ENOMEM || wbc->sync_mode != WB_SYNC_ALL)
2566			break;
2567
2568		/*
2569		 * Lacking an allocation context or the locality or writeback
2570		 * state of any of the inode's pages, throttle based on
2571		 * writeback activity on the local node. It's as good a
2572		 * guess as any.
2573		 */
2574		reclaim_throttle(NODE_DATA(numa_node_id()),
2575			VMSCAN_THROTTLE_WRITEBACK);
2576	}
2577	/*
2578	 * Usually only a few of the pages we have just submitted are written
2579	 * by now, but if writeback is being submitted constantly, this makes
2580	 * sure the writeback bandwidth estimate is updated once in a while.
2581	 */
2582	if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
2583				   BANDWIDTH_INTERVAL))
2584		wb_update_bandwidth(wb);
2585	return ret;
2586}
2587
2588/*
2589 * For address_spaces which do not use buffers nor write back.
2590 */
2591bool noop_dirty_folio(struct address_space *mapping, struct folio *folio)
2592{
2593	if (!folio_test_dirty(folio))
2594		return !folio_test_set_dirty(folio);
2595	return false;
2596}
2597EXPORT_SYMBOL(noop_dirty_folio);
2598
2599/*
2600 * Helper function for set_page_dirty family.
2601 *
2602 * Caller must hold folio_memcg_lock().
2603 *
2604 * NOTE: This relies on being atomic wrt interrupts.
2605 */
2606static void folio_account_dirtied(struct folio *folio,
2607		struct address_space *mapping)
2608{
2609	struct inode *inode = mapping->host;
2610
2611	trace_writeback_dirty_folio(folio, mapping);
2612
2613	if (mapping_can_writeback(mapping)) {
2614		struct bdi_writeback *wb;
2615		long nr = folio_nr_pages(folio);
2616
2617		inode_attach_wb(inode, folio);
2618		wb = inode_to_wb(inode);
2619
2620		__lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr);
2621		__zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
2622		__node_stat_mod_folio(folio, NR_DIRTIED, nr);
2623		wb_stat_mod(wb, WB_RECLAIMABLE, nr);
2624		wb_stat_mod(wb, WB_DIRTIED, nr);
2625		task_io_account_write(nr * PAGE_SIZE);
2626		current->nr_dirtied += nr;
2627		__this_cpu_add(bdp_ratelimits, nr);
2628
2629		mem_cgroup_track_foreign_dirty(folio, wb);
2630	}
2631}
2632
2633/*
2634 * Helper function for deaccounting a dirty folio without writeback.
2635 *
2636 * Caller must hold folio_memcg_lock().
2637 */
2638void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
2639{
2640	long nr = folio_nr_pages(folio);
2641
2642	lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
2643	zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
2644	wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
2645	task_io_account_cancelled_write(nr * PAGE_SIZE);
2646}
2647
2648/*
2649 * Mark the folio dirty, set it dirty in the page cache, and mark
2650 * the inode dirty.
2651 *
2652 * If warn is true, then emit a warning if the folio is not uptodate and has
2653 * not been truncated.
2654 *
2655 * The caller must hold folio_memcg_lock().  Most callers have the folio
2656 * locked.  A few have the folio blocked from truncation through other
2657 * means (eg zap_vma_pages() has it mapped and is holding the page table
2658 * lock).  This can also be called from mark_buffer_dirty(), which I
2659 * cannot prove is always protected against truncate.
2660 */
2661void __folio_mark_dirty(struct folio *folio, struct address_space *mapping,
2662			     int warn)
2663{
2664	unsigned long flags;
2665
2666	xa_lock_irqsave(&mapping->i_pages, flags);
2667	if (folio->mapping) {	/* Race with truncate? */
2668		WARN_ON_ONCE(warn && !folio_test_uptodate(folio));
2669		folio_account_dirtied(folio, mapping);
2670		__xa_set_mark(&mapping->i_pages, folio_index(folio),
2671				PAGECACHE_TAG_DIRTY);
2672	}
2673	xa_unlock_irqrestore(&mapping->i_pages, flags);
2674}
2675
2676/**
2677 * filemap_dirty_folio - Mark a folio dirty for filesystems which do not use buffer_heads.
2678 * @mapping: Address space this folio belongs to.
2679 * @folio: Folio to be marked as dirty.
2680 *
2681 * Filesystems which do not use buffer heads should call this function
2682 * from their dirty_folio address space operation.  It ignores the
2683 * contents of folio_get_private(), so if the filesystem marks individual
2684 * blocks as dirty, the filesystem should handle that itself.
2685 *
2686 * This is also sometimes used by filesystems which use buffer_heads when
2687 * a single buffer is being dirtied: we want to set the folio dirty in
2688 * that case, but not all the buffers.  This is a "bottom-up" dirtying,
2689 * whereas block_dirty_folio() is a "top-down" dirtying.
2690 *
2691 * The caller must ensure this doesn't race with truncation.  Most will
2692 * simply hold the folio lock, but e.g. zap_pte_range() calls with the
2693 * folio mapped and the pte lock held, which also locks out truncation.
2694 */
2695bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio)
2696{
2697	folio_memcg_lock(folio);
2698	if (folio_test_set_dirty(folio)) {
2699		folio_memcg_unlock(folio);
2700		return false;
2701	}
2702
2703	__folio_mark_dirty(folio, mapping, !folio_test_private(folio));
2704	folio_memcg_unlock(folio);
2705
2706	if (mapping->host) {
2707		/* !PageAnon && !swapper_space */
2708		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
2709	}
2710	return true;
2711}
2712EXPORT_SYMBOL(filemap_dirty_folio);
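
/*
 * Illustrative sketch (not part of this file): a filesystem that keeps no
 * per-block dirty state simply wires its dirty_folio method to
 * filemap_dirty_folio().  The structure name is hypothetical and all
 * other methods are omitted.
 */
static const struct address_space_operations example_aops = {
	.dirty_folio	= filemap_dirty_folio,
	/* ... readahead, writepages, etc. omitted ... */
};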
2713
2714/**
2715 * folio_redirty_for_writepage - Decline to write a dirty folio.
2716 * @wbc: The writeback control.
2717 * @folio: The folio.
2718 *
2719 * When a writepage implementation decides that it doesn't want to write
2720 * @folio for some reason, it should call this function, unlock @folio and
2721 * return 0.
2722 *
2723 * Return: True if we redirtied the folio.  False if someone else dirtied
2724 * it first.
2725 */
2726bool folio_redirty_for_writepage(struct writeback_control *wbc,
2727		struct folio *folio)
2728{
2729	struct address_space *mapping = folio->mapping;
2730	long nr = folio_nr_pages(folio);
2731	bool ret;
2732
2733	wbc->pages_skipped += nr;
2734	ret = filemap_dirty_folio(mapping, folio);
2735	if (mapping && mapping_can_writeback(mapping)) {
2736		struct inode *inode = mapping->host;
2737		struct bdi_writeback *wb;
2738		struct wb_lock_cookie cookie = {};
2739
2740		wb = unlocked_inode_to_wb_begin(inode, &cookie);
2741		current->nr_dirtied -= nr;
2742		node_stat_mod_folio(folio, NR_DIRTIED, -nr);
2743		wb_stat_mod(wb, WB_DIRTIED, -nr);
2744		unlocked_inode_to_wb_end(inode, &cookie);
2745	}
2746	return ret;
2747}
2748EXPORT_SYMBOL(folio_redirty_for_writepage);
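
/*
 * Illustrative sketch (not part of this file): a write_cache_pages()
 * callback that decides it cannot write the folio right now (for example
 * because a hypothetical example_can_write_now() says so) redirties the
 * folio, unlocks it and returns 0, exactly as described above.
 */
static int example_writepage_decline(struct folio *folio,
				     struct writeback_control *wbc, void *data)
{
	if (!example_can_write_now(folio)) {
		folio_redirty_for_writepage(wbc, folio);
		folio_unlock(folio);
		return 0;
	}

	/* write_cache_pages() already cleared the dirty flag; submit the I/O here */
	return 0;
}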
2749
2750/**
2751 * folio_mark_dirty - Mark a folio as being modified.
2752 * @folio: The folio.
2753 *
2754 * The folio may not be truncated while this function is running.
2755 * Holding the folio lock is sufficient to prevent truncation, but some
2756 * callers cannot acquire a sleeping lock.  These callers instead hold
2757 * the page table lock for a page table which contains at least one page
2758 * in this folio.  Truncation will block on the page table lock as it
2759 * unmaps pages before removing the folio from its mapping.
2760 *
2761 * Return: True if the folio was newly dirtied, false if it was already dirty.
2762 */
2763bool folio_mark_dirty(struct folio *folio)
2764{
2765	struct address_space *mapping = folio_mapping(folio);
2766
2767	if (likely(mapping)) {
2768		/*
2769		 * The PG_readahead/PG_reclaim flags set by readahead or
2770		 * folio_deactivate() could remain set due to a race with
2771		 * folio_end_writeback().
2772		 * For readahead: if the folio is written, the flags will be
2773		 * reset, so there is no problem.
2774		 * For folio_deactivate(): if the folio is redirtied, the flag
2775		 * will be reset, so there is no problem.  But if the folio is
2776		 * used by readahead, it will confuse readahead and make it
2777		 * restart the size rampup process - a trivial problem.
2778		 */
2779		if (folio_test_reclaim(folio))
2780			folio_clear_reclaim(folio);
2781		return mapping->a_ops->dirty_folio(mapping, folio);
2782	}
2783
2784	return noop_dirty_folio(mapping, folio);
2785}
2786EXPORT_SYMBOL(folio_mark_dirty);
2787
2788/*
2789 * set_page_dirty() is racy if the caller has no reference against
2790 * page->mapping->host, and if the page is unlocked.  This is because another
2791 * CPU could truncate the page off the mapping and then free the mapping.
2792 *
2793 * Usually, the page _is_ locked, or the caller is a user-space process which
2794 * holds a reference on the inode by having an open file.
2795 *
2796 * In other cases, the page should be locked before running set_page_dirty().
2797 */
2798int set_page_dirty_lock(struct page *page)
2799{
2800	int ret;
2801
2802	lock_page(page);
2803	ret = set_page_dirty(page);
2804	unlock_page(page);
2805	return ret;
2806}
2807EXPORT_SYMBOL(set_page_dirty_lock);
2808
2809/*
2810 * This cancels just the dirty bit on the kernel page itself, it does NOT
2811 * actually remove dirty bits on any mmap's that may be around. It also
2812 * leaves the page tagged dirty, so any sync activity will still find it on
2813 * the dirty lists, and in particular, clear_page_dirty_for_io() will still
2814 * look at the dirty bits in the VM.
2815 *
2816 * Doing this should *normally* only ever be done when a page is truncated,
2817 * and is not actually mapped anywhere at all. However, fs/buffer.c does
2818 * this when it notices that somebody has cleaned out all the buffers on a
2819 * page without actually doing it through the VM. Can you say "ext3 is
2820 * horribly ugly"? Thought you could.
2821 */
2822void __folio_cancel_dirty(struct folio *folio)
2823{
2824	struct address_space *mapping = folio_mapping(folio);
2825
2826	if (mapping_can_writeback(mapping)) {
2827		struct inode *inode = mapping->host;
2828		struct bdi_writeback *wb;
2829		struct wb_lock_cookie cookie = {};
2830
2831		folio_memcg_lock(folio);
2832		wb = unlocked_inode_to_wb_begin(inode, &cookie);
2833
2834		if (folio_test_clear_dirty(folio))
2835			folio_account_cleaned(folio, wb);
2836
2837		unlocked_inode_to_wb_end(inode, &cookie);
2838		folio_memcg_unlock(folio);
2839	} else {
2840		folio_clear_dirty(folio);
2841	}
2842}
2843EXPORT_SYMBOL(__folio_cancel_dirty);
2844
2845/*
2846 * Clear a folio's dirty flag, while caring for dirty memory accounting.
2847 * Returns true if the folio was previously dirty.
2848 *
2849 * This is for preparing to put the folio under writeout.  We leave
2850 * the folio tagged as dirty in the xarray so that a concurrent
2851 * write-for-sync can discover it via a PAGECACHE_TAG_DIRTY walk.
2852 * The ->writepage implementation will run either folio_start_writeback()
2853 * or folio_mark_dirty(), at which stage we bring the folio's dirty flag
2854 * and xarray dirty tag back into sync.
2855 *
2856 * This incoherency between the folio's dirty flag and xarray tag is
2857 * unfortunate, but it only exists while the folio is locked.
2858 */
2859bool folio_clear_dirty_for_io(struct folio *folio)
2860{
2861	struct address_space *mapping = folio_mapping(folio);
2862	bool ret = false;
2863
2864	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2865
2866	if (mapping && mapping_can_writeback(mapping)) {
2867		struct inode *inode = mapping->host;
2868		struct bdi_writeback *wb;
2869		struct wb_lock_cookie cookie = {};
2870
2871		/*
2872		 * Yes, Virginia, this is indeed insane.
2873		 *
2874		 * We use this sequence to make sure that
2875		 *  (a) we account for dirty stats properly
2876		 *  (b) we tell the low-level filesystem to
2877		 *      mark the whole folio dirty if it was
2878		 *      dirty in a pagetable. Only to then
2879		 *  (c) clean the folio again and return 1 to
2880		 *      cause the writeback.
2881		 *
2882		 * This way we avoid all nasty races with the
2883		 * dirty bit in multiple places and clearing
2884		 * them concurrently from different threads.
2885		 *
2886		 * Note! Normally the "folio_mark_dirty(folio)"
2887		 * has no effect on the actual dirty bit - since
2888		 * that will already usually be set. But we
2889		 * need the side effects, and it can help us
2890		 * avoid races.
2891		 *
2892		 * We basically use the folio "master dirty bit"
2893		 * as a serialization point for all the different
2894		 * threads doing their things.
2895		 */
2896		if (folio_mkclean(folio))
2897			folio_mark_dirty(folio);
2898		/*
2899		 * We carefully synchronise fault handlers against
2900		 * installing a dirty pte and marking the folio dirty
2901		 * at this point.  We do this by having them hold the
2902		 * page lock while dirtying the folio, and folios are
2903		 * always locked coming in here, so we get the desired
2904		 * exclusion.
2905		 */
2906		wb = unlocked_inode_to_wb_begin(inode, &cookie);
2907		if (folio_test_clear_dirty(folio)) {
2908			long nr = folio_nr_pages(folio);
2909			lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
2910			zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
2911			wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
2912			ret = true;
2913		}
2914		unlocked_inode_to_wb_end(inode, &cookie);
2915		return ret;
2916	}
2917	return folio_test_clear_dirty(folio);
2918}
2919EXPORT_SYMBOL(folio_clear_dirty_for_io);
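
/*
 * Illustrative sketch (not part of this file): the canonical order in a
 * writeout path is to clear the dirty flag for I/O, mark the folio under
 * writeback, submit the I/O, and end writeback on completion.
 * example_submit_folio_io() is a hypothetical helper whose completion
 * handler is assumed to call folio_end_writeback().
 */
static int example_start_writeout(struct folio *folio,
				  struct writeback_control *wbc)
{
	if (!folio_clear_dirty_for_io(folio))
		return 0;	/* someone else cleaned it for us */

	folio_start_writeback(folio);
	example_submit_folio_io(folio);
	folio_unlock(folio);
	return 0;
}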
2920
2921static void wb_inode_writeback_start(struct bdi_writeback *wb)
2922{
2923	atomic_inc(&wb->writeback_inodes);
2924}
2925
2926static void wb_inode_writeback_end(struct bdi_writeback *wb)
2927{
2928	unsigned long flags;
2929	atomic_dec(&wb->writeback_inodes);
2930	/*
2931	 * Make sure estimate of writeback throughput gets updated after
2932	 * writeback completed. We delay the update by BANDWIDTH_INTERVAL
2933	 * (which is the interval other bandwidth updates use for batching) so
2934	 * that if multiple inodes end writeback at a similar time, they get
2935	 * batched into one bandwidth update.
2936	 */
2937	spin_lock_irqsave(&wb->work_lock, flags);
2938	if (test_bit(WB_registered, &wb->state))
2939		queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
2940	spin_unlock_irqrestore(&wb->work_lock, flags);
2941}
2942
2943bool __folio_end_writeback(struct folio *folio)
2944{
2945	long nr = folio_nr_pages(folio);
2946	struct address_space *mapping = folio_mapping(folio);
2947	bool ret;
2948
2949	folio_memcg_lock(folio);
2950	if (mapping && mapping_use_writeback_tags(mapping)) {
2951		struct inode *inode = mapping->host;
2952		struct backing_dev_info *bdi = inode_to_bdi(inode);
2953		unsigned long flags;
2954
2955		xa_lock_irqsave(&mapping->i_pages, flags);
2956		ret = folio_xor_flags_has_waiters(folio, 1 << PG_writeback);
2957		__xa_clear_mark(&mapping->i_pages, folio_index(folio),
2958					PAGECACHE_TAG_WRITEBACK);
2959		if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
2960			struct bdi_writeback *wb = inode_to_wb(inode);
2961
2962			wb_stat_mod(wb, WB_WRITEBACK, -nr);
2963			__wb_writeout_add(wb, nr);
2964			if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
2965				wb_inode_writeback_end(wb);
2966		}
2967
2968		if (mapping->host && !mapping_tagged(mapping,
2969						     PAGECACHE_TAG_WRITEBACK))
2970			sb_clear_inode_writeback(mapping->host);
2971
2972		xa_unlock_irqrestore(&mapping->i_pages, flags);
2973	} else {
2974		ret = folio_xor_flags_has_waiters(folio, 1 << PG_writeback);
2975	}
2976
2977	lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
2978	zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
2979	node_stat_mod_folio(folio, NR_WRITTEN, nr);
2980	folio_memcg_unlock(folio);
2981
2982	return ret;
2983}
2984
2985void __folio_start_writeback(struct folio *folio, bool keep_write)
2986{
2987	long nr = folio_nr_pages(folio);
2988	struct address_space *mapping = folio_mapping(folio);
2989	int access_ret;
2990
2991	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
2992
2993	folio_memcg_lock(folio);
2994	if (mapping && mapping_use_writeback_tags(mapping)) {
2995		XA_STATE(xas, &mapping->i_pages, folio_index(folio));
2996		struct inode *inode = mapping->host;
2997		struct backing_dev_info *bdi = inode_to_bdi(inode);
2998		unsigned long flags;
2999		bool on_wblist;
3000
3001		xas_lock_irqsave(&xas, flags);
3002		xas_load(&xas);
3003		folio_test_set_writeback(folio);
3004
3005		on_wblist = mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
3006
3007		xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
3008		if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
3009			struct bdi_writeback *wb = inode_to_wb(inode);
3010
3011			wb_stat_mod(wb, WB_WRITEBACK, nr);
3012			if (!on_wblist)
3013				wb_inode_writeback_start(wb);
3014		}
3015
3016		/*
3017		 * We can come through here when swapping anonymous
3018		 * folios, so we don't necessarily have an inode to
3019		 * track for sync.
3020		 */
3021		if (mapping->host && !on_wblist)
3022			sb_mark_inode_writeback(mapping->host);
3023		if (!folio_test_dirty(folio))
3024			xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
3025		if (!keep_write)
3026			xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
3027		xas_unlock_irqrestore(&xas, flags);
3028	} else {
3029		folio_test_set_writeback(folio);
3030	}
3031
3032	lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
3033	zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
3034	folio_memcg_unlock(folio);
3035
3036	access_ret = arch_make_folio_accessible(folio);
3037	/*
3038	 * If writeback has been triggered on a page that cannot be made
3039	 * accessible, it is too late to recover here.
3040	 */
3041	VM_BUG_ON_FOLIO(access_ret != 0, folio);
3042}
3043EXPORT_SYMBOL(__folio_start_writeback);
3044
3045/**
3046 * folio_wait_writeback - Wait for a folio to finish writeback.
3047 * @folio: The folio to wait for.
3048 *
3049 * If the folio is currently being written back to storage, wait for the
3050 * I/O to complete.
3051 *
3052 * Context: Sleeps.  Must be called in process context and with
3053 * no spinlocks held.  Caller should hold a reference on the folio.
3054 * If the folio is not locked, writeback may start again after writeback
3055 * has finished.
3056 */
3057void folio_wait_writeback(struct folio *folio)
3058{
3059	while (folio_test_writeback(folio)) {
3060		trace_folio_wait_writeback(folio, folio_mapping(folio));
3061		folio_wait_bit(folio, PG_writeback);
3062	}
3063}
3064EXPORT_SYMBOL_GPL(folio_wait_writeback);
3065
3066/**
3067 * folio_wait_writeback_killable - Wait for a folio to finish writeback.
3068 * @folio: The folio to wait for.
3069 *
3070 * If the folio is currently being written back to storage, wait for the
3071 * I/O to complete or a fatal signal to arrive.
3072 *
3073 * Context: Sleeps.  Must be called in process context and with
3074 * no spinlocks held.  Caller should hold a reference on the folio.
3075 * If the folio is not locked, writeback may start again after writeback
3076 * has finished.
3077 * Return: 0 on success, -EINTR if we get a fatal signal while waiting.
3078 */
3079int folio_wait_writeback_killable(struct folio *folio)
3080{
3081	while (folio_test_writeback(folio)) {
3082		trace_folio_wait_writeback(folio, folio_mapping(folio));
3083		if (folio_wait_bit_killable(folio, PG_writeback))
3084			return -EINTR;
3085	}
3086
3087	return 0;
3088}
3089EXPORT_SYMBOL_GPL(folio_wait_writeback_killable);
3090
3091/**
3092 * folio_wait_stable() - wait for writeback to finish, if necessary.
3093 * @folio: The folio to wait on.
3094 *
3095 * This function determines if the given folio is related to a backing
3096 * device that requires folio contents to be held stable during writeback.
3097 * If so, then it will wait for any pending writeback to complete.
3098 *
3099 * Context: Sleeps.  Must be called in process context and with
3100 * no spinlocks held.  Caller should hold a reference on the folio.
3101 * If the folio is not locked, writeback may start again after writeback
3102 * has finished.
3103 */
3104void folio_wait_stable(struct folio *folio)
3105{
3106	if (mapping_stable_writes(folio_mapping(folio)))
3107		folio_wait_writeback(folio);
3108}
3109EXPORT_SYMBOL_GPL(folio_wait_stable);
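
/*
 * Illustrative sketch (not part of this file): a ->page_mkwrite()
 * implementation for a backing device that requires stable pages waits
 * for any in-flight writeback to finish before letting the folio be
 * written to again.  Truncation checks are omitted for brevity; the
 * function name is hypothetical.
 */
static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);

	folio_lock(folio);
	folio_mark_dirty(folio);
	/* block until writeback completes if the backing device needs stable pages */
	folio_wait_stable(folio);
	return VM_FAULT_LOCKED;
}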