/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	Andrew Morton
 *		Initial version
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h> /* __set_page_dirty_buffers */
#include <linux/pagevec.h>
#include <linux/timer.h>
#include <linux/sched/rt.h>
#include <linux/sched/signal.h>
#include <linux/mm_inline.h>
#include <trace/events/writeback.h>

#include "internal.h"

/*
 * Sleep at most 200ms at a time in balance_dirty_pages().
 */
#define MAX_PAUSE		max(HZ/5, 1)

/*
 * Try to keep balance_dirty_pages() call intervals higher than this many pages
 * by raising pause time to max_pause when it falls below this threshold.
 */
#define DIRTY_POLL_THRESH	(128 >> (PAGE_SHIFT - 10))

/*
 * Estimate write bandwidth at 200ms intervals.
 */
#define BANDWIDTH_INTERVAL	max(HZ/5, 1)

#define RATELIMIT_CALC_SHIFT	10

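/*
 * Example values (editor's illustration), assuming HZ == 1000 and
 * PAGE_SIZE == 4096 (PAGE_SHIFT == 12): MAX_PAUSE = max(200, 1) = 200
 * jiffies = 200ms, DIRTY_POLL_THRESH = 128 >> 2 = 32 pages, and
 * BANDWIDTH_INTERVAL = 200 jiffies.  RATELIMIT_CALC_SHIFT == 10 means
 * the pos_ratio arithmetic below is fixed point with 1 << 10 == 1024
 * representing 1.0.
 */
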
/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via writeback threads) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
 * dirty_background_ratio * the amount of dirtyable memory
 */
unsigned long dirty_background_bytes;

/*
 * free highmem will not be subtracted from the total free memory
 * for calculating free ratios if vm_highmem_is_dirtyable is true
 */
int vm_highmem_is_dirtyable;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 20;

/*
 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
 * vm_dirty_ratio * the amount of dirtyable memory
 */
unsigned long vm_dirty_bytes;

/*
 * The interval between `kupdate'-style writebacks
 */
unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */

EXPORT_SYMBOL_GPL(dirty_writeback_interval);

/*
 * The longest time for which data is allowed to remain dirty
 */
unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */

struct wb_domain global_wb_domain;

/* consolidated parameters for balance_dirty_pages() and its subroutines */
struct dirty_throttle_control {
#ifdef CONFIG_CGROUP_WRITEBACK
	struct wb_domain	*dom;
	struct dirty_throttle_control *gdtc;	/* only set in memcg dtc's */
#endif
	struct bdi_writeback	*wb;
	struct fprop_local_percpu *wb_completions;

	unsigned long		avail;		/* dirtyable */
	unsigned long		dirty;		/* file_dirty + write + nfs */
	unsigned long		thresh;		/* dirty threshold */
	unsigned long		bg_thresh;	/* dirty background threshold */

	unsigned long		wb_dirty;	/* per-wb counterparts */
	unsigned long		wb_thresh;
	unsigned long		wb_bg_thresh;

	unsigned long		pos_ratio;
};

/*
 * Length of period for aging writeout fractions of bdis. This is an
 * arbitrarily chosen number. The longer the period, the slower fractions will
 * reflect changes in current writeout rate.
 */
#define VM_COMPLETIONS_PERIOD_LEN (3*HZ)

#ifdef CONFIG_CGROUP_WRITEBACK

#define GDTC_INIT(__wb)		.wb = (__wb),				\
				.dom = &global_wb_domain,		\
				.wb_completions = &(__wb)->completions

#define GDTC_INIT_NO_WB		.dom = &global_wb_domain

#define MDTC_INIT(__wb, __gdtc)	.wb = (__wb),				\
				.dom = mem_cgroup_wb_domain(__wb),	\
				.wb_completions = &(__wb)->memcg_completions, \
				.gdtc = __gdtc

static bool mdtc_valid(struct dirty_throttle_control *dtc)
{
	return dtc->dom;
}

static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
{
	return dtc->dom;
}

static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
{
	return mdtc->gdtc;
}

static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
{
	return &wb->memcg_completions;
}

static void wb_min_max_ratio(struct bdi_writeback *wb,
			     unsigned long *minp, unsigned long *maxp)
{
	unsigned long this_bw = wb->avg_write_bandwidth;
	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
	unsigned long long min = wb->bdi->min_ratio;
	unsigned long long max = wb->bdi->max_ratio;

	/*
	 * @wb may already be clean by the time control reaches here and
	 * the total may not include its bw.
	 */
	if (this_bw < tot_bw) {
		if (min) {
			min *= this_bw;
			do_div(min, tot_bw);
		}
		if (max < 100) {
			max *= this_bw;
			do_div(max, tot_bw);
		}
	}

	*minp = min;
	*maxp = max;
}

#else	/* CONFIG_CGROUP_WRITEBACK */

#define GDTC_INIT(__wb)		.wb = (__wb),                           \
				.wb_completions = &(__wb)->completions
#define GDTC_INIT_NO_WB
#define MDTC_INIT(__wb, __gdtc)

static bool mdtc_valid(struct dirty_throttle_control *dtc)
{
	return false;
}

static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
{
	return &global_wb_domain;
}

static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
{
	return NULL;
}

static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
{
	return NULL;
}

static void wb_min_max_ratio(struct bdi_writeback *wb,
			     unsigned long *minp, unsigned long *maxp)
{
	*minp = wb->bdi->min_ratio;
	*maxp = wb->bdi->max_ratio;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

/*
 * In a memory zone, there is a certain number of pages we consider
 * available for the page cache, which is essentially the number of
 * free and reclaimable pages, minus some zone reserves to protect
 * lowmem and the ability to uphold the zone's watermarks without
 * requiring writeback.
 *
 * This number of dirtyable pages is the base value; applying the
 * user-configurable dirty ratio to it yields the effective number of
 * pages that are allowed to be actually dirtied, either per individual
 * zone, or globally by using the sum of dirtyable pages over all zones.
 *
 * Because the user is allowed to specify the dirty limit globally as
 * absolute number of bytes, calculating the per-zone dirty limit can
 * require translating the configured limit into a percentage of
 * global dirtyable memory first.
 */
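
/*
 * For example, with vm_dirty_bytes set to 1 GiB on a machine with 16 GiB
 * of globally dirtyable memory, the byte limit translates to 1/16th,
 * i.e. the equivalent of a 6.25% dirty ratio of global dirtyable memory.
 */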

/**
 * node_dirtyable_memory - number of dirtyable pages in a node
 * @pgdat: the node
 *
 * Returns the node's number of pages potentially available for dirty
 * page cache.  This is the base value for the per-node dirty limits.
 */
static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)
{
	unsigned long nr_pages = 0;
	int z;

	for (z = 0; z < MAX_NR_ZONES; z++) {
		struct zone *zone = pgdat->node_zones + z;

		if (!populated_zone(zone))
			continue;

		nr_pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	/*
	 * Pages reserved for the kernel should not be considered
	 * dirtyable, to prevent a situation where reclaim has to
	 * clean pages in order to balance the zones.
	 */
	nr_pages -= min(nr_pages, pgdat->totalreserve_pages);

	nr_pages += node_page_state(pgdat, NR_INACTIVE_FILE);
	nr_pages += node_page_state(pgdat, NR_ACTIVE_FILE);

	return nr_pages;
}

static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
	int node;
	unsigned long x = 0;
	int i;

	for_each_node_state(node, N_HIGH_MEMORY) {
		for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
			struct zone *z;
			unsigned long nr_pages;

			if (!is_highmem_idx(i))
				continue;

			z = &NODE_DATA(node)->node_zones[i];
			if (!populated_zone(z))
				continue;

			nr_pages = zone_page_state(z, NR_FREE_PAGES);
			/* watch for underflows */
			nr_pages -= min(nr_pages, high_wmark_pages(z));
			nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE);
			nr_pages += zone_page_state(z, NR_ZONE_ACTIVE_FILE);
			x += nr_pages;
		}
	}

	/*
	 * Unreclaimable memory (kernel memory or anonymous memory
	 * without swap) can bring down the dirtyable pages below
	 * the zone's dirty balance reserve and the above calculation
	 * will underflow.  However we still want to add in nodes
	 * which are below threshold (negative values) to get a more
	 * accurate calculation but make sure that the total never
	 * underflows.
	 */
	if ((long)x < 0)
		x = 0;

	/*
	 * Make sure that the number of highmem pages is never larger
	 * than the number of the total dirtyable memory. This can only
	 * occur in very strange VM situations but we want to make sure
	 * that this does not occur.
	 */
	return min(x, total);
#else
	return 0;
#endif
}

/**
 * global_dirtyable_memory - number of globally dirtyable pages
 *
 * Returns the global number of pages potentially available for dirty
 * page cache.  This is the base value for the global dirty limits.
 */
static unsigned long global_dirtyable_memory(void)
{
	unsigned long x;

	x = global_zone_page_state(NR_FREE_PAGES);
	/*
	 * Pages reserved for the kernel should not be considered
	 * dirtyable, to prevent a situation where reclaim has to
	 * clean pages in order to balance the zones.
	 */
	x -= min(x, totalreserve_pages);

	x += global_node_page_state(NR_INACTIVE_FILE);
	x += global_node_page_state(NR_ACTIVE_FILE);

	if (!vm_highmem_is_dirtyable)
		x -= highmem_dirtyable_memory(x);

	return x + 1;	/* Ensure that we never return 0 */
}

/**
 * domain_dirty_limits - calculate thresh and bg_thresh for a wb_domain
 * @dtc: dirty_throttle_control of interest
 *
 * Calculate @dtc->thresh and ->bg_thresh considering
 * vm_dirty_{bytes|ratio} and dirty_background_{bytes|ratio}.  The caller
 * must ensure that @dtc->avail is set before calling this function.  The
 * dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
 * real-time tasks.
 */
static void domain_dirty_limits(struct dirty_throttle_control *dtc)
{
	const unsigned long available_memory = dtc->avail;
	struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
	unsigned long bytes = vm_dirty_bytes;
	unsigned long bg_bytes = dirty_background_bytes;
	/* convert ratios to per-PAGE_SIZE for higher precision */
	unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
	unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
	unsigned long thresh;
	unsigned long bg_thresh;
	struct task_struct *tsk;

	/* gdtc is !NULL iff @dtc is for memcg domain */
	if (gdtc) {
		unsigned long global_avail = gdtc->avail;

		/*
		 * The byte settings can't be applied directly to memcg
		 * domains.  Convert them to ratios by scaling against
		 * globally available memory.  As the ratios are in
		 * per-PAGE_SIZE, they can be obtained by dividing bytes by
		 * number of pages.
		 */
		if (bytes)
			ratio = min(DIV_ROUND_UP(bytes, global_avail),
				    PAGE_SIZE);
		if (bg_bytes)
			bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
				       PAGE_SIZE);
		bytes = bg_bytes = 0;
	}

	if (bytes)
		thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
	else
		thresh = (ratio * available_memory) / PAGE_SIZE;

	if (bg_bytes)
		bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
	else
		bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;

	if (bg_thresh >= thresh)
		bg_thresh = thresh / 2;
	tsk = current;
	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
		bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
		thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
	}
	dtc->thresh = thresh;
	dtc->bg_thresh = bg_thresh;

	/* we should eventually report the domain in the TP */
	if (!gdtc)
		trace_global_dirty_state(bg_thresh, thresh);
}

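/*
 * Worked example (editor's illustration), assuming 4 GiB of dirtyable
 * memory (avail = 1048576 4K pages), vm_dirty_ratio = 20 and
 * dirty_background_ratio = 10, with both byte limits at 0:
 *
 *	ratio	  = 20 * 4096 / 100 = 819	(per-PAGE_SIZE units)
 *	thresh	  = 819 * 1048576 / 4096 = 209664 pages (~819 MiB)
 *	bg_thresh = 409 * 1048576 / 4096 = 104704 pages (~409 MiB)
 *
 * A PF_LESS_THROTTLE or real-time task then sees both thresholds lifted
 * by 1/4 plus global_wb_domain.dirty_limit / 32.
 */
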
/**
 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 * @pbackground: out parameter for bg_thresh
 * @pdirty: out parameter for thresh
 *
 * Calculate bg_thresh and thresh for global_wb_domain.  See
 * domain_dirty_limits() for details.
 */
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
{
	struct dirty_throttle_control gdtc = { GDTC_INIT_NO_WB };

	gdtc.avail = global_dirtyable_memory();
	domain_dirty_limits(&gdtc);

	*pbackground = gdtc.bg_thresh;
	*pdirty = gdtc.thresh;
}

/**
 * node_dirty_limit - maximum number of dirty pages allowed in a node
 * @pgdat: the node
 *
 * Returns the maximum number of dirty pages allowed in a node, based
 * on the node's dirtyable memory.
 */
static unsigned long node_dirty_limit(struct pglist_data *pgdat)
{
	unsigned long node_memory = node_dirtyable_memory(pgdat);
	struct task_struct *tsk = current;
	unsigned long dirty;

	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
			node_memory / global_dirtyable_memory();
	else
		dirty = vm_dirty_ratio * node_memory / 100;

	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk))
		dirty += dirty / 4;

	return dirty;
}

/**
 * node_dirty_ok - tells whether a node is within its dirty limits
 * @pgdat: the node to check
 *
 * Returns %true when the dirty pages in @pgdat are within the node's
 * dirty limit, %false if the limit is exceeded.
 */
bool node_dirty_ok(struct pglist_data *pgdat)
{
	unsigned long limit = node_dirty_limit(pgdat);
	unsigned long nr_pages = 0;

	nr_pages += node_page_state(pgdat, NR_FILE_DIRTY);
	nr_pages += node_page_state(pgdat, NR_UNSTABLE_NFS);
	nr_pages += node_page_state(pgdat, NR_WRITEBACK);

	return nr_pages <= limit;
}

int dirty_background_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_bytes = 0;
	return ret;
}

int dirty_background_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_ratio = 0;
	return ret;
}

int dirty_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int old_ratio = vm_dirty_ratio;
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
		writeback_set_ratelimit();
		vm_dirty_bytes = 0;
	}
	return ret;
}

int dirty_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	unsigned long old_bytes = vm_dirty_bytes;
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
		writeback_set_ratelimit();
		vm_dirty_ratio = 0;
	}
	return ret;
}

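/*
 * Note that the four handlers above make the ratio and bytes forms of
 * each limit mutually exclusive: writing one form clears its
 * counterpart.  For example:
 *
 *	# sysctl -w vm.dirty_bytes=1073741824
 *
 * sets an absolute 1 GiB limit and implicitly resets vm.dirty_ratio to 0.
 */
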
static unsigned long wp_next_time(unsigned long cur_time)
{
	cur_time += VM_COMPLETIONS_PERIOD_LEN;
	/* 0 has a special meaning... */
	if (!cur_time)
		return 1;
	return cur_time;
}

static void wb_domain_writeout_inc(struct wb_domain *dom,
				   struct fprop_local_percpu *completions,
				   unsigned int max_prop_frac)
{
	__fprop_inc_percpu_max(&dom->completions, completions,
			       max_prop_frac);
	/* First event after period switching was turned off? */
	if (unlikely(!dom->period_time)) {
		/*
		 * We can race with other __bdi_writeout_inc calls here but
		 * it does not cause any harm since the resulting time when
		 * timer will fire and what is in writeout_period_time will be
		 * roughly the same.
		 */
		dom->period_time = wp_next_time(jiffies);
		mod_timer(&dom->period_timer, dom->period_time);
	}
}

/*
 * Increment @wb's writeout completion count and the global writeout
 * completion count. Called from test_clear_page_writeback().
 */
static inline void __wb_writeout_inc(struct bdi_writeback *wb)
{
	struct wb_domain *cgdom;

	inc_wb_stat(wb, WB_WRITTEN);
	wb_domain_writeout_inc(&global_wb_domain, &wb->completions,
			       wb->bdi->max_prop_frac);

	cgdom = mem_cgroup_wb_domain(wb);
	if (cgdom)
		wb_domain_writeout_inc(cgdom, wb_memcg_completions(wb),
				       wb->bdi->max_prop_frac);
}

void wb_writeout_inc(struct bdi_writeback *wb)
{
	unsigned long flags;

	local_irq_save(flags);
	__wb_writeout_inc(wb);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(wb_writeout_inc);

/*
 * On an idle system, we can be called long after we scheduled because we use
 * deferred timers, so we must account for missed periods.
 */
static void writeout_period(struct timer_list *t)
{
	struct wb_domain *dom = from_timer(dom, t, period_timer);
	int miss_periods = (jiffies - dom->period_time) /
						 VM_COMPLETIONS_PERIOD_LEN;

	if (fprop_new_period(&dom->completions, miss_periods + 1)) {
		dom->period_time = wp_next_time(dom->period_time +
				miss_periods * VM_COMPLETIONS_PERIOD_LEN);
		mod_timer(&dom->period_timer, dom->period_time);
	} else {
		/*
		 * Aging has zeroed all fractions. Stop wasting CPU on period
		 * updates.
		 */
		dom->period_time = 0;
	}
}

int wb_domain_init(struct wb_domain *dom, gfp_t gfp)
{
	memset(dom, 0, sizeof(*dom));

	spin_lock_init(&dom->lock);

	timer_setup(&dom->period_timer, writeout_period, TIMER_DEFERRABLE);

	dom->dirty_limit_tstamp = jiffies;

	return fprop_global_init(&dom->completions, gfp);
}

#ifdef CONFIG_CGROUP_WRITEBACK
void wb_domain_exit(struct wb_domain *dom)
{
	del_timer_sync(&dom->period_timer);
	fprop_global_destroy(&dom->completions);
}
#endif

/*
 * bdi_min_ratio keeps the sum of the minimum dirty shares of all
 * registered backing devices, which, for obvious reasons, cannot
 * exceed 100%.
 */
static unsigned int bdi_min_ratio;

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
	int ret = 0;

	spin_lock_bh(&bdi_lock);
	if (min_ratio > bdi->max_ratio) {
		ret = -EINVAL;
	} else {
		min_ratio -= bdi->min_ratio;
		if (bdi_min_ratio + min_ratio < 100) {
			bdi_min_ratio += min_ratio;
			bdi->min_ratio += min_ratio;
		} else {
			ret = -EINVAL;
		}
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}

int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
{
	int ret = 0;

	if (max_ratio > 100)
		return -EINVAL;

	spin_lock_bh(&bdi_lock);
	if (bdi->min_ratio > max_ratio) {
		ret = -EINVAL;
	} else {
		bdi->max_ratio = max_ratio;
		bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100;
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}
EXPORT_SYMBOL(bdi_set_max_ratio);

static unsigned long dirty_freerun_ceiling(unsigned long thresh,
					   unsigned long bg_thresh)
{
	return (thresh + bg_thresh) / 2;
}

static unsigned long hard_dirty_limit(struct wb_domain *dom,
				      unsigned long thresh)
{
	return max(thresh, dom->dirty_limit);
}

/*
 * Memory which can be further allocated to a memcg domain is capped by
 * system-wide clean memory excluding the amount being used in the domain.
 */
static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
			    unsigned long filepages, unsigned long headroom)
{
	struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc);
	unsigned long clean = filepages - min(filepages, mdtc->dirty);
	unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty);
	unsigned long other_clean = global_clean - min(global_clean, clean);

	mdtc->avail = filepages + min(headroom, other_clean);
}

/**
 * __wb_calc_thresh - @wb's share of dirty throttling threshold
 * @dtc: dirty_throttle_context of interest
 *
 * Returns @wb's dirty limit in pages. The term "dirty" in the context of
 * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
 *
 * Note that balance_dirty_pages() will only seriously take it as a hard limit
 * when sleeping max_pause per page is not enough to keep the dirty pages under
 * control. For example, when the device is completely stalled due to some error
 * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
 * In other normal situations, it acts more gently by throttling the tasks
 * more (rather than completely blocking them) when the wb dirty pages go high.
 *
 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
 * - starving fast devices
 * - piling up dirty pages (that will take long time to sync) on slow devices
 *
 * The wb's share of dirty limit will be adapting to its throughput and
 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 */
static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc)
{
	struct wb_domain *dom = dtc_dom(dtc);
	unsigned long thresh = dtc->thresh;
	u64 wb_thresh;
	long numerator, denominator;
	unsigned long wb_min_ratio, wb_max_ratio;

	/*
	 * Calculate this BDI's share of the thresh ratio.
	 */
	fprop_fraction_percpu(&dom->completions, dtc->wb_completions,
			      &numerator, &denominator);

	wb_thresh = (thresh * (100 - bdi_min_ratio)) / 100;
	wb_thresh *= numerator;
	do_div(wb_thresh, denominator);

	wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio);

	wb_thresh += (thresh * wb_min_ratio) / 100;
	if (wb_thresh > (thresh * wb_max_ratio) / 100)
		wb_thresh = thresh * wb_max_ratio / 100;

	return wb_thresh;
}

unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
{
	struct dirty_throttle_control gdtc = { GDTC_INIT(wb),
					       .thresh = thresh };
	return __wb_calc_thresh(&gdtc);
}

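/*
 * For example, with the global bdi_min_ratio at 0 and no min/max ratio
 * set on the wb's bdi, a wb that has recently completed 1/4 of all
 * writeout in the domain (fprop fraction numerator/denominator == 1/4)
 * gets wb_thresh = thresh / 4.
 */
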
/*
 *                           setpoint - dirty 3
 *        f(dirty) := 1.0 + (----------------)
 *                           limit - setpoint
 *
 * it's a 3rd order polynomial that is subject to
 *
 * (1) f(freerun)  = 2.0 => rampup dirty_ratelimit reasonably fast
 * (2) f(setpoint) = 1.0 => the balance point
 * (3) f(limit)    = 0   => the hard limit
 * (4) df/dx      <= 0	 => negative feedback control
 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
 *     => fast response on large errors; small oscillation near setpoint
 */
static long long pos_ratio_polynom(unsigned long setpoint,
					  unsigned long dirty,
					  unsigned long limit)
{
	long long pos_ratio;
	long x;

	x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
		      (limit - setpoint) | 1);
	pos_ratio = x;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;

	return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT);
}

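/*
 * Worked example in RATELIMIT_CALC_SHIFT fixed point (1024 == 1.0):
 * with setpoint = 1000 pages, limit = 2000 pages and dirty = 500,
 *
 *	x         = ((1000 - 500) << 10) / (2000 - 1000) = 512	(0.5)
 *	pos_ratio = 1024 + (512^3 >> 20) = 1024 + 128 = 1152	(1.125)
 *
 * At dirty == setpoint, x == 0 and f == 1.0; at dirty == limit,
 * x == -1.0 and f == 0, matching properties (2) and (3) above.
 */
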
/*
 * Dirty position control.
 *
 * (o) global/bdi setpoints
 *
 * We want the dirty pages be balanced around the global/wb setpoints.
 * When the number of dirty pages is higher/lower than the setpoint, the
 * dirty position control ratio (and hence task dirty ratelimit) will be
 * decreased/increased to bring the dirty pages back to the setpoint.
 *
 *     pos_ratio = 1 << RATELIMIT_CALC_SHIFT
 *
 *     if (dirty < setpoint) scale up   pos_ratio
 *     if (dirty > setpoint) scale down pos_ratio
 *
 *     if (wb_dirty < wb_setpoint) scale up   pos_ratio
 *     if (wb_dirty > wb_setpoint) scale down pos_ratio
 *
 *     task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
 *
 * (o) global control line
 *
 *     ^ pos_ratio
 *     |
 *     |            |<===== global dirty control scope ======>|
 * 2.0 .............*
 *     |            .*
 *     |            . *
 *     |            .   *
 *     |            .     *
 *     |            .        *
 *     |            .            *
 * 1.0 ................................*
 *     |            .                  .     *
 *     |            .                  .          *
 *     |            .                  .              *
 *     |            .                  .                 *
 *     |            .                  .                    *
 *   0 +------------.------------------.----------------------*------------->
 *           freerun^          setpoint^                 limit^   dirty pages
 *
 * (o) wb control line
 *
 *     ^ pos_ratio
 *     |
 *     |            *
 *     |              *
 *     |                *
 *     |                  *
 *     |                    * |<=========== span ============>|
 * 1.0 .......................*
 *     |                      . *
 *     |                      .   *
 *     |                      .     *
 *     |                      .       *
 *     |                      .         *
 *     |                      .           *
 *     |                      .             *
 *     |                      .               *
 *     |                      .                 *
 *     |                      .                   *
 *     |                      .                     *
 * 1/4 ...............................................* * * * * * * * * * * *
 *     |                      .                         .
 *     |                      .                           .
 *     |                      .                             .
 *   0 +----------------------.-------------------------------.------------->
 *                wb_setpoint^                    x_intercept^
 *
 * The wb control line won't drop below pos_ratio=1/4, so that wb_dirty can
 * be smoothly throttled down to normal if it starts high in situations like
 * - start writing to a slow SD card and a fast disk at the same time. The SD
 *   card's wb_dirty may rush to many times higher than wb_setpoint.
 * - the wb dirty thresh drops quickly due to change of JBOD workload
 */
static void wb_position_ratio(struct dirty_throttle_control *dtc)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long write_bw = wb->avg_write_bandwidth;
	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
	unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
	unsigned long wb_thresh = dtc->wb_thresh;
	unsigned long x_intercept;
	unsigned long setpoint;		/* dirty pages' target balance point */
	unsigned long wb_setpoint;
	unsigned long span;
	long long pos_ratio;		/* for scaling up/down the rate limit */
	long x;

	dtc->pos_ratio = 0;

	if (unlikely(dtc->dirty >= limit))
		return;

	/*
	 * global setpoint
	 *
	 * See comment for pos_ratio_polynom().
	 */
	setpoint = (freerun + limit) / 2;
	pos_ratio = pos_ratio_polynom(setpoint, dtc->dirty, limit);

	/*
	 * The strictlimit feature is a tool preventing mistrusted filesystems
	 * from growing a large number of dirty pages before throttling. For
	 * such filesystems balance_dirty_pages always checks wb counters
	 * against wb limits. Even if global "nr_dirty" is under "freerun".
	 * This is especially important for fuse which sets bdi->max_ratio to
	 * 1% by default. Without strictlimit feature, fuse writeback may
	 * consume arbitrary amount of RAM because it is accounted in
	 * NR_WRITEBACK_TEMP which is not involved in calculating "nr_dirty".
	 *
	 * Here, in wb_position_ratio(), we calculate pos_ratio based on
	 * two values: wb_dirty and wb_thresh. Let's consider an example:
	 * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global
	 * limits are set by default to 10% and 20% (background and throttle).
	 * Then wb_thresh is 1% of 20% of 16GB. This amounts to ~8K pages.
	 * wb_calc_thresh(wb, bg_thresh) is about ~4K pages. wb_setpoint is
	 * about ~6K pages (as the average of background and throttle wb
	 * limits). The 3rd order polynomial will provide positive feedback if
	 * wb_dirty is under wb_setpoint and vice versa.
	 *
	 * Note, that we cannot use global counters in these calculations
	 * because we want to throttle process writing to a strictlimit wb
	 * much earlier than global "freerun" is reached (~23MB vs. ~2.3GB
	 * in the example above).
	 */
	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
		long long wb_pos_ratio;

		if (dtc->wb_dirty < 8) {
			dtc->pos_ratio = min_t(long long, pos_ratio * 2,
					   2 << RATELIMIT_CALC_SHIFT);
			return;
		}

		if (dtc->wb_dirty >= wb_thresh)
			return;

		wb_setpoint = dirty_freerun_ceiling(wb_thresh,
						    dtc->wb_bg_thresh);

		if (wb_setpoint == 0 || wb_setpoint == wb_thresh)
			return;

		wb_pos_ratio = pos_ratio_polynom(wb_setpoint, dtc->wb_dirty,
						 wb_thresh);

		/*
		 * Typically, for strictlimit case, wb_setpoint << setpoint
		 * and pos_ratio >> wb_pos_ratio. In other words, global
		 * state ("dirty") is not the limiting factor and we have to
		 * make decision based on wb counters. But there is an
		 * important case when global pos_ratio should get precedence:
		 * global limits are exceeded (e.g. due to activities on other
		 * wb's) while given strictlimit wb is below limit.
		 *
		 * "pos_ratio * wb_pos_ratio" would work for the case above,
		 * but it would look too unnatural for the case of all
		 * activity in the system coming from a single strictlimit wb
		 * with bdi->max_ratio == 100%.
		 *
		 * Note that min() below somewhat changes the dynamics of the
		 * control system. Normally, pos_ratio value can be well over 3
		 * (when globally we are at freerun and wb is well below wb
		 * setpoint). Now the maximum pos_ratio in the same situation
		 * is 2. We might want to tweak this if we observe the control
		 * system is too slow to adapt.
		 */
		dtc->pos_ratio = min(pos_ratio, wb_pos_ratio);
		return;
	}

	/*
	 * We have computed basic pos_ratio above based on global situation. If
	 * the wb is over/under its share of dirty pages, we want to scale
	 * pos_ratio further down/up. That is done by the following mechanism.
	 */

	/*
	 * wb setpoint
	 *
	 *        f(wb_dirty) := 1.0 + k * (wb_dirty - wb_setpoint)
	 *
	 *                        x_intercept - wb_dirty
	 *                     := --------------------------
	 *                        x_intercept - wb_setpoint
	 *
	 * The main wb control line is a linear function that is subject to
	 *
	 * (1) f(wb_setpoint) = 1.0
	 * (2) k = - 1 / (8 * write_bw)  (in single wb case)
	 *     or equally: x_intercept = wb_setpoint + 8 * write_bw
	 *
	 * For single wb case, the dirty pages are observed to fluctuate
	 * regularly within range
	 *        [wb_setpoint - write_bw/2, wb_setpoint + write_bw/2]
	 * for various filesystems, where (2) can yield a reasonable 12.5%
	 * fluctuation range for pos_ratio.
	 *
	 * For JBOD case, wb_thresh (not wb_dirty!) could fluctuate up to its
	 * own size, so move the slope over accordingly and choose a slope that
	 * yields 100% pos_ratio fluctuation on suddenly doubled wb_thresh.
	 */
	if (unlikely(wb_thresh > dtc->thresh))
		wb_thresh = dtc->thresh;
	/*
	 * It's very possible that wb_thresh is close to 0 not because the
	 * device is slow, but because it has remained inactive for a long
	 * time.  Honour such devices with a reasonably good (hopefully IO
	 * efficient) threshold, so that occasional writes won't be blocked
	 * and active writes can ramp up the threshold quickly.
	 */
	wb_thresh = max(wb_thresh, (limit - dtc->dirty) / 8);
	/*
	 * scale global setpoint to wb's:
	 *	wb_setpoint = setpoint * wb_thresh / thresh
	 */
	x = div_u64((u64)wb_thresh << 16, dtc->thresh | 1);
	wb_setpoint = setpoint * (u64)x >> 16;
	/*
	 * Use span=(8*write_bw) in single wb case as indicated by
	 * (thresh - wb_thresh ~= 0) and transit to wb_thresh in JBOD case.
	 *
	 *        wb_thresh                    thresh - wb_thresh
	 * span = --------- * (8 * write_bw) + ------------------ * wb_thresh
	 *         thresh                           thresh
	 */
	span = (dtc->thresh - wb_thresh + 8 * write_bw) * (u64)x >> 16;
	x_intercept = wb_setpoint + span;

	if (dtc->wb_dirty < x_intercept - span / 4) {
		pos_ratio = div64_u64(pos_ratio * (x_intercept - dtc->wb_dirty),
				      (x_intercept - wb_setpoint) | 1);
	} else
		pos_ratio /= 4;

	/*
	 * wb reserve area, safeguard against dirty pool underrun and disk idle
	 * It may push the desired control point of global dirty pages higher
	 * than setpoint.
	 */
	x_intercept = wb_thresh / 2;
	if (dtc->wb_dirty < x_intercept) {
		if (dtc->wb_dirty > x_intercept / 8)
			pos_ratio = div_u64(pos_ratio * x_intercept,
					    dtc->wb_dirty);
		else
			pos_ratio *= 8;
	}

	dtc->pos_ratio = pos_ratio;
}

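/*
 * Worked example for the wb control line, assuming a single wb with
 * avg_write_bandwidth = 25600 pages/s (100 MB/s at 4K pages) and
 * thresh ~= wb_thresh: x ~= 1 << 16, so span ~= 8 * write_bw = 204800
 * pages and x_intercept = wb_setpoint + 204800.  The global pos_ratio
 * is then scaled by (x_intercept - wb_dirty) / span, i.e. the slope
 * k = -1 / (8 * write_bw) from (2) above, until the scale factor hits
 * its 1/4 floor at wb_dirty = x_intercept - span / 4.
 */
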
static void wb_update_write_bandwidth(struct bdi_writeback *wb,
				      unsigned long elapsed,
				      unsigned long written)
{
	const unsigned long period = roundup_pow_of_two(3 * HZ);
	unsigned long avg = wb->avg_write_bandwidth;
	unsigned long old = wb->write_bandwidth;
	u64 bw;

	/*
	 * bw = written * HZ / elapsed
	 *
	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
	 * write_bandwidth = ---------------------------------------------------
	 *                                          period
	 *
	 * @written may have decreased due to account_page_redirty().
	 * Avoid underflowing @bw calculation.
	 */
	bw = written - min(written, wb->written_stamp);
	bw *= HZ;
	if (unlikely(elapsed > period)) {
		do_div(bw, elapsed);
		avg = bw;
		goto out;
	}
	bw += (u64)wb->write_bandwidth * (period - elapsed);
	bw >>= ilog2(period);

	/*
	 * one more level of smoothing, for filtering out sudden spikes
	 */
	if (avg > old && old >= (unsigned long)bw)
		avg -= (avg - old) >> 3;

	if (avg < old && old <= (unsigned long)bw)
		avg += (old - avg) >> 3;

out:
	/* keep avg > 0 to guarantee that tot > 0 if there are dirty wbs */
	avg = max(avg, 1LU);
	if (wb_has_dirty_io(wb)) {
		long delta = avg - wb->avg_write_bandwidth;
		WARN_ON_ONCE(atomic_long_add_return(delta,
					&wb->bdi->tot_write_bandwidth) <= 0);
	}
	wb->write_bandwidth = bw;
	wb->avg_write_bandwidth = avg;
}

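/*
 * Worked example, assuming HZ == 1000: period = roundup_pow_of_two(3 *
 * 1000) = 4096 jiffies.  If written - written_stamp = 5120 pages over
 * elapsed = 1024 jiffies, the instantaneous rate is 5120 * HZ / 1024 =
 * 5000 pages/s, blended in with weight elapsed/period = 1/4:
 *
 *	write_bandwidth = (5120 * HZ + old * (4096 - 1024)) >> 12
 *
 * The second-stage filter then moves avg_write_bandwidth 1/8 of the way
 * toward the previous write_bandwidth, and only when the newest sample
 * confirms the direction, filtering out sudden spikes.
 */
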
static void update_dirty_limit(struct dirty_throttle_control *dtc)
{
	struct wb_domain *dom = dtc_dom(dtc);
	unsigned long thresh = dtc->thresh;
	unsigned long limit = dom->dirty_limit;

	/*
	 * Follow up in one step.
	 */
	if (limit < thresh) {
		limit = thresh;
		goto update;
	}

	/*
	 * Follow down slowly. Use the higher one as the target, because thresh
	 * may drop below dirty. This is exactly the reason to introduce
	 * dom->dirty_limit which is guaranteed to lie above the dirty pages.
	 */
	thresh = max(thresh, dtc->dirty);
	if (limit > thresh) {
		limit -= (limit - thresh) >> 5;
		goto update;
	}
	return;
update:
	dom->dirty_limit = limit;
}

static void domain_update_bandwidth(struct dirty_throttle_control *dtc,
				    unsigned long now)
{
	struct wb_domain *dom = dtc_dom(dtc);

	/*
	 * check locklessly first to optimize away locking for most of the time
	 */
	if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL))
		return;

	spin_lock(&dom->lock);
	if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) {
		update_dirty_limit(dtc);
		dom->dirty_limit_tstamp = now;
	}
	spin_unlock(&dom->lock);
}

/*
 * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
 *
 * Normal wb tasks will be curbed at or below it in the long term.
 * Obviously it should be around (write_bw / N) when there are N dd tasks.
 */
static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
				      unsigned long dirtied,
				      unsigned long elapsed)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long dirty = dtc->dirty;
	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
	unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
	unsigned long setpoint = (freerun + limit) / 2;
	unsigned long write_bw = wb->avg_write_bandwidth;
	unsigned long dirty_ratelimit = wb->dirty_ratelimit;
	unsigned long dirty_rate;
	unsigned long task_ratelimit;
	unsigned long balanced_dirty_ratelimit;
	unsigned long step;
	unsigned long x;
	unsigned long shift;

	/*
	 * The dirty rate will match the writeout rate in the long term, except
	 * when dirty pages are truncated by userspace or re-dirtied by FS.
	 */
	dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed;

	/*
	 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
	 */
	task_ratelimit = (u64)dirty_ratelimit *
					dtc->pos_ratio >> RATELIMIT_CALC_SHIFT;
	task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */

	/*
	 * A linear estimation of the "balanced" throttle rate. The theory is,
	 * if there are N dd tasks, each throttled at task_ratelimit, the wb's
	 * dirty_rate will be measured to be (N * task_ratelimit). So the below
	 * formula will yield the balanced rate limit (write_bw / N).
	 *
	 * Note that the expanded form is not a pure rate feedback:
	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate)		     (1)
	 * but also takes pos_ratio into account:
	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio  (2)
	 *
	 * (1) is not realistic because pos_ratio also takes part in balancing
	 * the dirty rate.  Consider the state
	 *	pos_ratio = 0.5						     (3)
	 *	rate = 2 * (write_bw / N)				     (4)
	 * If (1) is used, it will get stuck in that state! Because each dd
	 * will be throttled at
	 *	task_ratelimit = pos_ratio * rate = (write_bw / N)	     (5)
	 * yielding
	 *	dirty_rate = N * task_ratelimit = write_bw		     (6)
	 * Substituting (6) into (1) we get
	 *	rate_(i+1) = rate_(i)					     (7)
	 *
	 * So we end up using (2) to always keep
	 *	rate_(i+1) ~= (write_bw / N)				     (8)
	 * regardless of the value of pos_ratio. As long as (8) is satisfied,
	 * pos_ratio is able to drive itself to 1.0, which is not only where
	 * the dirty count meets the setpoint, but also where the slope of
	 * pos_ratio is most flat and hence task_ratelimit is least fluctuated.
	 */
	balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
					   dirty_rate | 1);
	/*
	 * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
	 */
	if (unlikely(balanced_dirty_ratelimit > write_bw))
		balanced_dirty_ratelimit = write_bw;

	/*
	 * We could safely do this and return immediately:
	 *
	 *	wb->dirty_ratelimit = balanced_dirty_ratelimit;
	 *
	 * However to get a more stable dirty_ratelimit, the below elaborated
	 * code makes use of task_ratelimit to filter out singular points and
	 * limit the step size.
	 *
	 * The below code essentially only uses the relative value of
	 *
	 *	task_ratelimit - dirty_ratelimit
	 *	= (pos_ratio - 1) * dirty_ratelimit
	 *
	 * which reflects the direction and size of dirty position error.
	 */

	/*
	 * dirty_ratelimit will follow balanced_dirty_ratelimit iff
	 * task_ratelimit is on the same side of dirty_ratelimit, too.
	 * For example, when
	 * - dirty_ratelimit > balanced_dirty_ratelimit
	 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
	 * lowering dirty_ratelimit will help meet both the position and rate
	 * control targets. Otherwise, don't update dirty_ratelimit if it will
	 * only help meet the rate target. After all, what the users ultimately
	 * feel and care are stable dirty rate and small position error.
	 *
	 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
	 * and filter out the singular points of balanced_dirty_ratelimit,
	 * which keeps jumping around randomly and can even leap far away at
	 * times due to the small 200ms estimation period of dirty_rate (we
	 * want to keep that period small to reduce time lags).
	 */
	step = 0;

	/*
	 * For strictlimit case, calculations above were based on wb counters
	 * and limits (starting from pos_ratio = wb_position_ratio() and up to
	 * balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate).
	 * Hence, to calculate "step" properly, we have to use wb_dirty as
	 * "dirty" and wb_setpoint as "setpoint".
	 *
	 * We ramp up dirty_ratelimit forcibly if wb_dirty is low because
	 * it's possible that wb_thresh is close to zero due to inactivity
	 * of backing device.
	 */
	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
		dirty = dtc->wb_dirty;
		if (dtc->wb_dirty < 8)
			setpoint = dtc->wb_dirty + 1;
		else
			setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2;
	}

	if (dirty < setpoint) {
		x = min3(wb->balanced_dirty_ratelimit,
			 balanced_dirty_ratelimit, task_ratelimit);
		if (dirty_ratelimit < x)
			step = x - dirty_ratelimit;
	} else {
		x = max3(wb->balanced_dirty_ratelimit,
			 balanced_dirty_ratelimit, task_ratelimit);
		if (dirty_ratelimit > x)
			step = dirty_ratelimit - x;
	}

	/*
	 * Don't pursue 100% rate matching. It's impossible since the balanced
	 * rate itself is constantly fluctuating. So decrease the track speed
	 * when it gets close to the target. Helps eliminate pointless tremors.
	 */
	shift = dirty_ratelimit / (2 * step + 1);
	if (shift < BITS_PER_LONG)
		step = DIV_ROUND_UP(step >> shift, 8);
	else
		step = 0;

	if (dirty_ratelimit < balanced_dirty_ratelimit)
		dirty_ratelimit += step;
	else
		dirty_ratelimit -= step;

	wb->dirty_ratelimit = max(dirty_ratelimit, 1UL);
	wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit;

	trace_bdi_dirty_ratelimit(wb, dirty_rate, task_ratelimit);
}

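/*
 * Worked example for formula (2) above: with write_bw = 25600 pages/s
 * and two dd tasks each throttled at task_ratelimit = 25600 pages/s
 * (pos_ratio == 1.0, dirty_ratelimit == 25600), the measured dirty_rate
 * is ~51200 pages/s, so
 *
 *	balanced_dirty_ratelimit = 25600 * 25600 / 51200 = 12800 pages/s
 *
 * i.e. write_bw / N for N == 2, as required by (8).
 */
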
static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc,
				  struct dirty_throttle_control *mdtc,
				  unsigned long start_time,
				  bool update_ratelimit)
{
	struct bdi_writeback *wb = gdtc->wb;
	unsigned long now = jiffies;
	unsigned long elapsed = now - wb->bw_time_stamp;
	unsigned long dirtied;
	unsigned long written;

	lockdep_assert_held(&wb->list_lock);

	/*
	 * rate-limit, only update once every 200ms.
	 */
	if (elapsed < BANDWIDTH_INTERVAL)
		return;

	dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]);
	written = percpu_counter_read(&wb->stat[WB_WRITTEN]);

	/*
	 * Skip quiet periods when disk bandwidth is under-utilized.
	 * (at least 1s idle time between two flusher runs)
	 */
	if (elapsed > HZ && time_before(wb->bw_time_stamp, start_time))
		goto snapshot;

	if (update_ratelimit) {
		domain_update_bandwidth(gdtc, now);
		wb_update_dirty_ratelimit(gdtc, dirtied, elapsed);

		/*
		 * @mdtc is always NULL if !CGROUP_WRITEBACK but the
		 * compiler has no way to figure that out.  Help it.
		 */
		if (IS_ENABLED(CONFIG_CGROUP_WRITEBACK) && mdtc) {
			domain_update_bandwidth(mdtc, now);
			wb_update_dirty_ratelimit(mdtc, dirtied, elapsed);
		}
	}
	wb_update_write_bandwidth(wb, elapsed, written);

snapshot:
	wb->dirtied_stamp = dirtied;
	wb->written_stamp = written;
	wb->bw_time_stamp = now;
}

void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time)
{
	struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };

	__wb_update_bandwidth(&gdtc, NULL, start_time, false);
}

/*
 * After a task dirtied this many pages, balance_dirty_pages_ratelimited()
 * will look to see if it needs to start dirty throttling.
 *
 * If dirty_poll_interval is too low, big NUMA machines will call the expensive
 * global_zone_page_state() too often. So scale it near-sqrt to the safety margin
 * (the number of pages we may dirty without exceeding the dirty limits).
 */
static unsigned long dirty_poll_interval(unsigned long dirty,
					 unsigned long thresh)
{
	if (thresh > dirty)
		return 1UL << (ilog2(thresh - dirty) >> 1);

	return 1;
}

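/*
 * For example, with thresh - dirty = 10000 pages of safety margin,
 * ilog2(10000) = 13, so the poll interval is 1 << (13 >> 1) = 64 pages;
 * at 100 pages of margin it shrinks to 1 << 3 = 8 pages.
 */
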
static unsigned long wb_max_pause(struct bdi_writeback *wb,
				  unsigned long wb_dirty)
{
	unsigned long bw = wb->avg_write_bandwidth;
	unsigned long t;

	/*
	 * Limit pause time for small memory systems. If we sleep for too
	 * long, a small pool of dirty/writeback pages may go empty and the
	 * disk may go idle.
	 *
	 * 8 serves as the safety ratio.
	 */
	t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
	t++;

	return min_t(unsigned long, t, MAX_PAUSE);
}

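/*
 * For example, with HZ == 1000 and bw = 25600 pages/s:
 * roundup_pow_of_two(1 + HZ / 8) = 128, so t = wb_dirty / (1 + 200) + 1.
 * 2010 dirty pages then allow at most 11 jiffies of sleep, well under
 * the MAX_PAUSE cap of 200 jiffies, keeping the small dirty pool from
 * draining while we sleep.
 */
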
static long wb_min_pause(struct bdi_writeback *wb,
			 long max_pause,
			 unsigned long task_ratelimit,
			 unsigned long dirty_ratelimit,
			 int *nr_dirtied_pause)
{
	long hi = ilog2(wb->avg_write_bandwidth);
	long lo = ilog2(wb->dirty_ratelimit);
	long t;		/* target pause */
	long pause;	/* estimated next pause */
	int pages;	/* target nr_dirtied_pause */

	/* target for 10ms pause on 1-dd case */
	t = max(1, HZ / 100);

	/*
	 * Scale up pause time for concurrent dirtiers in order to reduce CPU
	 * overheads.
	 *
	 * (N * 10ms) on 2^N concurrent tasks.
	 */
	if (hi > lo)
		t += (hi - lo) * (10 * HZ) / 1024;

	/*
	 * This is a bit convoluted. We try to base the next nr_dirtied_pause
	 * on the much more stable dirty_ratelimit. However the next pause time
	 * will be computed based on task_ratelimit and the two rate limits may
	 * depart considerably at some time. Especially if task_ratelimit goes
	 * below dirty_ratelimit/2 and the target pause is max_pause, the next
	 * pause time will be max_pause*2 _trimmed down_ to max_pause.  As a
	 * result task_ratelimit won't be executed faithfully, which could
	 * eventually bring down dirty_ratelimit.
	 *
	 * We apply two rules to fix it up:
	 * 1) try to estimate the next pause time and if necessary, use a lower
	 *    nr_dirtied_pause so as not to exceed max_pause. When this happens,
	 *    nr_dirtied_pause will be "dancing" with task_ratelimit.
	 * 2) limit the target pause time to max_pause/2, so that the normal
	 *    small fluctuations of task_ratelimit won't trigger rule (1) and
	 *    nr_dirtied_pause will remain as stable as dirty_ratelimit.
	 */
	t = min(t, 1 + max_pause / 2);
	pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);

	/*
	 * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
	 * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
	 * When the 16 consecutive reads are often interrupted by some dirty
	 * throttling pause during the async writes, cfq will go into idles
	 * (deadline is fine). So push nr_dirtied_pause as high as possible
	 * until it reaches DIRTY_POLL_THRESH=32 pages.
	 */
	if (pages < DIRTY_POLL_THRESH) {
		t = max_pause;
		pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
		if (pages > DIRTY_POLL_THRESH) {
			pages = DIRTY_POLL_THRESH;
			t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
		}
	}

	pause = HZ * pages / (task_ratelimit + 1);
	if (pause > max_pause) {
		t = max_pause;
		pages = task_ratelimit * t / roundup_pow_of_two(HZ);
	}

	*nr_dirtied_pause = pages;
	/*
	 * The minimal pause time will normally be half the target pause time.
	 */
	return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
}

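/*
 * For example, with HZ == 1000, avg_write_bandwidth = 25600 and four
 * dd tasks so that dirty_ratelimit ~= write_bw / 4 = 6400:
 * hi - lo = ilog2(25600) - ilog2(6400) = 2, so the target pause grows
 * from 10 jiffies to 10 + 2 * 10000 / 1024 ~= 29 jiffies, subject to
 * the 1 + max_pause / 2 clamp.
 */
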
static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long wb_reclaimable;

	/*
	 * wb_thresh is not treated as a hard limiting factor the way
	 * dirty_thresh is, for these reasons:
	 * - in JBOD setup, wb_thresh can fluctuate a lot
	 * - in a system with HDD and USB key, the USB key may somehow
	 *   go into state (wb_dirty >> wb_thresh) either because
	 *   wb_dirty starts high, or because wb_thresh drops low.
	 *   In this case we don't want to hard throttle the USB key
	 *   dirtiers for 100 seconds until wb_dirty drops under
	 *   wb_thresh. Instead the auxiliary wb control line in
	 *   wb_position_ratio() will let the dirtier task progress
	 *   at some rate <= (write_bw / 2) for bringing down wb_dirty.
	 */
	dtc->wb_thresh = __wb_calc_thresh(dtc);
	dtc->wb_bg_thresh = dtc->thresh ?
		div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;

	/*
	 * In order to avoid the stacked BDI deadlock we need
	 * to ensure we accurately count the 'dirty' pages when
	 * the threshold is low.
	 *
	 * Otherwise it would be possible to get thresh+n pages
	 * reported dirty, even though there are thresh-m pages
	 * actually dirty; with m+n sitting in the percpu
	 * deltas.
	 */
	if (dtc->wb_thresh < 2 * wb_stat_error()) {
		wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
		dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK);
	} else {
		wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE);
		dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK);
	}
}
1553
1554/*
1555 * balance_dirty_pages() must be called by processes which are generating dirty
1556 * data.  It looks at the number of dirty pages in the machine and will force
1557 * the caller to wait once crossing the (background_thresh + dirty_thresh) / 2.
1558 * If we're over `background_thresh' then the writeback threads are woken to
1559 * perform some writeout.
1560 */
1561static void balance_dirty_pages(struct bdi_writeback *wb,
1562				unsigned long pages_dirtied)
1563{
1564	struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
1565	struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
1566	struct dirty_throttle_control * const gdtc = &gdtc_stor;
1567	struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
1568						     &mdtc_stor : NULL;
1569	struct dirty_throttle_control *sdtc;
1570	unsigned long nr_reclaimable;	/* = file_dirty + unstable_nfs */
1571	long period;
1572	long pause;
1573	long max_pause;
1574	long min_pause;
1575	int nr_dirtied_pause;
1576	bool dirty_exceeded = false;
1577	unsigned long task_ratelimit;
1578	unsigned long dirty_ratelimit;
1579	struct backing_dev_info *bdi = wb->bdi;
1580	bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
1581	unsigned long start_time = jiffies;
 
1582
1583	for (;;) {
1584		unsigned long now = jiffies;
1585		unsigned long dirty, thresh, bg_thresh;
1586		unsigned long m_dirty = 0;	/* stop bogus uninit warnings */
1587		unsigned long m_thresh = 0;
1588		unsigned long m_bg_thresh = 0;
1589
1590		/*
1591		 * Unstable writes are a feature of certain networked
1592		 * filesystems (i.e. NFS) in which data may have been
1593		 * written to the server's write cache, but has not yet
1594		 * been flushed to permanent storage.
1595		 */
1596		nr_reclaimable = global_node_page_state(NR_FILE_DIRTY) +
1597					global_node_page_state(NR_UNSTABLE_NFS);
1598		gdtc->avail = global_dirtyable_memory();
1599		gdtc->dirty = nr_reclaimable + global_node_page_state(NR_WRITEBACK);
1600
1601		domain_dirty_limits(gdtc);
1602
1603		if (unlikely(strictlimit)) {
1604			wb_dirty_limits(gdtc);
1605
1606			dirty = gdtc->wb_dirty;
1607			thresh = gdtc->wb_thresh;
1608			bg_thresh = gdtc->wb_bg_thresh;
1609		} else {
1610			dirty = gdtc->dirty;
1611			thresh = gdtc->thresh;
1612			bg_thresh = gdtc->bg_thresh;
1613		}
1614
1615		if (mdtc) {
1616			unsigned long filepages, headroom, writeback;
1617
1618			/*
1619			 * If @wb belongs to !root memcg, repeat the same
1620			 * basic calculations for the memcg domain.
1621			 */
1622			mem_cgroup_wb_stats(wb, &filepages, &headroom,
1623					    &mdtc->dirty, &writeback);
1624			mdtc->dirty += writeback;
1625			mdtc_calc_avail(mdtc, filepages, headroom);
1626
1627			domain_dirty_limits(mdtc);
1628
1629			if (unlikely(strictlimit)) {
1630				wb_dirty_limits(mdtc);
1631				m_dirty = mdtc->wb_dirty;
1632				m_thresh = mdtc->wb_thresh;
1633				m_bg_thresh = mdtc->wb_bg_thresh;
1634			} else {
1635				m_dirty = mdtc->dirty;
1636				m_thresh = mdtc->thresh;
1637				m_bg_thresh = mdtc->bg_thresh;
1638			}
1639		}
1640
1641		/*
1642		 * Throttle it only when the background writeback cannot
1643		 * catch up. This avoids (excessively) small writeouts
1644		 * when the wb limits are ramping up in the !strictlimit case.
1645		 *
1646		 * In the strictlimit case, make the decision based on the wb counters
1647		 * and limits. Small writeouts when the wb limits are ramping
1648		 * up are the price we consciously pay for strictlimit-ing.
1649		 *
1650		 * If memcg domain is in effect, @dirty should be under
1651		 * both global and memcg freerun ceilings.
1652		 */
1653		if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh) &&
1654		    (!mdtc ||
1655		     m_dirty <= dirty_freerun_ceiling(m_thresh, m_bg_thresh))) {
1656			unsigned long intv = dirty_poll_interval(dirty, thresh);
1657			unsigned long m_intv = ULONG_MAX;
1658
1659			current->dirty_paused_when = now;
1660			current->nr_dirtied = 0;
1661			if (mdtc)
1662				m_intv = dirty_poll_interval(m_dirty, m_thresh);
1663			current->nr_dirtied_pause = min(intv, m_intv);
1664			break;
1665		}
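		/*
		 * Sketch (illustrative; assumes dirty_poll_interval() keeps
		 * its long-standing definition earlier in this file): the
		 * poll interval chosen above is roughly the square root of
		 * the remaining headroom, computed via integer log2:
		 *
		 *	if (thresh > dirty)
		 *		return 1UL << (ilog2(thresh - dirty) >> 1);
		 *	return 1;
		 *
		 * e.g. a headroom of 16384 pages yields 1 << 7 = 128 pages
		 * dirtied before the task calls back in.
		 */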
1666
1667		if (unlikely(!writeback_in_progress(wb)))
1668			wb_start_background_writeback(wb);
1669
1670		/*
1671		 * Calculate global domain's pos_ratio and select the
1672		 * global dtc by default.
1673		 */
1674		if (!strictlimit)
1675			wb_dirty_limits(gdtc);
1676
1677		dirty_exceeded = (gdtc->wb_dirty > gdtc->wb_thresh) &&
1678			((gdtc->dirty > gdtc->thresh) || strictlimit);
1679
1680		wb_position_ratio(gdtc);
1681		sdtc = gdtc;
1682
1683		if (mdtc) {
1684			/*
1685			 * If memcg domain is in effect, calculate its
1686			 * pos_ratio.  @wb should satisfy constraints from
1687			 * both global and memcg domains.  Choose the one
1688			 * w/ lower pos_ratio.
1689			 */
1690			if (!strictlimit)
1691				wb_dirty_limits(mdtc);
1692
1693			dirty_exceeded |= (mdtc->wb_dirty > mdtc->wb_thresh) &&
1694				((mdtc->dirty > mdtc->thresh) || strictlimit);
1695
1696			wb_position_ratio(mdtc);
1697			if (mdtc->pos_ratio < gdtc->pos_ratio)
1698				sdtc = mdtc;
1699		}
1700
1701		if (dirty_exceeded && !wb->dirty_exceeded)
1702			wb->dirty_exceeded = 1;
1703
1704		if (time_is_before_jiffies(wb->bw_time_stamp +
1705					   BANDWIDTH_INTERVAL)) {
1706			spin_lock(&wb->list_lock);
1707			__wb_update_bandwidth(gdtc, mdtc, start_time, true);
1708			spin_unlock(&wb->list_lock);
1709		}
1710
1711		/* throttle according to the chosen dtc */
1712		dirty_ratelimit = wb->dirty_ratelimit;
1713		task_ratelimit = ((u64)dirty_ratelimit * sdtc->pos_ratio) >>
1714							RATELIMIT_CALC_SHIFT;
1715		max_pause = wb_max_pause(wb, sdtc->wb_dirty);
1716		min_pause = wb_min_pause(wb, max_pause,
1717					 task_ratelimit, dirty_ratelimit,
1718					 &nr_dirtied_pause);
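		/*
		 * Worked example (hypothetical numbers): with dirty_ratelimit
		 * of 1000 pages/s and pos_ratio of 512 (i.e. 0.5 in
		 * RATELIMIT_CALC_SHIFT fixed point), task_ratelimit is
		 * (1000 * 512) >> 10 = 500 pages/s; dirtying 50 pages then
		 * earns a period of HZ * 50 / 500 = HZ/10, i.e. ~100ms.
		 */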
1719
1720		if (unlikely(task_ratelimit == 0)) {
1721			period = max_pause;
1722			pause = max_pause;
1723			goto pause;
1724		}
1725		period = HZ * pages_dirtied / task_ratelimit;
1726		pause = period;
1727		if (current->dirty_paused_when)
1728			pause -= now - current->dirty_paused_when;
1729		/*
1730		 * For less than 1s think time (ext3/4 may block the dirtier
1731		 * for up to 800ms from time to time on 1-HDD; so does xfs,
1732		 * though much less frequently), try to compensate for it in
1733		 * future periods by updating the virtual time; otherwise just
1734		 * do a reset, as it may be a light dirtier.
1735		 */
1736		if (pause < min_pause) {
1737			trace_balance_dirty_pages(wb,
1738						  sdtc->thresh,
1739						  sdtc->bg_thresh,
1740						  sdtc->dirty,
1741						  sdtc->wb_thresh,
1742						  sdtc->wb_dirty,
1743						  dirty_ratelimit,
1744						  task_ratelimit,
1745						  pages_dirtied,
1746						  period,
1747						  min(pause, 0L),
1748						  start_time);
1749			if (pause < -HZ) {
1750				current->dirty_paused_when = now;
1751				current->nr_dirtied = 0;
1752			} else if (period) {
1753				current->dirty_paused_when += period;
1754				current->nr_dirtied = 0;
1755			} else if (current->nr_dirtied_pause <= pages_dirtied)
1756				current->nr_dirtied_pause += pages_dirtied;
1757			break;
1758		}
1759		if (unlikely(pause > max_pause)) {
1760			/* for occasional dropped task_ratelimit */
1761			now += min(pause - max_pause, max_pause);
1762			pause = max_pause;
1763		}
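		/*
		 * Think-time example (hypothetical numbers): if the period
		 * computed above is 100ms but the task last paused 80ms ago
		 * (it spent that time "thinking", e.g. generating the data it
		 * writes), only the remaining 20ms is slept, so interactive
		 * dirtiers are not punished for time spent not dirtying.
		 */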
1764
1765pause:
1766		trace_balance_dirty_pages(wb,
1767					  sdtc->thresh,
1768					  sdtc->bg_thresh,
1769					  sdtc->dirty,
1770					  sdtc->wb_thresh,
1771					  sdtc->wb_dirty,
1772					  dirty_ratelimit,
1773					  task_ratelimit,
1774					  pages_dirtied,
1775					  period,
1776					  pause,
1777					  start_time);
1778		__set_current_state(TASK_KILLABLE);
1779		wb->dirty_sleep = now;
1780		io_schedule_timeout(pause);
1781
1782		current->dirty_paused_when = now + pause;
1783		current->nr_dirtied = 0;
1784		current->nr_dirtied_pause = nr_dirtied_pause;
1785
1786		/*
1787		 * This is typically equal to (dirty < thresh) and can also
1788		 * keep "1000+ dd on a slow USB stick" under control.
1789		 */
1790		if (task_ratelimit)
1791			break;
1792
1793		/*
1794		 * In the case of an unresponsive NFS server whose dirty pages
1795		 * exceed dirty_thresh, give the other good wbs a pipe to go
1796		 * through, so that tasks on them still remain responsive.
1797		 *
1798		 * In theory 1 page is enough to keep the consumer-producer
1799		 * pipe going: the flusher cleans 1 page => the task dirties 1
1800		 * more page. However wb_dirty has accounting errors.  So use
1801		 * the larger and more IO-friendly wb_stat_error.
1802		 */
1803		if (sdtc->wb_dirty <= wb_stat_error())
1804			break;
1805
1806		if (fatal_signal_pending(current))
1807			break;
1808	}
1809
1810	if (!dirty_exceeded && wb->dirty_exceeded)
1811		wb->dirty_exceeded = 0;
1812
1813	if (writeback_in_progress(wb))
1814		return;
1815
1816	/*
1817	 * In laptop mode, we wait until hitting the higher threshold before
1818	 * starting background writeout, and then write out all the way down
1819	 * to the lower threshold.  So slow writers cause minimal disk activity.
1820	 *
1821	 * In normal mode, we start background writeout at the lower
1822	 * background_thresh, to keep the amount of dirty memory low.
1823	 */
1824	if (laptop_mode)
1825		return;
1826
1827	if (nr_reclaimable > gdtc->bg_thresh)
1828		wb_start_background_writeback(wb);
1829}
1830
1831static DEFINE_PER_CPU(int, bdp_ratelimits);
1832
1833/*
1834 * Normal tasks are throttled by
1835 *	loop {
1836 *		dirty tsk->nr_dirtied_pause pages;
1837 *		take a short nap in balance_dirty_pages();
1838 *	}
1839 * However there is a worst case: if every task exits immediately after dirtying
1840 * (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never be
1841 * called to throttle the page dirties. The solution is to save the not yet
1842 * throttled page dirties in dirty_throttle_leaks on task exit and charge them
1843 * randomly to the running tasks. This works well for the above worst case,
1844 * as the new task will pick up and accumulate the old task's leaked dirty
1845 * count and eventually get throttled.
1846 */
1847DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
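/*
 * Illustrative flow (hypothetical numbers): a task exits with
 * tsk->nr_dirtied = 30 pages still unthrottled; those 30 are added to this
 * per-CPU counter on exit (outside this file). The next dirtier on that CPU
 * with room below its ratelimit picks them up in
 * balance_dirty_pages_ratelimited() below and gets throttled sooner.
 */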
1848
1849/**
1850 * balance_dirty_pages_ratelimited - balance dirty memory state
1851 * @mapping: address_space which was dirtied
1852 *
1853 * Processes which are dirtying memory should call in here once for each page
1854 * which was newly dirtied.  The function will periodically check the system's
1855 * dirty state and will initiate writeback if needed.
1856 *
1857 * On really big machines, get_writeback_state is expensive, so try to avoid
1858 * calling it too often (ratelimiting).  But once we're over the dirty memory
1859 * limit we decrease the ratelimiting by a lot, to prevent individual processes
1860 * from overshooting the limit by (ratelimit_pages) each.
1861 */
1862void balance_dirty_pages_ratelimited(struct address_space *mapping)
1863{
1864	struct inode *inode = mapping->host;
1865	struct backing_dev_info *bdi = inode_to_bdi(inode);
1866	struct bdi_writeback *wb = NULL;
1867	int ratelimit;
1868	int *p;
1869
1870	if (!bdi_cap_account_dirty(bdi))
1871		return;
1872
1873	if (inode_cgwb_enabled(inode))
1874		wb = wb_get_create_current(bdi, GFP_KERNEL);
1875	if (!wb)
1876		wb = &bdi->wb;
1877
1878	ratelimit = current->nr_dirtied_pause;
1879	if (wb->dirty_exceeded)
1880		ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));
1881
1882	preempt_disable();
1883	/*
1884	 * This prevents one CPU from accumulating too many dirtied pages without
1885	 * calling into balance_dirty_pages(), which can happen when there are
1886	 * 1000+ tasks that all start dirtying pages at exactly the same
1887	 * time and hence all honoured a too-large initial task->nr_dirtied_pause.
1888	 */
1889	p =  this_cpu_ptr(&bdp_ratelimits);
1890	if (unlikely(current->nr_dirtied >= ratelimit))
1891		*p = 0;
1892	else if (unlikely(*p >= ratelimit_pages)) {
1893		*p = 0;
1894		ratelimit = 0;
1895	}
1896	/*
1897	 * Pick up the pages dirtied by exited tasks. This avoids lots of
1898	 * short-lived tasks (e.g. gcc invocations in a kernel build) escaping
1899	 * the dirty throttling and livelocking other long-running dirtiers.
1900	 */
1901	p = this_cpu_ptr(&dirty_throttle_leaks);
1902	if (*p > 0 && current->nr_dirtied < ratelimit) {
1903		unsigned long nr_pages_dirtied;
1904		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
1905		*p -= nr_pages_dirtied;
1906		current->nr_dirtied += nr_pages_dirtied;
1907	}
1908	preempt_enable();
1909
1910	if (unlikely(current->nr_dirtied >= ratelimit))
1911		balance_dirty_pages(wb, current->nr_dirtied);
1912
1913	wb_put(wb);
1914}
1915EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
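/*
 * Usage sketch (illustrative, not from this file): a typical buffered write
 * path dirties pages one at a time and calls in after each one:
 *
 *	set_page_dirty(page);
 *	unlock_page(page);
 *	balance_dirty_pages_ratelimited(mapping);
 */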
1916
1917/**
1918 * wb_over_bg_thresh - does @wb need to be written back?
1919 * @wb: bdi_writeback of interest
1920 *
1921 * Determines whether background writeback should keep writing @wb or whether
1922 * it is clean enough.  Returns %true if writeback should continue.
1923 */
1924bool wb_over_bg_thresh(struct bdi_writeback *wb)
1925{
1926	struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
1927	struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
1928	struct dirty_throttle_control * const gdtc = &gdtc_stor;
1929	struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
1930						     &mdtc_stor : NULL;
1931
1932	/*
1933	 * Similar to balance_dirty_pages() but ignores pages being written
1934	 * as we're trying to decide whether to put more under writeback.
1935	 */
1936	gdtc->avail = global_dirtyable_memory();
1937	gdtc->dirty = global_node_page_state(NR_FILE_DIRTY) +
1938		      global_node_page_state(NR_UNSTABLE_NFS);
1939	domain_dirty_limits(gdtc);
1940
1941	if (gdtc->dirty > gdtc->bg_thresh)
1942		return true;
1943
1944	if (wb_stat(wb, WB_RECLAIMABLE) >
1945	    wb_calc_thresh(gdtc->wb, gdtc->bg_thresh))
1946		return true;
1947
1948	if (mdtc) {
1949		unsigned long filepages, headroom, writeback;
1950
1951		mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty,
1952				    &writeback);
1953		mdtc_calc_avail(mdtc, filepages, headroom);
1954		domain_dirty_limits(mdtc);	/* ditto, ignore writeback */
1955
1956		if (mdtc->dirty > mdtc->bg_thresh)
1957			return true;
1958
1959		if (wb_stat(wb, WB_RECLAIMABLE) >
1960		    wb_calc_thresh(mdtc->wb, mdtc->bg_thresh))
1961			return true;
1962	}
1963
1964	return false;
1965}
1966
1967/*
1968 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
1969 */
1970int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
1971	void __user *buffer, size_t *length, loff_t *ppos)
1972{
1973	unsigned int old_interval = dirty_writeback_interval;
1974	int ret;
1975
1976	ret = proc_dointvec(table, write, buffer, length, ppos);
1977
1978	/*
1979	 * Writing 0 to dirty_writeback_interval will disable periodic writeback
1980	 * and a different non-zero value will wake up the writeback threads.
1981	 * wb_wakeup_delayed() would be more appropriate, but it's a pain to
1982	 * iterate over all bdis and wbs.
1983	 * The reason we do this is to make the change take effect immediately.
1984	 */
1985	if (!ret && write && dirty_writeback_interval &&
1986		dirty_writeback_interval != old_interval)
1987		wakeup_flusher_threads(WB_REASON_PERIODIC);
1988
1989	return ret;
1990}
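/*
 * Example (from userspace): shorten the kupdate interval to 1s; the wakeup
 * above makes the new value take effect immediately:
 *
 *	echo 100 > /proc/sys/vm/dirty_writeback_centisecs
 */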
1991
1992#ifdef CONFIG_BLOCK
1993void laptop_mode_timer_fn(struct timer_list *t)
1994{
1995	struct backing_dev_info *backing_dev_info =
1996		from_timer(backing_dev_info, t, laptop_mode_wb_timer);
1997
1998	wakeup_flusher_threads_bdi(backing_dev_info, WB_REASON_LAPTOP_TIMER);
1999}
2000
2001/*
2002 * We've spun up the disk and we're in laptop mode: schedule writeback
2003 * of all dirty data a few seconds from now.  If the flush is already scheduled
2004 * then push it back - the user is still using the disk.
2005 */
2006void laptop_io_completion(struct backing_dev_info *info)
2007{
2008	mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
2009}
2010
2011/*
2012 * We're in laptop mode and we've just synced. The sync's writes will have
2013 * caused another writeback to be scheduled by laptop_io_completion.
2014 * Nothing needs to be written back anymore, so we unschedule the writeback.
2015 */
2016void laptop_sync_completion(void)
2017{
2018	struct backing_dev_info *bdi;
2019
2020	rcu_read_lock();
2021
2022	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
2023		del_timer(&bdi->laptop_mode_wb_timer);
2024
2025	rcu_read_unlock();
2026}
2027#endif
2028
2029/*
2030 * If ratelimit_pages is too high then we can get into dirty-data overload
2031 * if a large number of processes all perform writes at the same time.
2032 * If it is too low then SMP machines will call the (expensive)
2033 * get_writeback_state too often.
2034 *
2035 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
2036 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
2037 * thresholds.
2038 */
2039
2040void writeback_set_ratelimit(void)
2041{
2042	struct wb_domain *dom = &global_wb_domain;
2043	unsigned long background_thresh;
2044	unsigned long dirty_thresh;
2045
2046	global_dirty_limits(&background_thresh, &dirty_thresh);
2047	dom->dirty_limit = dirty_thresh;
2048	ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
2049	if (ratelimit_pages < 16)
2050		ratelimit_pages = 16;
2051}
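/*
 * Worked example (hypothetical numbers): with dirty_thresh = 131072 pages
 * (512 MiB of 4KiB pages) and 8 online CPUs, ratelimit_pages becomes
 * 131072 / (8 * 32) = 512, i.e. each CPU checks in at least every 512
 * dirtied pages, bounding the collective overshoot to ~1/32 (3%).
 */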
2052
2053static int page_writeback_cpu_online(unsigned int cpu)
2054{
2055	writeback_set_ratelimit();
2056	return 0;
2057}
2058
2059/*
2060 * Called early on to tune the page writeback dirty limits.
2061 *
2062 * We used to scale dirty pages according to how total memory
2063 * related to pages that could be allocated for buffers (by
2064 * comparing nr_free_buffer_pages() to vm_total_pages.
2065 *
2066 * However, that was when we used "dirty_ratio" to scale with
2067 * all memory, and we don't do that any more. "dirty_ratio"
2068 * is now applied to total non-HIGHPAGE memory (by subtracting
2069 * totalhigh_pages from vm_total_pages), and as such we can't
2070 * get into the old insane situation any more where we had
2071 * large amounts of dirty pages compared to a small amount of
2072 * non-HIGHMEM memory.
2073 *
2074 * But we might still want to scale the dirty_ratio by how
2075 * much memory the box has..
2076 */
2077void __init page_writeback_init(void)
2078{
2079	BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
2080
2081	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/writeback:online",
2082			  page_writeback_cpu_online, NULL);
2083	cpuhp_setup_state(CPUHP_MM_WRITEBACK_DEAD, "mm/writeback:dead", NULL,
2084			  page_writeback_cpu_online);
2085}
2086
2087/**
2088 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
2089 * @mapping: address space structure to write
2090 * @start: starting page index
2091 * @end: ending page index (inclusive)
2092 *
2093 * This function scans the page range from @start to @end (inclusive) and tags
2094 * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
2095 * that write_cache_pages (or whoever calls this function) will then use
2096 * TOWRITE tag to identify pages eligible for writeback.  This mechanism is
2097 * used to avoid livelocking of writeback by a process steadily creating new
2098 * dirty pages in the file (thus it is important for this function to be quick
2099 * so that it can tag pages faster than a dirtying process can create them).
2100 */
2101/*
2102 * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce the i_pages lock
2103 * latency.
2104 */
2105void tag_pages_for_writeback(struct address_space *mapping,
2106			     pgoff_t start, pgoff_t end)
2107{
2108#define WRITEBACK_TAG_BATCH 4096
2109	unsigned long tagged = 0;
2110	struct radix_tree_iter iter;
2111	void **slot;
2112
2113	xa_lock_irq(&mapping->i_pages);
2114	radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, start,
2115							PAGECACHE_TAG_DIRTY) {
2116		if (iter.index > end)
2117			break;
2118		radix_tree_iter_tag_set(&mapping->i_pages, &iter,
2119							PAGECACHE_TAG_TOWRITE);
2120		tagged++;
2121		if ((tagged % WRITEBACK_TAG_BATCH) != 0)
2122			continue;
2123		slot = radix_tree_iter_resume(slot, &iter);
2124		xa_unlock_irq(&mapping->i_pages);
2125		cond_resched();
2126		xa_lock_irq(&mapping->i_pages);
2127	}
2128	xa_unlock_irq(&mapping->i_pages);
2129}
2130EXPORT_SYMBOL(tag_pages_for_writeback);
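/*
 * Worked note (illustrative numbers): with WRITEBACK_TAG_BATCH = 4096,
 * tagging a fully dirty 1 GiB file of 4KiB pages (262144 pages) drops and
 * retakes the i_pages lock 64 times, bounding lock hold times while still
 * tagging far faster than a dirtier can create new pages.
 */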
2131
2132/**
2133 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2134 * @mapping: address space structure to write
2135 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2136 * @writepage: function called for each page
2137 * @data: data passed to writepage function
2138 *
2139 * If a page is already under I/O, write_cache_pages() skips it, even
2140 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
2141 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
2142 * and msync() need to guarantee that all the data which was dirty at the time
2143 * the call was made gets new I/O started against it.  If wbc->sync_mode is
2144 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2145 * existing IO to complete.
2146 *
2147 * To avoid livelocks (when other process dirties new pages), we first tag
2148 * pages which should be written back with TOWRITE tag and only then start
2149 * writing them. For data-integrity sync we have to be careful so that we do
2150 * not miss some pages (e.g., because some other process has cleared TOWRITE
2151 * tag we set). The rule we follow is that TOWRITE tag can be cleared only
2152 * by the process clearing the DIRTY tag (and submitting the page for IO).
2153 */
2154int write_cache_pages(struct address_space *mapping,
2155		      struct writeback_control *wbc, writepage_t writepage,
2156		      void *data)
2157{
2158	int ret = 0;
2159	int done = 0;
2160	struct pagevec pvec;
2161	int nr_pages;
2162	pgoff_t uninitialized_var(writeback_index);
2163	pgoff_t index;
2164	pgoff_t end;		/* Inclusive */
2165	pgoff_t done_index;
2166	int cycled;
2167	int range_whole = 0;
2168	int tag;
2169
2170	pagevec_init(&pvec);
2171	if (wbc->range_cyclic) {
2172		writeback_index = mapping->writeback_index; /* prev offset */
2173		index = writeback_index;
2174		if (index == 0)
2175			cycled = 1;
2176		else
2177			cycled = 0;
2178		end = -1;
2179	} else {
2180		index = wbc->range_start >> PAGE_SHIFT;
2181		end = wbc->range_end >> PAGE_SHIFT;
2182		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2183			range_whole = 1;
2184		cycled = 1; /* ignore range_cyclic tests */
2185	}
2186	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2187		tag = PAGECACHE_TAG_TOWRITE;
2188	else
2189		tag = PAGECACHE_TAG_DIRTY;
2190retry:
2191	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2192		tag_pages_for_writeback(mapping, index, end);
2193	done_index = index;
2194	while (!done && (index <= end)) {
2195		int i;
2196
2197		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
2198				tag);
2199		if (nr_pages == 0)
2200			break;
2201
2202		for (i = 0; i < nr_pages; i++) {
2203			struct page *page = pvec.pages[i];
2204
2205			done_index = page->index;
2206
2207			lock_page(page);
2208
2209			/*
2210			 * Page truncated or invalidated. We can freely skip it
2211			 * then, even for data integrity operations: the page
2212			 * has disappeared concurrently, so there could be no
2213			 * real expectation of this data integrity operation
2214			 * even if there is now a new, dirty page at the same
2215			 * pagecache address.
2216			 */
2217			if (unlikely(page->mapping != mapping)) {
2218continue_unlock:
2219				unlock_page(page);
2220				continue;
2221			}
2222
2223			if (!PageDirty(page)) {
2224				/* someone wrote it for us */
2225				goto continue_unlock;
2226			}
2227
2228			if (PageWriteback(page)) {
2229				if (wbc->sync_mode != WB_SYNC_NONE)
2230					wait_on_page_writeback(page);
2231				else
2232					goto continue_unlock;
2233			}
2234
2235			BUG_ON(PageWriteback(page));
2236			if (!clear_page_dirty_for_io(page))
2237				goto continue_unlock;
2238
2239			trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
2240			ret = (*writepage)(page, wbc, data);
2241			if (unlikely(ret)) {
2242				if (ret == AOP_WRITEPAGE_ACTIVATE) {
2243					unlock_page(page);
2244					ret = 0;
2245				} else {
2246					/*
2247					 * done_index is set past this page,
2248					 * so media errors will not choke
2249					 * background writeout for the entire
2250					 * file. This has consequences for
2251					 * range_cyclic semantics (i.e. it may
2252					 * not be suitable for data integrity
2253					 * writeout).
2254					 */
2255					done_index = page->index + 1;
2256					done = 1;
2257					break;
2258				}
2259			}
2260
2261			/*
2262			 * We stop writing back only if we are not doing
2263			 * integrity sync. In case of integrity sync we have to
2264			 * keep going until we have written all the pages
2265			 * we tagged for writeback prior to entering this loop.
2266			 */
2267			if (--wbc->nr_to_write <= 0 &&
2268			    wbc->sync_mode == WB_SYNC_NONE) {
2269				done = 1;
2270				break;
2271			}
2272		}
2273		pagevec_release(&pvec);
2274		cond_resched();
2275	}
2276	if (!cycled && !done) {
2277		/*
2278		 * range_cyclic:
2279		 * We hit the last page and there is more work to be done: wrap
2280		 * back to the start of the file
2281		 */
2282		cycled = 1;
2283		index = 0;
2284		end = writeback_index - 1;
2285		goto retry;
2286	}
2287	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2288		mapping->writeback_index = done_index;
2289
2290	return ret;
2291}
2292EXPORT_SYMBOL(write_cache_pages);
2293
2294/*
2295 * Function used by generic_writepages to call the real writepage
2296 * function and set the mapping flags on error.
2297 */
2298static int __writepage(struct page *page, struct writeback_control *wbc,
2299		       void *data)
2300{
2301	struct address_space *mapping = data;
2302	int ret = mapping->a_ops->writepage(page, wbc);
2303	mapping_set_error(mapping, ret);
2304	return ret;
2305}
2306
2307/**
2308 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
2309 * @mapping: address space structure to write
2310 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2311 *
2312 * This is a library function, which implements the writepages()
2313 * address_space_operation.
2314 */
2315int generic_writepages(struct address_space *mapping,
2316		       struct writeback_control *wbc)
2317{
2318	struct blk_plug plug;
2319	int ret;
2320
2321	/* deal with chardevs and other special file */
2322	if (!mapping->a_ops->writepage)
2323		return 0;
2324
2325	blk_start_plug(&plug);
2326	ret = write_cache_pages(mapping, wbc, __writepage, mapping);
2327	blk_finish_plug(&plug);
2328	return ret;
2329}
2330
2331EXPORT_SYMBOL(generic_writepages);
2332
2333int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
2334{
2335	int ret;
2336
2337	if (wbc->nr_to_write <= 0)
2338		return 0;
2339	while (1) {
2340		if (mapping->a_ops->writepages)
2341			ret = mapping->a_ops->writepages(mapping, wbc);
2342		else
2343			ret = generic_writepages(mapping, wbc);
2344		if ((ret != -ENOMEM) || (wbc->sync_mode != WB_SYNC_ALL))
2345			break;
2346		cond_resched();
2347		congestion_wait(BLK_RW_ASYNC, HZ/50);
2348	}
2349	return ret;
2350}
2351
2352/**
2353 * write_one_page - write out a single page and wait on I/O
2354 * @page: the page to write
2355 *
2356 * The page must be locked by the caller and will be unlocked upon return.
2357 *
2358 * Note that the mapping's AS_EIO/AS_ENOSPC flags will be cleared when this
2359 * function returns.
2360 */
2361int write_one_page(struct page *page)
2362{
2363	struct address_space *mapping = page->mapping;
2364	int ret = 0;
2365	struct writeback_control wbc = {
2366		.sync_mode = WB_SYNC_ALL,
2367		.nr_to_write = 1,
2368	};
2369
2370	BUG_ON(!PageLocked(page));
2371
2372	wait_on_page_writeback(page);
2373
2374	if (clear_page_dirty_for_io(page)) {
2375		get_page(page);
2376		ret = mapping->a_ops->writepage(page, &wbc);
2377		if (ret == 0)
2378			wait_on_page_writeback(page);
2379		put_page(page);
2380	} else {
2381		unlock_page(page);
2382	}
2383
2384	if (!ret)
2385		ret = filemap_check_errors(mapping);
2386	return ret;
2387}
2388EXPORT_SYMBOL(write_one_page);
2389
2390/*
2391 * For address_spaces which do not use buffers nor write back.
2392 */
2393int __set_page_dirty_no_writeback(struct page *page)
2394{
2395	if (!PageDirty(page))
2396		return !TestSetPageDirty(page);
2397	return 0;
2398}
2399
2400/*
2401 * Helper function for set_page_dirty family.
2402 *
2403 * Caller must hold lock_page_memcg().
2404 *
2405 * NOTE: This relies on being atomic wrt interrupts.
2406 */
2407void account_page_dirtied(struct page *page, struct address_space *mapping)
2408{
2409	struct inode *inode = mapping->host;
2410
2411	trace_writeback_dirty_page(page, mapping);
2412
2413	if (mapping_cap_account_dirty(mapping)) {
2414		struct bdi_writeback *wb;
2415
2416		inode_attach_wb(inode, page);
2417		wb = inode_to_wb(inode);
2418
2419		__inc_lruvec_page_state(page, NR_FILE_DIRTY);
2420		__inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
2421		__inc_node_page_state(page, NR_DIRTIED);
2422		inc_wb_stat(wb, WB_RECLAIMABLE);
2423		inc_wb_stat(wb, WB_DIRTIED);
2424		task_io_account_write(PAGE_SIZE);
2425		current->nr_dirtied++;
2426		this_cpu_inc(bdp_ratelimits);
2427	}
2428}
2429EXPORT_SYMBOL(account_page_dirtied);
2430
2431/*
2432 * Helper function for deaccounting dirty page without writeback.
2433 *
2434 * Caller must hold lock_page_memcg().
2435 */
2436void account_page_cleaned(struct page *page, struct address_space *mapping,
2437			  struct bdi_writeback *wb)
2438{
2439	if (mapping_cap_account_dirty(mapping)) {
2440		dec_lruvec_page_state(page, NR_FILE_DIRTY);
2441		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
2442		dec_wb_stat(wb, WB_RECLAIMABLE);
2443		task_io_account_cancelled_write(PAGE_SIZE);
2444	}
2445}
2446
2447/*
2448 * For address_spaces which do not use buffers.  Just tag the page as dirty in
2449 * its radix tree.
2450 *
2451 * This is also used when a single buffer is being dirtied: we want to set the
2452 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
2453 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
2454 *
2455 * The caller must ensure this doesn't race with truncation.  Most will simply
2456 * hold the page lock, but e.g. zap_pte_range() calls with the page mapped and
2457 * the pte lock held, which also locks out truncation.
2458 */
2459int __set_page_dirty_nobuffers(struct page *page)
2460{
2461	lock_page_memcg(page);
2462	if (!TestSetPageDirty(page)) {
2463		struct address_space *mapping = page_mapping(page);
2464		unsigned long flags;
2465
2466		if (!mapping) {
2467			unlock_page_memcg(page);
2468			return 1;
2469		}
2470
2471		xa_lock_irqsave(&mapping->i_pages, flags);
2472		BUG_ON(page_mapping(page) != mapping);
2473		WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
2474		account_page_dirtied(page, mapping);
2475		radix_tree_tag_set(&mapping->i_pages, page_index(page),
2476				   PAGECACHE_TAG_DIRTY);
2477		xa_unlock_irqrestore(&mapping->i_pages, flags);
2478		unlock_page_memcg(page);
2479
2480		if (mapping->host) {
2481			/* !PageAnon && !swapper_space */
2482			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
2483		}
2484		return 1;
2485	}
2486	unlock_page_memcg(page);
2487	return 0;
2488}
2489EXPORT_SYMBOL(__set_page_dirty_nobuffers);
2490
2491/*
2492 * Call this whenever redirtying a page, to de-account the dirty counters
2493 * (NR_DIRTIED, BDI_DIRTIED, tsk->nr_dirtied), so that they match the written
2494 * counters (NR_WRITTEN, BDI_WRITTEN) in the long term. The mismatches will lead to
2495 * systematic errors in balanced_dirty_ratelimit and the dirty pages position
2496 * control.
2497 */
2498void account_page_redirty(struct page *page)
2499{
2500	struct address_space *mapping = page->mapping;
2501
2502	if (mapping && mapping_cap_account_dirty(mapping)) {
2503		struct inode *inode = mapping->host;
2504		struct bdi_writeback *wb;
2505		struct wb_lock_cookie cookie = {};
2506
2507		wb = unlocked_inode_to_wb_begin(inode, &cookie);
2508		current->nr_dirtied--;
2509		dec_node_page_state(page, NR_DIRTIED);
2510		dec_wb_stat(wb, WB_DIRTIED);
2511		unlocked_inode_to_wb_end(inode, &cookie);
2512	}
2513}
2514EXPORT_SYMBOL(account_page_redirty);
2515
2516/*
2517 * When a writepage implementation decides that it doesn't want to write this
2518 * page for some reason, it should redirty the locked page via
2519 * redirty_page_for_writepage() and it should then unlock the page and return 0
2520 */
2521int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
2522{
2523	int ret;
2524
2525	wbc->pages_skipped++;
2526	ret = __set_page_dirty_nobuffers(page);
2527	account_page_redirty(page);
2528	return ret;
2529}
2530EXPORT_SYMBOL(redirty_page_for_writepage);
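/*
 * Usage sketch (illustrative): a ->writepage implementation that cannot make
 * progress right now follows the rule described above:
 *
 *	redirty_page_for_writepage(wbc, page);
 *	unlock_page(page);
 *	return 0;
 */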
2531
2532/*
2533 * Dirty a page.
2534 *
2535 * For pages with a mapping this should be done under the page lock
2536 * for the benefit of asynchronous memory errors, which prefer a consistent
2537 * dirty state. This rule can be broken in some special cases,
2538 * but it is better not to.
2539 *
2540 * If the mapping doesn't provide a set_page_dirty a_op, then
2541 * just fall through and assume that it wants buffer_heads.
2542 */
2543int set_page_dirty(struct page *page)
2544{
2545	struct address_space *mapping = page_mapping(page);
2546
2547	page = compound_head(page);
2548	if (likely(mapping)) {
2549		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
2550		/*
2551		 * readahead/lru_deactivate_page could leave
2552		 * PG_readahead/PG_reclaim set due to a race with end_page_writeback.
2553		 * For readahead, if the page is written, the flags would be
2554		 * reset. So no problem.
2555		 * For lru_deactivate_page, if the page is redirtied, the flag
2556		 * will be reset. So no problem, but if the page is used by readahead
2557		 * it will confuse readahead and make it restart the size ramp-up
2558		 * process. But that is a trivial problem.
2559		 */
2560		if (PageReclaim(page))
2561			ClearPageReclaim(page);
2562#ifdef CONFIG_BLOCK
2563		if (!spd)
2564			spd = __set_page_dirty_buffers;
2565#endif
2566		return (*spd)(page);
2567	}
2568	if (!PageDirty(page)) {
2569		if (!TestSetPageDirty(page))
2570			return 1;
2571	}
2572	return 0;
2573}
2574EXPORT_SYMBOL(set_page_dirty);
2575
2576/*
2577 * set_page_dirty() is racy if the caller has no reference against
2578 * page->mapping->host, and if the page is unlocked.  This is because another
2579 * CPU could truncate the page off the mapping and then free the mapping.
2580 *
2581 * Usually, the page _is_ locked, or the caller is a user-space process which
2582 * holds a reference on the inode by having an open file.
2583 *
2584 * In other cases, the page should be locked before running set_page_dirty().
2585 */
2586int set_page_dirty_lock(struct page *page)
2587{
2588	int ret;
2589
2590	lock_page(page);
2591	ret = set_page_dirty(page);
2592	unlock_page(page);
2593	return ret;
2594}
2595EXPORT_SYMBOL(set_page_dirty_lock);
2596
2597/*
2598 * This cancels just the dirty bit on the kernel page itself, it does NOT
2599 * actually remove dirty bits on any mmap's that may be around. It also
2600 * leaves the page tagged dirty, so any sync activity will still find it on
2601 * the dirty lists, and in particular, clear_page_dirty_for_io() will still
2602 * look at the dirty bits in the VM.
2603 *
2604 * Doing this should *normally* only ever be done when a page is truncated,
2605 * and is not actually mapped anywhere at all. However, fs/buffer.c does
2606 * this when it notices that somebody has cleaned out all the buffers on a
2607 * page without actually doing it through the VM. Can you say "ext3 is
2608 * horribly ugly"? Thought you could.
2609 */
2610void __cancel_dirty_page(struct page *page)
2611{
2612	struct address_space *mapping = page_mapping(page);
2613
2614	if (mapping_cap_account_dirty(mapping)) {
2615		struct inode *inode = mapping->host;
2616		struct bdi_writeback *wb;
2617		struct wb_lock_cookie cookie = {};
2618
2619		lock_page_memcg(page);
2620		wb = unlocked_inode_to_wb_begin(inode, &cookie);
2621
2622		if (TestClearPageDirty(page))
2623			account_page_cleaned(page, mapping, wb);
2624
2625		unlocked_inode_to_wb_end(inode, &cookie);
2626		unlock_page_memcg(page);
2627	} else {
2628		ClearPageDirty(page);
2629	}
2630}
2631EXPORT_SYMBOL(__cancel_dirty_page);
2632
2633/*
2634 * Clear a page's dirty flag, while caring for dirty memory accounting.
2635 * Returns true if the page was previously dirty.
2636 *
2637 * This is for preparing to put the page under writeout.  We leave the page
2638 * tagged as dirty in the radix tree so that a concurrent write-for-sync
2639 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
2640 * implementation will run either set_page_writeback() or set_page_dirty(),
2641 * at which stage we bring the page's dirty flag and radix-tree dirty tag
2642 * back into sync.
2643 *
2644 * This incoherency between the page's dirty flag and radix-tree tag is
2645 * unfortunate, but it only exists while the page is locked.
2646 */
2647int clear_page_dirty_for_io(struct page *page)
2648{
2649	struct address_space *mapping = page_mapping(page);
2650	int ret = 0;
2651
2652	BUG_ON(!PageLocked(page));
2653
2654	if (mapping && mapping_cap_account_dirty(mapping)) {
2655		struct inode *inode = mapping->host;
2656		struct bdi_writeback *wb;
2657		struct wb_lock_cookie cookie = {};
2658
2659		/*
2660		 * Yes, Virginia, this is indeed insane.
2661		 *
2662		 * We use this sequence to make sure that
2663		 *  (a) we account for dirty stats properly
2664		 *  (b) we tell the low-level filesystem to
2665		 *      mark the whole page dirty if it was
2666		 *      dirty in a pagetable. Only to then
2667		 *  (c) clean the page again and return 1 to
2668		 *      cause the writeback.
2669		 *
2670		 * This way we avoid all nasty races with the
2671		 * dirty bit in multiple places and clearing
2672		 * them concurrently from different threads.
2673		 *
2674		 * Note! Normally the "set_page_dirty(page)"
2675		 * has no effect on the actual dirty bit - since
2676		 * that will already usually be set. But we
2677		 * need the side effects, and it can help us
2678		 * avoid races.
2679		 *
2680		 * We basically use the page "master dirty bit"
2681		 * as a serialization point for all the different
2682		 * threads doing their things.
2683		 */
2684		if (page_mkclean(page))
2685			set_page_dirty(page);
2686		/*
2687		 * We carefully synchronise fault handlers against
2688		 * installing a dirty pte and marking the page dirty
2689		 * at this point.  We do this by having them hold the
2690		 * page lock while dirtying the page, and pages are
2691		 * always locked coming in here, so we get the desired
2692		 * exclusion.
2693		 */
2694		wb = unlocked_inode_to_wb_begin(inode, &cookie);
2695		if (TestClearPageDirty(page)) {
2696			dec_lruvec_page_state(page, NR_FILE_DIRTY);
2697			dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
2698			dec_wb_stat(wb, WB_RECLAIMABLE);
2699			ret = 1;
2700		}
2701		unlocked_inode_to_wb_end(inode, &cookie);
2702		return ret;
2703	}
2704	return TestClearPageDirty(page);
2705}
2706EXPORT_SYMBOL(clear_page_dirty_for_io);
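/*
 * Usage sketch (illustrative): a ->writepage that submits I/O typically pairs
 * this with set_page_writeback(), with end_page_writeback() run on I/O
 * completion:
 *
 *	if (clear_page_dirty_for_io(page)) {
 *		set_page_writeback(page);
 *		... submit the I/O ...
 *	}
 */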
2707
2708int test_clear_page_writeback(struct page *page)
2709{
2710	struct address_space *mapping = page_mapping(page);
2711	struct mem_cgroup *memcg;
2712	struct lruvec *lruvec;
2713	int ret;
2714
2715	memcg = lock_page_memcg(page);
2716	lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
2717	if (mapping && mapping_use_writeback_tags(mapping)) {
2718		struct inode *inode = mapping->host;
2719		struct backing_dev_info *bdi = inode_to_bdi(inode);
2720		unsigned long flags;
2721
2722		xa_lock_irqsave(&mapping->i_pages, flags);
2723		ret = TestClearPageWriteback(page);
2724		if (ret) {
2725			radix_tree_tag_clear(&mapping->i_pages, page_index(page),
2726						PAGECACHE_TAG_WRITEBACK);
2727			if (bdi_cap_account_writeback(bdi)) {
2728				struct bdi_writeback *wb = inode_to_wb(inode);
2729
2730				dec_wb_stat(wb, WB_WRITEBACK);
2731				__wb_writeout_inc(wb);
2732			}
2733		}
2734
2735		if (mapping->host && !mapping_tagged(mapping,
2736						     PAGECACHE_TAG_WRITEBACK))
2737			sb_clear_inode_writeback(mapping->host);
2738
2739		xa_unlock_irqrestore(&mapping->i_pages, flags);
2740	} else {
2741		ret = TestClearPageWriteback(page);
2742	}
2743	/*
2744	 * NOTE: Page might be free now! Writeback doesn't hold a page
2745	 * reference on its own, it relies on truncation to wait for
2746	 * the clearing of PG_writeback. The below can only access
2747	 * page state that is static across allocation cycles.
2748	 */
2749	if (ret) {
2750		dec_lruvec_state(lruvec, NR_WRITEBACK);
2751		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
2752		inc_node_page_state(page, NR_WRITTEN);
2753	}
2754	__unlock_page_memcg(memcg);
2755	return ret;
2756}
2757
2758int __test_set_page_writeback(struct page *page, bool keep_write)
2759{
2760	struct address_space *mapping = page_mapping(page);
2761	int ret;
2762
2763	lock_page_memcg(page);
2764	if (mapping && mapping_use_writeback_tags(mapping)) {
2765		struct inode *inode = mapping->host;
2766		struct backing_dev_info *bdi = inode_to_bdi(inode);
2767		unsigned long flags;
2768
2769		xa_lock_irqsave(&mapping->i_pages, flags);
2770		ret = TestSetPageWriteback(page);
2771		if (!ret) {
2772			bool on_wblist;
2773
2774			on_wblist = mapping_tagged(mapping,
2775						   PAGECACHE_TAG_WRITEBACK);
2776
2777			radix_tree_tag_set(&mapping->i_pages, page_index(page),
2778						PAGECACHE_TAG_WRITEBACK);
2779			if (bdi_cap_account_writeback(bdi))
2780				inc_wb_stat(inode_to_wb(inode), WB_WRITEBACK);
2781
2782			/*
2783			 * We can come through here when swapping anonymous
2784			 * pages, so we don't necessarily have an inode to track
2785			 * for sync.
2786			 */
2787			if (mapping->host && !on_wblist)
2788				sb_mark_inode_writeback(mapping->host);
2789		}
2790		if (!PageDirty(page))
2791			radix_tree_tag_clear(&mapping->i_pages, page_index(page),
2792						PAGECACHE_TAG_DIRTY);
2793		if (!keep_write)
2794			radix_tree_tag_clear(&mapping->i_pages, page_index(page),
2795						PAGECACHE_TAG_TOWRITE);
2796		xa_unlock_irqrestore(&mapping->i_pages, flags);
2797	} else {
2798		ret = TestSetPageWriteback(page);
2799	}
2800	if (!ret) {
2801		inc_lruvec_page_state(page, NR_WRITEBACK);
2802		inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
2803	}
2804	unlock_page_memcg(page);
2805	return ret;
2806
2807}
2808EXPORT_SYMBOL(__test_set_page_writeback);
2809
2810/*
2811 * Return true if any of the pages in the mapping are marked with the
2812 * passed tag.
2813 */
2814int mapping_tagged(struct address_space *mapping, int tag)
2815{
2816	return radix_tree_tagged(&mapping->i_pages, tag);
2817}
2818EXPORT_SYMBOL(mapping_tagged);
2819
2820/**
2821 * wait_for_stable_page() - wait for writeback to finish, if necessary.
2822 * @page:	The page to wait on.
2823 *
2824 * This function determines if the given page is related to a backing device
2825 * that requires page contents to be held stable during writeback.  If so, then
2826 * it will wait for any pending writeback to complete.
2827 */
2828void wait_for_stable_page(struct page *page)
2829{
2830	if (bdi_cap_stable_pages_required(inode_to_bdi(page->mapping->host)))
2831		wait_on_page_writeback(page);
2832}
2833EXPORT_SYMBOL_GPL(wait_for_stable_page);
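/*
 * Usage sketch (illustrative): a path about to redirty a page, e.g. a
 * filesystem's ->page_mkwrite handler, waits here so that devices computing
 * checksums or parity over in-flight pages never see the data change
 * mid-write:
 *
 *	lock_page(page);
 *	set_page_dirty(page);
 *	wait_for_stable_page(page);
 */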
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * mm/page-writeback.c
   4 *
   5 * Copyright (C) 2002, Linus Torvalds.
   6 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
   7 *
   8 * Contains functions related to writing back dirty pages at the
   9 * address_space level.
  10 *
  11 * 10Apr2002	Andrew Morton
  12 *		Initial version
  13 */
  14
  15#include <linux/kernel.h>
  16#include <linux/math64.h>
  17#include <linux/export.h>
  18#include <linux/spinlock.h>
  19#include <linux/fs.h>
  20#include <linux/mm.h>
  21#include <linux/swap.h>
  22#include <linux/slab.h>
  23#include <linux/pagemap.h>
  24#include <linux/writeback.h>
  25#include <linux/init.h>
  26#include <linux/backing-dev.h>
  27#include <linux/task_io_accounting_ops.h>
  28#include <linux/blkdev.h>
  29#include <linux/mpage.h>
  30#include <linux/rmap.h>
  31#include <linux/percpu.h>
 
  32#include <linux/smp.h>
  33#include <linux/sysctl.h>
  34#include <linux/cpu.h>
  35#include <linux/syscalls.h>
 
  36#include <linux/pagevec.h>
  37#include <linux/timer.h>
  38#include <linux/sched/rt.h>
  39#include <linux/sched/signal.h>
  40#include <linux/mm_inline.h>
  41#include <trace/events/writeback.h>
  42
  43#include "internal.h"
  44
  45/*
  46 * Sleep at most 200ms at a time in balance_dirty_pages().
  47 */
  48#define MAX_PAUSE		max(HZ/5, 1)
  49
  50/*
  51 * Try to keep balance_dirty_pages() call intervals higher than this many pages
  52 * by raising pause time to max_pause when falls below it.
  53 */
  54#define DIRTY_POLL_THRESH	(128 >> (PAGE_SHIFT - 10))
  55
  56/*
  57 * Estimate write bandwidth at 200ms intervals.
  58 */
  59#define BANDWIDTH_INTERVAL	max(HZ/5, 1)
  60
  61#define RATELIMIT_CALC_SHIFT	10
  62
  63/*
  64 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
  65 * will look to see if it needs to force writeback or throttling.
  66 */
  67static long ratelimit_pages = 32;
  68
  69/* The following parameters are exported via /proc/sys/vm */
  70
  71/*
  72 * Start background writeback (via writeback threads) at this percentage
  73 */
  74static int dirty_background_ratio = 10;
  75
  76/*
  77 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
  78 * dirty_background_ratio * the amount of dirtyable memory
  79 */
  80static unsigned long dirty_background_bytes;
  81
  82/*
  83 * free highmem will not be subtracted from the total free memory
  84 * for calculating free ratios if vm_highmem_is_dirtyable is true
  85 */
  86static int vm_highmem_is_dirtyable;
  87
  88/*
  89 * The generator of dirty data starts writeback at this percentage
  90 */
  91static int vm_dirty_ratio = 20;
  92
  93/*
  94 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
  95 * vm_dirty_ratio * the amount of dirtyable memory
  96 */
  97static unsigned long vm_dirty_bytes;
  98
  99/*
 100 * The interval between `kupdate'-style writebacks
 101 */
 102unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
 103
 104EXPORT_SYMBOL_GPL(dirty_writeback_interval);
 105
 106/*
 107 * The longest time for which data is allowed to remain dirty
 108 */
 109unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
 110
 111/*
 
 
 
 
 
 112 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 113 * a full sync is triggered after this time elapses without any disk activity.
 114 */
 115int laptop_mode;
 116
 117EXPORT_SYMBOL(laptop_mode);
 118
 119/* End of sysctl-exported parameters */
 120
 121struct wb_domain global_wb_domain;
 122
 123/* consolidated parameters for balance_dirty_pages() and its subroutines */
 124struct dirty_throttle_control {
 125#ifdef CONFIG_CGROUP_WRITEBACK
 126	struct wb_domain	*dom;
 127	struct dirty_throttle_control *gdtc;	/* only set in memcg dtc's */
 128#endif
 129	struct bdi_writeback	*wb;
 130	struct fprop_local_percpu *wb_completions;
 131
 132	unsigned long		avail;		/* dirtyable */
 133	unsigned long		dirty;		/* file_dirty + write + nfs */
 134	unsigned long		thresh;		/* dirty threshold */
 135	unsigned long		bg_thresh;	/* dirty background threshold */
 136
 137	unsigned long		wb_dirty;	/* per-wb counterparts */
 138	unsigned long		wb_thresh;
 139	unsigned long		wb_bg_thresh;
 140
 141	unsigned long		pos_ratio;
 142};
 143
 144/*
 145 * Length of period for aging writeout fractions of bdis. This is an
 146 * arbitrarily chosen number. The longer the period, the slower fractions will
 147 * reflect changes in current writeout rate.
 148 */
 149#define VM_COMPLETIONS_PERIOD_LEN (3*HZ)
 150
 151#ifdef CONFIG_CGROUP_WRITEBACK
 152
 153#define GDTC_INIT(__wb)		.wb = (__wb),				\
 154				.dom = &global_wb_domain,		\
 155				.wb_completions = &(__wb)->completions
 156
 157#define GDTC_INIT_NO_WB		.dom = &global_wb_domain
 158
 159#define MDTC_INIT(__wb, __gdtc)	.wb = (__wb),				\
 160				.dom = mem_cgroup_wb_domain(__wb),	\
 161				.wb_completions = &(__wb)->memcg_completions, \
 162				.gdtc = __gdtc
 163
 164static bool mdtc_valid(struct dirty_throttle_control *dtc)
 165{
 166	return dtc->dom;
 167}
 168
 169static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
 170{
 171	return dtc->dom;
 172}
 173
 174static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
 175{
 176	return mdtc->gdtc;
 177}
 178
 179static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
 180{
 181	return &wb->memcg_completions;
 182}
 183
 184static void wb_min_max_ratio(struct bdi_writeback *wb,
 185			     unsigned long *minp, unsigned long *maxp)
 186{
 187	unsigned long this_bw = READ_ONCE(wb->avg_write_bandwidth);
 188	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
 189	unsigned long long min = wb->bdi->min_ratio;
 190	unsigned long long max = wb->bdi->max_ratio;
 191
 192	/*
 193	 * @wb may already be clean by the time control reaches here and
 194	 * the total may not include its bw.
 195	 */
 196	if (this_bw < tot_bw) {
 197		if (min) {
 198			min *= this_bw;
 199			min = div64_ul(min, tot_bw);
 200		}
 201		if (max < 100 * BDI_RATIO_SCALE) {
 202			max *= this_bw;
 203			max = div64_ul(max, tot_bw);
 204		}
 205	}
 206
 207	*minp = min;
 208	*maxp = max;
 209}
 210
 211#else	/* CONFIG_CGROUP_WRITEBACK */
 212
 213#define GDTC_INIT(__wb)		.wb = (__wb),                           \
 214				.wb_completions = &(__wb)->completions
 215#define GDTC_INIT_NO_WB
 216#define MDTC_INIT(__wb, __gdtc)
 217
 218static bool mdtc_valid(struct dirty_throttle_control *dtc)
 219{
 220	return false;
 221}
 222
 223static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
 224{
 225	return &global_wb_domain;
 226}
 227
 228static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
 229{
 230	return NULL;
 231}
 232
 233static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
 234{
 235	return NULL;
 236}
 237
 238static void wb_min_max_ratio(struct bdi_writeback *wb,
 239			     unsigned long *minp, unsigned long *maxp)
 240{
 241	*minp = wb->bdi->min_ratio;
 242	*maxp = wb->bdi->max_ratio;
 243}
 244
 245#endif	/* CONFIG_CGROUP_WRITEBACK */
 246
 247/*
 248 * In a memory zone, there is a certain amount of pages we consider
 249 * available for the page cache, which is essentially the number of
 250 * free and reclaimable pages, minus some zone reserves to protect
 251 * lowmem and the ability to uphold the zone's watermarks without
 252 * requiring writeback.
 253 *
 254 * This number of dirtyable pages is the base value of which the
 255 * user-configurable dirty ratio is the effective number of pages that
 256 * are allowed to be actually dirtied.  Per individual zone, or
 257 * globally by using the sum of dirtyable pages over all zones.
 258 *
 259 * Because the user is allowed to specify the dirty limit globally as
 260 * absolute number of bytes, calculating the per-zone dirty limit can
 261 * require translating the configured limit into a percentage of
 262 * global dirtyable memory first.
 263 */
 264
 265/**
 266 * node_dirtyable_memory - number of dirtyable pages in a node
 267 * @pgdat: the node
 268 *
 269 * Return: the node's number of pages potentially available for dirty
 270 * page cache.  This is the base value for the per-node dirty limits.
 271 */
 272static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)
 273{
 274	unsigned long nr_pages = 0;
 275	int z;
 276
 277	for (z = 0; z < MAX_NR_ZONES; z++) {
 278		struct zone *zone = pgdat->node_zones + z;
 279
 280		if (!populated_zone(zone))
 281			continue;
 282
 283		nr_pages += zone_page_state(zone, NR_FREE_PAGES);
 284	}
 285
 286	/*
 287	 * Pages reserved for the kernel should not be considered
 288	 * dirtyable, to prevent a situation where reclaim has to
 289	 * clean pages in order to balance the zones.
 290	 */
 291	nr_pages -= min(nr_pages, pgdat->totalreserve_pages);
 292
 293	nr_pages += node_page_state(pgdat, NR_INACTIVE_FILE);
 294	nr_pages += node_page_state(pgdat, NR_ACTIVE_FILE);
 295
 296	return nr_pages;
 297}
 298
 299static unsigned long highmem_dirtyable_memory(unsigned long total)
 300{
 301#ifdef CONFIG_HIGHMEM
 302	int node;
 303	unsigned long x = 0;
 304	int i;
 305
 306	for_each_node_state(node, N_HIGH_MEMORY) {
 307		for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
 308			struct zone *z;
 309			unsigned long nr_pages;
 310
 311			if (!is_highmem_idx(i))
 312				continue;
 313
 314			z = &NODE_DATA(node)->node_zones[i];
 315			if (!populated_zone(z))
 316				continue;
 317
 318			nr_pages = zone_page_state(z, NR_FREE_PAGES);
 319			/* watch for underflows */
 320			nr_pages -= min(nr_pages, high_wmark_pages(z));
 321			nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE);
 322			nr_pages += zone_page_state(z, NR_ZONE_ACTIVE_FILE);
 323			x += nr_pages;
 324		}
 325	}
 326
 327	/*
 
 
 
 
 
 
 
 
 
 
 
 
 328	 * Make sure that the number of highmem pages is never larger
 329	 * than the number of the total dirtyable memory. This can only
 330	 * occur in very strange VM situations but we want to make sure
 331	 * that this does not occur.
 332	 */
 333	return min(x, total);
 334#else
 335	return 0;
 336#endif
 337}
 338
 339/**
 340 * global_dirtyable_memory - number of globally dirtyable pages
 341 *
 342 * Return: the global number of pages potentially available for dirty
 343 * page cache.  This is the base value for the global dirty limits.
 344 */
 345static unsigned long global_dirtyable_memory(void)
 346{
 347	unsigned long x;
 348
 349	x = global_zone_page_state(NR_FREE_PAGES);
 350	/*
 351	 * Pages reserved for the kernel should not be considered
 352	 * dirtyable, to prevent a situation where reclaim has to
 353	 * clean pages in order to balance the zones.
 354	 */
 355	x -= min(x, totalreserve_pages);
 356
 357	x += global_node_page_state(NR_INACTIVE_FILE);
 358	x += global_node_page_state(NR_ACTIVE_FILE);
 359
 360	if (!vm_highmem_is_dirtyable)
 361		x -= highmem_dirtyable_memory(x);
 362
 363	return x + 1;	/* Ensure that we never return 0 */
 364}
 365
 366/**
 367 * domain_dirty_limits - calculate thresh and bg_thresh for a wb_domain
 368 * @dtc: dirty_throttle_control of interest
 369 *
 370 * Calculate @dtc->thresh and ->bg_thresh considering
 371 * vm_dirty_{bytes|ratio} and dirty_background_{bytes|ratio}.  The caller
 372 * must ensure that @dtc->avail is set before calling this function.  The
 373 * dirty limits will be lifted by 1/4 for real-time tasks.
 
 374 */
 375static void domain_dirty_limits(struct dirty_throttle_control *dtc)
 376{
 377	const unsigned long available_memory = dtc->avail;
 378	struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
 379	unsigned long bytes = vm_dirty_bytes;
 380	unsigned long bg_bytes = dirty_background_bytes;
 381	/* convert ratios to per-PAGE_SIZE for higher precision */
 382	unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
 383	unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
 384	unsigned long thresh;
 385	unsigned long bg_thresh;
 386	struct task_struct *tsk;
 387
 388	/* gdtc is !NULL iff @dtc is for memcg domain */
 389	if (gdtc) {
 390		unsigned long global_avail = gdtc->avail;
 391
 392		/*
 393		 * The byte settings can't be applied directly to memcg
 394		 * domains.  Convert them to ratios by scaling against
 395		 * globally available memory.  As the ratios are in
 396		 * per-PAGE_SIZE, they can be obtained by dividing bytes by
 397		 * number of pages.
 398		 */
 399		if (bytes)
 400			ratio = min(DIV_ROUND_UP(bytes, global_avail),
 401				    PAGE_SIZE);
 402		if (bg_bytes)
 403			bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
 404				       PAGE_SIZE);
 405		bytes = bg_bytes = 0;
 406	}
 407
 408	if (bytes)
 409		thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
 410	else
 411		thresh = (ratio * available_memory) / PAGE_SIZE;
 412
 413	if (bg_bytes)
 414		bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
 415	else
 416		bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
 417
 418	if (bg_thresh >= thresh)
 419		bg_thresh = thresh / 2;
 420	tsk = current;
 421	if (rt_task(tsk)) {
 422		bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
 423		thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
 424	}
 425	dtc->thresh = thresh;
 426	dtc->bg_thresh = bg_thresh;
 427
 428	/* we should eventually report the domain in the TP */
 429	if (!gdtc)
 430		trace_global_dirty_state(bg_thresh, thresh);
 431}
 432
 433/**
 434 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 435 * @pbackground: out parameter for bg_thresh
 436 * @pdirty: out parameter for thresh
 437 *
 438 * Calculate bg_thresh and thresh for global_wb_domain.  See
 439 * domain_dirty_limits() for details.
 440 */
 441void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 442{
 443	struct dirty_throttle_control gdtc = { GDTC_INIT_NO_WB };
 444
 445	gdtc.avail = global_dirtyable_memory();
 446	domain_dirty_limits(&gdtc);
 447
 448	*pbackground = gdtc.bg_thresh;
 449	*pdirty = gdtc.thresh;
 450}
 451
 452/**
 453 * node_dirty_limit - maximum number of dirty pages allowed in a node
 454 * @pgdat: the node
 455 *
 456 * Return: the maximum number of dirty pages allowed in a node, based
 457 * on the node's dirtyable memory.
 458 */
 459static unsigned long node_dirty_limit(struct pglist_data *pgdat)
 460{
 461	unsigned long node_memory = node_dirtyable_memory(pgdat);
 462	struct task_struct *tsk = current;
 463	unsigned long dirty;
 464
 465	if (vm_dirty_bytes)
 466		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
 467			node_memory / global_dirtyable_memory();
 468	else
 469		dirty = vm_dirty_ratio * node_memory / 100;
 470
 471	if (rt_task(tsk))
 472		dirty += dirty / 4;
 473
 474	return dirty;
 475}
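/*
 * Worked example (illustrative): with vm_dirty_ratio = 20 and a node
 * holding 2,000,000 dirtyable pages, the node limit is
 *
 *	dirty = 20 * 2000000 / 100 = 400000 pages
 *
 * and a real-time task would get the 1/4 boost: 400000 + 100000 = 500000.
 */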
 476
 477/**
 478 * node_dirty_ok - tells whether a node is within its dirty limits
 479 * @pgdat: the node to check
 480 *
 481 * Return: %true when the dirty pages in @pgdat are within the node's
 482 * dirty limit, %false if the limit is exceeded.
 483 */
 484bool node_dirty_ok(struct pglist_data *pgdat)
 485{
 486	unsigned long limit = node_dirty_limit(pgdat);
 487	unsigned long nr_pages = 0;
 488
 489	nr_pages += node_page_state(pgdat, NR_FILE_DIRTY);
 490	nr_pages += node_page_state(pgdat, NR_WRITEBACK);
 491
 492	return nr_pages <= limit;
 493}
 494
 495#ifdef CONFIG_SYSCTL
 496static int dirty_background_ratio_handler(struct ctl_table *table, int write,
 497		void *buffer, size_t *lenp, loff_t *ppos)
 498{
 499	int ret;
 500
 501	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 502	if (ret == 0 && write)
 503		dirty_background_bytes = 0;
 504	return ret;
 505}
 506
 507static int dirty_background_bytes_handler(struct ctl_table *table, int write,
 508		void *buffer, size_t *lenp, loff_t *ppos)
 509{
 510	int ret;
 511
 512	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 513	if (ret == 0 && write)
 514		dirty_background_ratio = 0;
 515	return ret;
 516}
 517
 518static int dirty_ratio_handler(struct ctl_table *table, int write, void *buffer,
 519		size_t *lenp, loff_t *ppos)
 520{
 521	int old_ratio = vm_dirty_ratio;
 522	int ret;
 523
 524	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 525	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
 526		writeback_set_ratelimit();
 527		vm_dirty_bytes = 0;
 528	}
 529	return ret;
 530}
 531
 532static int dirty_bytes_handler(struct ctl_table *table, int write,
 533		void *buffer, size_t *lenp, loff_t *ppos)
 534{
 535	unsigned long old_bytes = vm_dirty_bytes;
 536	int ret;
 537
 538	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 539	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
 540		writeback_set_ratelimit();
 541		vm_dirty_ratio = 0;
 542	}
 543	return ret;
 544}
 545#endif
 546
 547static unsigned long wp_next_time(unsigned long cur_time)
 548{
 549	cur_time += VM_COMPLETIONS_PERIOD_LEN;
 550	/* 0 has a special meaning... */
 551	if (!cur_time)
 552		return 1;
 553	return cur_time;
 554}
 555
 556static void wb_domain_writeout_add(struct wb_domain *dom,
 557				   struct fprop_local_percpu *completions,
 558				   unsigned int max_prop_frac, long nr)
 559{
 560	__fprop_add_percpu_max(&dom->completions, completions,
 561			       max_prop_frac, nr);
 562	/* First event after period switching was turned off? */
 563	if (unlikely(!dom->period_time)) {
 564		/*
 565		 * We can race with other wb_domain_writeout_add() calls here,
 566		 * but it does not cause any harm since the resulting time when
 567		 * the timer will fire and what is in writeout_period_time will
 568		 * be roughly the same.
 569		 */
 570		dom->period_time = wp_next_time(jiffies);
 571		mod_timer(&dom->period_timer, dom->period_time);
 572	}
 573}
 574
 575/*
 576 * Increment @wb's writeout completion count and the global writeout
 577 * completion count. Called from __folio_end_writeback().
 578 */
 579static inline void __wb_writeout_add(struct bdi_writeback *wb, long nr)
 580{
 581	struct wb_domain *cgdom;
 582
 583	wb_stat_mod(wb, WB_WRITTEN, nr);
 584	wb_domain_writeout_add(&global_wb_domain, &wb->completions,
 585			       wb->bdi->max_prop_frac, nr);
 586
 587	cgdom = mem_cgroup_wb_domain(wb);
 588	if (cgdom)
 589		wb_domain_writeout_add(cgdom, wb_memcg_completions(wb),
 590				       wb->bdi->max_prop_frac, nr);
 591}
 592
 593void wb_writeout_inc(struct bdi_writeback *wb)
 594{
 595	unsigned long flags;
 596
 597	local_irq_save(flags);
 598	__wb_writeout_add(wb, 1);
 599	local_irq_restore(flags);
 600}
 601EXPORT_SYMBOL_GPL(wb_writeout_inc);
 602
 603/*
 604 * On an idle system, we can be called long after we were scheduled because
 605 * we use deferred timers, so account for the missed periods.
 606 */
 607static void writeout_period(struct timer_list *t)
 608{
 609	struct wb_domain *dom = from_timer(dom, t, period_timer);
 610	int miss_periods = (jiffies - dom->period_time) /
 611						 VM_COMPLETIONS_PERIOD_LEN;
 612
 613	if (fprop_new_period(&dom->completions, miss_periods + 1)) {
 614		dom->period_time = wp_next_time(dom->period_time +
 615				miss_periods * VM_COMPLETIONS_PERIOD_LEN);
 616		mod_timer(&dom->period_timer, dom->period_time);
 617	} else {
 618		/*
 619		 * Aging has zeroed all fractions. Stop wasting CPU on period
 620		 * updates.
 621		 */
 622		dom->period_time = 0;
 623	}
 624}
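/*
 * Illustrative timing (not kernel code): with a deferrable timer the CPU
 * may stay idle well past dom->period_time.  If the handler finally runs
 * 3 * VM_COMPLETIONS_PERIOD_LEN jiffies late, miss_periods == 3 and the
 * fraction estimator is aged 4 periods in one go; the next expiry is then
 * re-aligned to the original period grid rather than to "now".
 */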
 625
 626int wb_domain_init(struct wb_domain *dom, gfp_t gfp)
 627{
 628	memset(dom, 0, sizeof(*dom));
 629
 630	spin_lock_init(&dom->lock);
 631
 632	timer_setup(&dom->period_timer, writeout_period, TIMER_DEFERRABLE);
 633
 634	dom->dirty_limit_tstamp = jiffies;
 635
 636	return fprop_global_init(&dom->completions, gfp);
 637}
 638
 639#ifdef CONFIG_CGROUP_WRITEBACK
 640void wb_domain_exit(struct wb_domain *dom)
 641{
 642	del_timer_sync(&dom->period_timer);
 643	fprop_global_destroy(&dom->completions);
 644}
 645#endif
 646
 647/*
 648 * bdi_min_ratio keeps the sum of the minimum dirty shares of all
 649 * registered backing devices, which, for obvious reasons, cannot
 650 * exceed 100%.
 651 */
 652static unsigned int bdi_min_ratio;
 653
 654static int bdi_check_pages_limit(unsigned long pages)
 655{
 656	unsigned long max_dirty_pages = global_dirtyable_memory();
 657
 658	if (pages > max_dirty_pages)
 659		return -EINVAL;
 660
 661	return 0;
 662}
 663
 664static unsigned long bdi_ratio_from_pages(unsigned long pages)
 665{
 666	unsigned long background_thresh;
 667	unsigned long dirty_thresh;
 668	unsigned long ratio;
 669
 670	global_dirty_limits(&background_thresh, &dirty_thresh);
 671	ratio = div64_u64(pages * 100ULL * BDI_RATIO_SCALE, dirty_thresh);
 672
 673	return ratio;
 674}
 675
 676static u64 bdi_get_bytes(unsigned int ratio)
 677{
 678	unsigned long background_thresh;
 679	unsigned long dirty_thresh;
 680	u64 bytes;
 681
 682	global_dirty_limits(&background_thresh, &dirty_thresh);
 683	bytes = (dirty_thresh * PAGE_SIZE * ratio) / BDI_RATIO_SCALE / 100;
 684
 685	return bytes;
 686}
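/*
 * Illustrative round trip (assuming BDI_RATIO_SCALE == 10000 and
 * PAGE_SIZE == 4096, as in recent kernels): with a global dirty_thresh
 * of 1,000,000 pages, asking for min_bytes of 40960000 bytes (10,000
 * pages) gives
 *
 *	ratio = 10000 * 100 * 10000 / 1000000 = 10000	(i.e. 1.0000%)
 *
 * and bdi_get_bytes(10000) maps it back:
 *
 *	bytes = 1000000 * 4096 * 10000 / 10000 / 100 = 40960000
 *
 * Note that both directions scale with the *current* dirty_thresh, so the
 * byte value reported later can drift as dirtyable memory changes.
 */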
 687
 688static int __bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
 689{
 690	unsigned int delta;
 691	int ret = 0;
 692
 693	if (min_ratio > 100 * BDI_RATIO_SCALE)
 694		return -EINVAL;
 695
 696	spin_lock_bh(&bdi_lock);
 697	if (min_ratio > bdi->max_ratio) {
 698		ret = -EINVAL;
 699	} else {
 700		if (min_ratio < bdi->min_ratio) {
 701			delta = bdi->min_ratio - min_ratio;
 702			bdi_min_ratio -= delta;
 703			bdi->min_ratio = min_ratio;
 704		} else {
 705			delta = min_ratio - bdi->min_ratio;
 706			if (bdi_min_ratio + delta < 100 * BDI_RATIO_SCALE) {
 707				bdi_min_ratio += delta;
 708				bdi->min_ratio = min_ratio;
 709			} else {
 710				ret = -EINVAL;
 711			}
 712		}
 713	}
 714	spin_unlock_bh(&bdi_lock);
 715
 716	return ret;
 717}
 718
 719static int __bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio)
 720{
 721	int ret = 0;
 722
 723	if (max_ratio > 100 * BDI_RATIO_SCALE)
 724		return -EINVAL;
 725
 726	spin_lock_bh(&bdi_lock);
 727	if (bdi->min_ratio > max_ratio) {
 728		ret = -EINVAL;
 729	} else {
 730		bdi->max_ratio = max_ratio;
 731		bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) /
 732						(100 * BDI_RATIO_SCALE);
 733	}
 734	spin_unlock_bh(&bdi_lock);
 735
 736	return ret;
 737}
 738
 739int bdi_set_min_ratio_no_scale(struct backing_dev_info *bdi, unsigned int min_ratio)
 740{
 741	return __bdi_set_min_ratio(bdi, min_ratio);
 742}
 743
 744int bdi_set_max_ratio_no_scale(struct backing_dev_info *bdi, unsigned int max_ratio)
 745{
 746	return __bdi_set_max_ratio(bdi, max_ratio);
 747}
 748
 749int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
 750{
 751	return __bdi_set_min_ratio(bdi, min_ratio * BDI_RATIO_SCALE);
 752}
 753
 754int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio)
 755{
 756	return __bdi_set_max_ratio(bdi, max_ratio * BDI_RATIO_SCALE);
 757}
 758EXPORT_SYMBOL(bdi_set_max_ratio);
 759
 760u64 bdi_get_min_bytes(struct backing_dev_info *bdi)
 761{
 762	return bdi_get_bytes(bdi->min_ratio);
 763}
 764
 765int bdi_set_min_bytes(struct backing_dev_info *bdi, u64 min_bytes)
 766{
 767	int ret;
 768	unsigned long pages = min_bytes >> PAGE_SHIFT;
 769	unsigned long min_ratio;
 770
 771	ret = bdi_check_pages_limit(pages);
 772	if (ret)
 773		return ret;
 774
 775	min_ratio = bdi_ratio_from_pages(pages);
 776	return __bdi_set_min_ratio(bdi, min_ratio);
 777}
 778
 779u64 bdi_get_max_bytes(struct backing_dev_info *bdi)
 780{
 781	return bdi_get_bytes(bdi->max_ratio);
 782}
 783
 784int bdi_set_max_bytes(struct backing_dev_info *bdi, u64 max_bytes)
 785{
 786	int ret;
 787	unsigned long pages = max_bytes >> PAGE_SHIFT;
 788	unsigned long max_ratio;
 789
 790	ret = bdi_check_pages_limit(pages);
 791	if (ret)
 792		return ret;
 793
 794	max_ratio = bdi_ratio_from_pages(pages);
 795	return __bdi_set_max_ratio(bdi, max_ratio);
 796}
 797
 798int bdi_set_strict_limit(struct backing_dev_info *bdi, unsigned int strict_limit)
 799{
 800	if (strict_limit > 1)
 801		return -EINVAL;
 802
 803	spin_lock_bh(&bdi_lock);
 804	if (strict_limit)
 805		bdi->capabilities |= BDI_CAP_STRICTLIMIT;
 806	else
 807		bdi->capabilities &= ~BDI_CAP_STRICTLIMIT;
 808	spin_unlock_bh(&bdi_lock);
 809
 810	return 0;
 811}
 812
 813static unsigned long dirty_freerun_ceiling(unsigned long thresh,
 814					   unsigned long bg_thresh)
 815{
 816	return (thresh + bg_thresh) / 2;
 817}
 818
 819static unsigned long hard_dirty_limit(struct wb_domain *dom,
 820				      unsigned long thresh)
 821{
 822	return max(thresh, dom->dirty_limit);
 823}
 824
 825/*
 826 * Memory which can be further allocated to a memcg domain is capped by
 827 * system-wide clean memory excluding the amount being used in the domain.
 828 */
 829static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
 830			    unsigned long filepages, unsigned long headroom)
 831{
 832	struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc);
 833	unsigned long clean = filepages - min(filepages, mdtc->dirty);
 834	unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty);
 835	unsigned long other_clean = global_clean - min(global_clean, clean);
 836
 837	mdtc->avail = filepages + min(headroom, other_clean);
 838}
 839
 840/**
 841 * __wb_calc_thresh - @wb's share of dirty throttling threshold
 842 * @dtc: dirty_throttle_control of interest
 843 *
 844 * Note that balance_dirty_pages() will only seriously take it as a hard limit
 845 * when sleeping max_pause per page is not enough to keep the dirty pages under
 846 * control. For example, when the device is completely stalled due to some error
 847 * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
 848 * In other normal situations, it acts more gently by throttling the tasks
 849 * more (rather than completely blocking them) when the wb dirty pages go high.
 850 *
 851 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
 852 * - starving fast devices
 853 * - piling up dirty pages (that will take a long time to sync) on slow devices
 854 *
 855 * The wb's share of the dirty limit adapts to its throughput and is
 856 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 857 *
 858 * Return: @wb's dirty limit in pages. The term "dirty" in the context of
 859 * dirty balancing includes all PG_dirty and PG_writeback pages.
 860 */
 861static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc)
 862{
 863	struct wb_domain *dom = dtc_dom(dtc);
 864	unsigned long thresh = dtc->thresh;
 865	u64 wb_thresh;
 866	unsigned long numerator, denominator;
 867	unsigned long wb_min_ratio, wb_max_ratio;
 868
 869	/*
 870	 * Calculate this BDI's share of the thresh ratio.
 871	 */
 872	fprop_fraction_percpu(&dom->completions, dtc->wb_completions,
 873			      &numerator, &denominator);
 874
 875	wb_thresh = (thresh * (100 * BDI_RATIO_SCALE - bdi_min_ratio)) / (100 * BDI_RATIO_SCALE);
 876	wb_thresh *= numerator;
 877	wb_thresh = div64_ul(wb_thresh, denominator);
 878
 879	wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio);
 880
 881	wb_thresh += (thresh * wb_min_ratio) / (100 * BDI_RATIO_SCALE);
 882	if (wb_thresh > (thresh * wb_max_ratio) / (100 * BDI_RATIO_SCALE))
 883		wb_thresh = thresh * wb_max_ratio / (100 * BDI_RATIO_SCALE);
 884
 885	return wb_thresh;
 886}
 887
 888unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
 889{
 890	struct dirty_throttle_control gdtc = { GDTC_INIT(wb),
 891					       .thresh = thresh };
 892	return __wb_calc_thresh(&gdtc);
 893}
 894
 895/*
 896 *                           setpoint - dirty 3
 897 *        f(dirty) := 1.0 + (----------------)
 898 *                           limit - setpoint
 899 *
 900 * it is a 3rd order polynomial subject to
 901 *
 902 * (1) f(freerun)  = 2.0 => ramp up dirty_ratelimit reasonably fast
 903 * (2) f(setpoint) = 1.0 => the balance point
 904 * (3) f(limit)    = 0   => the hard limit
 905 * (4) df/dx      <= 0	 => negative feedback control
 906 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
 907 *     => fast response on large errors; small oscillation near setpoint
 908 */
 909static long long pos_ratio_polynom(unsigned long setpoint,
 910					  unsigned long dirty,
 911					  unsigned long limit)
 912{
 913	long long pos_ratio;
 914	long x;
 915
 916	x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
 917		      (limit - setpoint) | 1);
 918	pos_ratio = x;
 919	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
 920	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
 921	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;
 922
 923	return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT);
 924}
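/*
 * Quick sanity check of the anchor points (illustrative, with
 * RATELIMIT_CALC_SHIFT = 10 so 1.0 is represented as 1024).  Take
 * freerun = 1000 and limit = 2000, hence setpoint = 1500:
 *
 *	dirty == freerun  (1000): x = +1.0, f = 1 + 1^3 = 2.0  -> 2048
 *	dirty == setpoint (1500): x =  0.0, f = 1 + 0   = 1.0  -> 1024
 *	dirty == limit    (2000): x = -1.0, f = 1 - 1   = 0.0  ->    0
 *
 * Halfway between setpoint and limit (1750): x = -0.5, f = 0.875, i.e.
 * the response stays gentle near the setpoint and steepens toward limit.
 */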
 925
 926/*
 927 * Dirty position control.
 928 *
 929 * (o) global/bdi setpoints
 930 *
 931 * We want the dirty pages to be balanced around the global/wb setpoints.
 932 * When the number of dirty pages is higher/lower than the setpoint, the
 933 * dirty position control ratio (and hence task dirty ratelimit) will be
 934 * decreased/increased to bring the dirty pages back to the setpoint.
 935 *
 936 *     pos_ratio = 1 << RATELIMIT_CALC_SHIFT
 937 *
 938 *     if (dirty < setpoint) scale up   pos_ratio
 939 *     if (dirty > setpoint) scale down pos_ratio
 940 *
 941 *     if (wb_dirty < wb_setpoint) scale up   pos_ratio
 942 *     if (wb_dirty > wb_setpoint) scale down pos_ratio
 943 *
 944 *     task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
 945 *
 946 * (o) global control line
 947 *
 948 *     ^ pos_ratio
 949 *     |
 950 *     |            |<===== global dirty control scope ======>|
 951 * 2.0  * * * * * * *
 952 *     |            .*
 953 *     |            . *
 954 *     |            .   *
 955 *     |            .     *
 956 *     |            .        *
 957 *     |            .            *
 958 * 1.0 ................................*
 959 *     |            .                  .     *
 960 *     |            .                  .          *
 961 *     |            .                  .              *
 962 *     |            .                  .                 *
 963 *     |            .                  .                    *
 964 *   0 +------------.------------------.----------------------*------------->
 965 *           freerun^          setpoint^                 limit^   dirty pages
 966 *
 967 * (o) wb control line
 968 *
 969 *     ^ pos_ratio
 970 *     |
 971 *     |            *
 972 *     |              *
 973 *     |                *
 974 *     |                  *
 975 *     |                    * |<=========== span ============>|
 976 * 1.0 .......................*
 977 *     |                      . *
 978 *     |                      .   *
 979 *     |                      .     *
 980 *     |                      .       *
 981 *     |                      .         *
 982 *     |                      .           *
 983 *     |                      .             *
 984 *     |                      .               *
 985 *     |                      .                 *
 986 *     |                      .                   *
 987 *     |                      .                     *
 988 * 1/4 ...............................................* * * * * * * * * * * *
 989 *     |                      .                         .
 990 *     |                      .                           .
 991 *     |                      .                             .
 992 *   0 +----------------------.-------------------------------.------------->
 993 *                wb_setpoint^                    x_intercept^
 994 *
 995 * The wb control line won't drop below pos_ratio=1/4, so that wb_dirty can
 996 * be smoothly throttled down to normal if it starts high in situations like
 997 * - start writing to a slow SD card and a fast disk at the same time. The SD
 998 *   card's wb_dirty may rush to many times higher than wb_setpoint.
 999 * - the wb dirty thresh drops quickly due to change of JBOD workload
1000 */
1001static void wb_position_ratio(struct dirty_throttle_control *dtc)
1002{
1003	struct bdi_writeback *wb = dtc->wb;
1004	unsigned long write_bw = READ_ONCE(wb->avg_write_bandwidth);
1005	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
1006	unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
1007	unsigned long wb_thresh = dtc->wb_thresh;
1008	unsigned long x_intercept;
1009	unsigned long setpoint;		/* dirty pages' target balance point */
1010	unsigned long wb_setpoint;
1011	unsigned long span;
1012	long long pos_ratio;		/* for scaling up/down the rate limit */
1013	long x;
1014
1015	dtc->pos_ratio = 0;
1016
1017	if (unlikely(dtc->dirty >= limit))
1018		return;
1019
1020	/*
1021	 * global setpoint
1022	 *
1023	 * See comment for pos_ratio_polynom().
1024	 */
1025	setpoint = (freerun + limit) / 2;
1026	pos_ratio = pos_ratio_polynom(setpoint, dtc->dirty, limit);
1027
1028	/*
1029	 * The strictlimit feature is a tool preventing mistrusted filesystems
1030	 * from growing a large number of dirty pages before throttling. For
1031	 * such filesystems balance_dirty_pages always checks wb counters
1032	 * against wb limits, even if the global "nr_dirty" is under "freerun".
1033	 * This is especially important for fuse, which sets bdi->max_ratio to
1034	 * 1% by default. Without the strictlimit feature, fuse writeback may
1035	 * consume an arbitrary amount of RAM because it is accounted in
1036	 * NR_WRITEBACK_TEMP, which is not involved in calculating "nr_dirty".
1037	 *
1038	 * Here, in wb_position_ratio(), we calculate pos_ratio based on
1039	 * two values: wb_dirty and wb_thresh. Let's consider an example:
1040	 * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global
1041	 * limits are set by default to 10% and 20% (background and throttle).
1042	 * Then wb_thresh is 1% of 20% of 16GB. This amounts to ~8K pages.
1043	 * wb_calc_thresh(wb, bg_thresh) is about ~4K pages. wb_setpoint is
1044	 * about ~6K pages (as the average of background and throttle wb
1045	 * limits). The 3rd order polynomial will provide positive feedback if
1046	 * wb_dirty is under wb_setpoint and vice versa.
1047	 *
1048	 * Note that we cannot use global counters in these calculations
1049	 * because we want to throttle a process writing to a strictlimit wb
1050	 * much earlier than the global "freerun" is reached (~23MB vs. ~2.3GB
1051	 * in the example above).
1052	 */
1053	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
1054		long long wb_pos_ratio;
1055
1056		if (dtc->wb_dirty < 8) {
1057			dtc->pos_ratio = min_t(long long, pos_ratio * 2,
1058					   2 << RATELIMIT_CALC_SHIFT);
1059			return;
1060		}
1061
1062		if (dtc->wb_dirty >= wb_thresh)
1063			return;
1064
1065		wb_setpoint = dirty_freerun_ceiling(wb_thresh,
1066						    dtc->wb_bg_thresh);
1067
1068		if (wb_setpoint == 0 || wb_setpoint == wb_thresh)
1069			return;
1070
1071		wb_pos_ratio = pos_ratio_polynom(wb_setpoint, dtc->wb_dirty,
1072						 wb_thresh);
1073
1074		/*
1075		 * Typically, in the strictlimit case, wb_setpoint << setpoint
1076		 * and pos_ratio >> wb_pos_ratio. In other words, the global
1077		 * state ("dirty") is not the limiting factor and we have to
1078		 * make the decision based on wb counters. But there is an
1079		 * important case when global pos_ratio should get precedence:
1080		 * global limits are exceeded (e.g. due to activities on other
1081		 * wb's) while given strictlimit wb is below limit.
1082		 *
1083		 * "pos_ratio * wb_pos_ratio" would work for the case above,
1084		 * but it would look too unnatural for the case of all
1085		 * activity in the system coming from a single strictlimit wb
1086		 * with bdi->max_ratio == 100%.
1087		 *
1088		 * Note that min() below somewhat changes the dynamics of the
1089		 * control system. Normally, pos_ratio value can be well over 3
1090		 * (when globally we are at freerun and wb is well below wb
1091		 * setpoint). Now the maximum pos_ratio in the same situation
1092		 * is 2. We might want to tweak this if we observe the control
1093		 * system is too slow to adapt.
1094		 */
1095		dtc->pos_ratio = min(pos_ratio, wb_pos_ratio);
1096		return;
1097	}
1098
1099	/*
1100	 * We have computed basic pos_ratio above based on global situation. If
1101	 * the wb is over/under its share of dirty pages, we want to scale
1102	 * pos_ratio further down/up. That is done by the following mechanism.
1103	 */
1104
1105	/*
1106	 * wb setpoint
1107	 *
1108	 *        f(wb_dirty) := 1.0 + k * (wb_dirty - wb_setpoint)
1109	 *
1110	 *                        x_intercept - wb_dirty
1111	 *                     := --------------------------
1112	 *                        x_intercept - wb_setpoint
1113	 *
1114	 * The main wb control line is a linear function subject to
1115	 *
1116	 * (1) f(wb_setpoint) = 1.0
1117	 * (2) k = - 1 / (8 * write_bw)  (in single wb case)
1118	 *     or equally: x_intercept = wb_setpoint + 8 * write_bw
1119	 *
1120	 * In the single wb case, the dirty pages are observed to fluctuate
1121	 * regularly within the range
1122	 *        [wb_setpoint - write_bw/2, wb_setpoint + write_bw/2]
1123	 * for various filesystems, where (2) yields a reasonable 12.5%
1124	 * fluctuation range for pos_ratio.
1125	 *
1126	 * In the JBOD case, wb_thresh (not wb_dirty!) could fluctuate up to
1127	 * its own size, so move the slope over accordingly and choose a slope
1128	 * that yields 100% pos_ratio fluctuation on a suddenly doubled wb_thresh.
1129	 */
1130	if (unlikely(wb_thresh > dtc->thresh))
1131		wb_thresh = dtc->thresh;
1132	/*
1133	 * It's very possible that wb_thresh is close to 0 not because the
1134	 * device is slow, but because it has remained inactive for a long time.
1135	 * Honour such devices with a reasonably good (hopefully IO-efficient)
1136	 * threshold, so that occasional writes won't be blocked and active
1137	 * writes can ramp up the threshold quickly.
1138	 */
1139	wb_thresh = max(wb_thresh, (limit - dtc->dirty) / 8);
1140	/*
1141	 * scale global setpoint to wb's:
1142	 *	wb_setpoint = setpoint * wb_thresh / thresh
1143	 */
1144	x = div_u64((u64)wb_thresh << 16, dtc->thresh | 1);
1145	wb_setpoint = setpoint * (u64)x >> 16;
1146	/*
1147	 * Use span=(8*write_bw) in single wb case as indicated by
1148	 * (thresh - wb_thresh ~= 0) and transit to wb_thresh in JBOD case.
1149	 *
1150	 *        wb_thresh                    thresh - wb_thresh
1151	 * span = --------- * (8 * write_bw) + ------------------ * wb_thresh
1152	 *         thresh                           thresh
1153	 */
1154	span = (dtc->thresh - wb_thresh + 8 * write_bw) * (u64)x >> 16;
1155	x_intercept = wb_setpoint + span;
1156
1157	if (dtc->wb_dirty < x_intercept - span / 4) {
1158		pos_ratio = div64_u64(pos_ratio * (x_intercept - dtc->wb_dirty),
1159				      (x_intercept - wb_setpoint) | 1);
1160	} else
1161		pos_ratio /= 4;
1162
1163	/*
1164	 * wb reserve area, safeguard against dirty pool underrun and disk idle
1165	 * It may push the desired control point of global dirty pages higher
1166	 * than setpoint.
1167	 */
1168	x_intercept = wb_thresh / 2;
1169	if (dtc->wb_dirty < x_intercept) {
1170		if (dtc->wb_dirty > x_intercept / 8)
1171			pos_ratio = div_u64(pos_ratio * x_intercept,
1172					    dtc->wb_dirty);
1173		else
1174			pos_ratio *= 8;
1175	}
1176
1177	dtc->pos_ratio = pos_ratio;
1178}
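/*
 * Illustrative numbers for the wb control line above (single wb on a
 * disk writing ~25600 pages/s, i.e. ~100 MB/s with 4k pages): with
 * thresh ~= wb_thresh, x == 1, so
 *
 *	wb_setpoint ~= setpoint
 *	span        ~= 8 * write_bw = 204800 pages
 *	x_intercept  = wb_setpoint + span
 *
 * A wb_dirty excess of write_bw/2 (~12800 pages, half a second of
 * writeout) then scales pos_ratio by (span - 12800) / span = 15/16, and
 * a deficit of the same size scales it by 17/16 -- the ~12.5% fluctuation
 * band mentioned in the comment above.
 */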
1179
1180static void wb_update_write_bandwidth(struct bdi_writeback *wb,
1181				      unsigned long elapsed,
1182				      unsigned long written)
1183{
1184	const unsigned long period = roundup_pow_of_two(3 * HZ);
1185	unsigned long avg = wb->avg_write_bandwidth;
1186	unsigned long old = wb->write_bandwidth;
1187	u64 bw;
1188
1189	/*
1190	 * bw = written * HZ / elapsed
1191	 *
1192	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
1193	 * write_bandwidth = ---------------------------------------------------
1194	 *                                          period
1195	 *
1196	 * @written may have decreased due to folio_redirty_for_writepage().
1197	 * Avoid underflowing @bw calculation.
1198	 */
1199	bw = written - min(written, wb->written_stamp);
1200	bw *= HZ;
1201	if (unlikely(elapsed > period)) {
1202		bw = div64_ul(bw, elapsed);
1203		avg = bw;
1204		goto out;
1205	}
1206	bw += (u64)wb->write_bandwidth * (period - elapsed);
1207	bw >>= ilog2(period);
1208
1209	/*
1210	 * one more level of smoothing, for filtering out sudden spikes
1211	 */
1212	if (avg > old && old >= (unsigned long)bw)
1213		avg -= (avg - old) >> 3;
1214
1215	if (avg < old && old <= (unsigned long)bw)
1216		avg += (old - avg) >> 3;
1217
1218out:
1219	/* keep avg > 0 to guarantee that tot > 0 if there are dirty wbs */
1220	avg = max(avg, 1LU);
1221	if (wb_has_dirty_io(wb)) {
1222		long delta = avg - wb->avg_write_bandwidth;
1223		WARN_ON_ONCE(atomic_long_add_return(delta,
1224					&wb->bdi->tot_write_bandwidth) <= 0);
1225	}
1226	wb->write_bandwidth = bw;
1227	WRITE_ONCE(wb->avg_write_bandwidth, avg);
1228}
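/*
 * Userspace sketch (illustrative, not kernel code) of the blend above,
 * assuming HZ == 1000 so period == roundup_pow_of_two(3 * HZ) == 4096
 * jiffies, and elapsed <= period (the kernel handles the overlong case
 * separately).  The neat part: the instantaneous bandwidth
 * written * HZ / elapsed never needs an explicit division, because its
 * weight in the average is elapsed itself and the two cancel:
 *
 *	(written*HZ/elapsed)*elapsed + old_bw*(period - elapsed)
 *	--------------------------------------------------------
 *	                        period
 */
static unsigned long example_blend_bw(unsigned long old_bw,
				      unsigned long written_pages,
				      unsigned long elapsed)
{
	const unsigned long period = 4096;		/* jiffies, power of two */
	unsigned long long bw;

	bw = (unsigned long long)written_pages * 1000;	/* == inst_bw * elapsed */
	bw += (unsigned long long)old_bw * (period - elapsed);
	return (unsigned long)(bw >> 12);		/* ilog2(4096) == 12 */
}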
1229
1230static void update_dirty_limit(struct dirty_throttle_control *dtc)
1231{
1232	struct wb_domain *dom = dtc_dom(dtc);
1233	unsigned long thresh = dtc->thresh;
1234	unsigned long limit = dom->dirty_limit;
1235
1236	/*
1237	 * Follow up in one step.
1238	 */
1239	if (limit < thresh) {
1240		limit = thresh;
1241		goto update;
1242	}
1243
1244	/*
1245	 * Follow down slowly. Use the higher one as the target, because thresh
1246	 * may drop below dirty. This is exactly the reason to introduce
1247	 * dom->dirty_limit which is guaranteed to lie above the dirty pages.
1248	 */
1249	thresh = max(thresh, dtc->dirty);
1250	if (limit > thresh) {
1251		limit -= (limit - thresh) >> 5;
1252		goto update;
1253	}
1254	return;
1255update:
1256	dom->dirty_limit = limit;
1257}
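/*
 * Illustrative decay rate: the "follow down slowly" step removes 1/32 of
 * the gap every BANDWIDTH_INTERVAL (~200ms), so the gap shrinks roughly
 * as (31/32)^n -- about a 4.4s half-life (0.2s * 32 * ln 2).  E.g. a
 * limit sitting 64000 pages above thresh drops by 2000 pages on the
 * first update.
 */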
1258
1259static void domain_update_dirty_limit(struct dirty_throttle_control *dtc,
1260				      unsigned long now)
1261{
1262	struct wb_domain *dom = dtc_dom(dtc);
1263
1264	/*
1265	 * check locklessly first to optimize away locking for the most time
1266	 */
1267	if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL))
1268		return;
1269
1270	spin_lock(&dom->lock);
1271	if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) {
1272		update_dirty_limit(dtc);
1273		dom->dirty_limit_tstamp = now;
1274	}
1275	spin_unlock(&dom->lock);
1276}
1277
1278/*
1279 * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
1280 *
1281 * Normal wb tasks will be curbed at or below it in the long term.
1282 * Obviously it should be around (write_bw / N) when there are N dd tasks.
1283 */
1284static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
1285				      unsigned long dirtied,
1286				      unsigned long elapsed)
1287{
1288	struct bdi_writeback *wb = dtc->wb;
1289	unsigned long dirty = dtc->dirty;
1290	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
1291	unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
1292	unsigned long setpoint = (freerun + limit) / 2;
1293	unsigned long write_bw = wb->avg_write_bandwidth;
1294	unsigned long dirty_ratelimit = wb->dirty_ratelimit;
1295	unsigned long dirty_rate;
1296	unsigned long task_ratelimit;
1297	unsigned long balanced_dirty_ratelimit;
1298	unsigned long step;
1299	unsigned long x;
1300	unsigned long shift;
1301
1302	/*
1303	 * The dirty rate will match the writeout rate in long term, except
1304	 * when dirty pages are truncated by userspace or re-dirtied by FS.
1305	 */
1306	dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed;
1307
1308	/*
1309	 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
1310	 */
1311	task_ratelimit = (u64)dirty_ratelimit *
1312					dtc->pos_ratio >> RATELIMIT_CALC_SHIFT;
1313	task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */
1314
1315	/*
1316	 * A linear estimation of the "balanced" throttle rate. The theory is,
1317	 * if there are N dd tasks, each throttled at task_ratelimit, the wb's
1318	 * dirty_rate will be measured to be (N * task_ratelimit). So the below
1319	 * formula will yield the balanced rate limit (write_bw / N).
1320	 *
1321	 * Note that the expanded form is not a pure rate feedback:
1322	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate)		     (1)
1323	 * but also takes pos_ratio into account:
1324	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio  (2)
1325	 *
1326	 * (1) is not realistic because pos_ratio also takes part in balancing
1327	 * the dirty rate.  Consider the state
1328	 *	pos_ratio = 0.5						     (3)
1329	 *	rate = 2 * (write_bw / N)				     (4)
1330 * If (1) is used, it will get stuck in that state! Because each dd will
1331	 * be throttled at
1332	 *	task_ratelimit = pos_ratio * rate = (write_bw / N)	     (5)
1333	 * yielding
1334	 *	dirty_rate = N * task_ratelimit = write_bw		     (6)
1335	 * put (6) into (1) we get
1336	 *	rate_(i+1) = rate_(i)					     (7)
1337	 *
1338	 * So we end up using (2) to always keep
1339	 *	rate_(i+1) ~= (write_bw / N)				     (8)
1340	 * regardless of the value of pos_ratio. As long as (8) is satisfied,
1341 * pos_ratio is able to drive itself to 1.0, which is not only where
1342 * the dirty count meets the setpoint, but also where the slope of
1343 * pos_ratio is flattest and hence task_ratelimit fluctuates least.
1344	 */
1345	balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
1346					   dirty_rate | 1);
1347	/*
1348	 * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
1349	 */
1350	if (unlikely(balanced_dirty_ratelimit > write_bw))
1351		balanced_dirty_ratelimit = write_bw;
1352
1353	/*
1354	 * We could safely do this and return immediately:
1355	 *
1356	 *	wb->dirty_ratelimit = balanced_dirty_ratelimit;
1357	 *
1358	 * However to get a more stable dirty_ratelimit, the below elaborated
1359	 * code makes use of task_ratelimit to filter out singular points and
1360	 * limit the step size.
1361	 *
1362	 * The below code essentially only uses the relative value of
1363	 *
1364	 *	task_ratelimit - dirty_ratelimit
1365	 *	= (pos_ratio - 1) * dirty_ratelimit
1366	 *
1367	 * which reflects the direction and size of dirty position error.
1368	 */
1369
1370	/*
1371	 * dirty_ratelimit will follow balanced_dirty_ratelimit iff
1372	 * task_ratelimit is on the same side of dirty_ratelimit, too.
1373	 * For example, when
1374	 * - dirty_ratelimit > balanced_dirty_ratelimit
1375	 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
1376	 * lowering dirty_ratelimit will help meet both the position and rate
1377	 * control targets. Otherwise, don't update dirty_ratelimit if it will
1378	 * only help meet the rate target. After all, what the users ultimately
1379	 * feel and care are stable dirty rate and small position error.
1380	 *
1381	 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
1382	 * and filter out the singular points of balanced_dirty_ratelimit, which
1383	 * keeps jumping around randomly and can even leap far away at times
1384	 * due to the small 200ms estimation period of dirty_rate (we want to
1385	 * keep that period small to reduce time lags).
1386	 */
1387	step = 0;
1388
1389	/*
1390	 * For strictlimit case, calculations above were based on wb counters
1391	 * and limits (starting from pos_ratio = wb_position_ratio() and up to
1392	 * balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate).
1393	 * Hence, to calculate "step" properly, we have to use wb_dirty as
1394	 * "dirty" and wb_setpoint as "setpoint".
1395	 *
1396	 * We ramp up dirty_ratelimit forcibly if wb_dirty is low because
1397	 * it's possible that wb_thresh is close to zero due to inactivity
1398	 * of the backing device.
1399	 */
1400	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
1401		dirty = dtc->wb_dirty;
1402		if (dtc->wb_dirty < 8)
1403			setpoint = dtc->wb_dirty + 1;
1404		else
1405			setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2;
1406	}
1407
1408	if (dirty < setpoint) {
1409		x = min3(wb->balanced_dirty_ratelimit,
1410			 balanced_dirty_ratelimit, task_ratelimit);
1411		if (dirty_ratelimit < x)
1412			step = x - dirty_ratelimit;
1413	} else {
1414		x = max3(wb->balanced_dirty_ratelimit,
1415			 balanced_dirty_ratelimit, task_ratelimit);
1416		if (dirty_ratelimit > x)
1417			step = dirty_ratelimit - x;
1418	}
1419
1420	/*
1421	 * Don't pursue 100% rate matching. It's impossible since the balanced
1422	 * rate itself is constantly fluctuating. So decrease the tracking speed
1423	 * when it gets close to the target. This helps eliminate pointless tremors.
1424	 */
1425	shift = dirty_ratelimit / (2 * step + 1);
1426	if (shift < BITS_PER_LONG)
1427		step = DIV_ROUND_UP(step >> shift, 8);
1428	else
1429		step = 0;
1430
1431	if (dirty_ratelimit < balanced_dirty_ratelimit)
1432		dirty_ratelimit += step;
1433	else
1434		dirty_ratelimit -= step;
1435
1436	WRITE_ONCE(wb->dirty_ratelimit, max(dirty_ratelimit, 1UL));
1437	wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit;
1438
1439	trace_bdi_dirty_ratelimit(wb, dirty_rate, task_ratelimit);
1440}
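/*
 * Worked example of the balanced rate (illustrative): 4 dd tasks on a wb
 * with write_bw = 25600 pages/s, each currently throttled at
 * task_ratelimit = 12800 pages/s.  The measured dirty_rate is then
 * 4 * 12800 = 51200 pages/s, and
 *
 *	balanced_dirty_ratelimit = 12800 * 25600 / 51200 = 6400 pages/s
 *
 * which is exactly write_bw / N -- the rate at which the 4 dirtiers
 * jointly match the writeout bandwidth.
 */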
1441
1442static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc,
1443				  struct dirty_throttle_control *mdtc,
1444				  bool update_ratelimit)
1445{
1446	struct bdi_writeback *wb = gdtc->wb;
1447	unsigned long now = jiffies;
1448	unsigned long elapsed;
1449	unsigned long dirtied;
1450	unsigned long written;
1451
1452	spin_lock(&wb->list_lock);
1453
1454	/*
1455	 * Lockless checks for elapsed time are racy and delayed update after
1456	 * IO completion doesn't do it at all (to make sure written pages are
1457	 * accounted reasonably quickly). Make sure elapsed >= 1 to avoid
1458	 * division errors.
1459	 */
1460	elapsed = max(now - wb->bw_time_stamp, 1UL);
1461	dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]);
1462	written = percpu_counter_read(&wb->stat[WB_WRITTEN]);
1463
1464	if (update_ratelimit) {
1465		domain_update_dirty_limit(gdtc, now);
1466		wb_update_dirty_ratelimit(gdtc, dirtied, elapsed);
1467
1468		/*
1469		 * @mdtc is always NULL if !CGROUP_WRITEBACK but the
1470		 * compiler has no way to figure that out.  Help it.
1471		 */
1472		if (IS_ENABLED(CONFIG_CGROUP_WRITEBACK) && mdtc) {
1473			domain_update_dirty_limit(mdtc, now);
1474			wb_update_dirty_ratelimit(mdtc, dirtied, elapsed);
1475		}
1476	}
1477	wb_update_write_bandwidth(wb, elapsed, written);
1478
1479	wb->dirtied_stamp = dirtied;
1480	wb->written_stamp = written;
1481	WRITE_ONCE(wb->bw_time_stamp, now);
1482	spin_unlock(&wb->list_lock);
1483}
1484
1485void wb_update_bandwidth(struct bdi_writeback *wb)
1486{
1487	struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };
1488
1489	__wb_update_bandwidth(&gdtc, NULL, false);
1490}
1491
1492/* Interval after which we consider wb idle and don't estimate bandwidth */
1493#define WB_BANDWIDTH_IDLE_JIF (HZ)
1494
1495static void wb_bandwidth_estimate_start(struct bdi_writeback *wb)
1496{
1497	unsigned long now = jiffies;
1498	unsigned long elapsed = now - READ_ONCE(wb->bw_time_stamp);
1499
1500	if (elapsed > WB_BANDWIDTH_IDLE_JIF &&
1501	    !atomic_read(&wb->writeback_inodes)) {
1502		spin_lock(&wb->list_lock);
1503		wb->dirtied_stamp = wb_stat(wb, WB_DIRTIED);
1504		wb->written_stamp = wb_stat(wb, WB_WRITTEN);
1505		WRITE_ONCE(wb->bw_time_stamp, now);
1506		spin_unlock(&wb->list_lock);
1507	}
1508}
1509
1510/*
1511 * After a task has dirtied this many pages, balance_dirty_pages_ratelimited()
1512 * will look to see if it needs to start dirty throttling.
1513 *
1514 * If dirty_poll_interval is too low, big NUMA machines will call the expensive
1515 * global_zone_page_state() too often. So scale it near-sqrt to the safety margin
1516 * (the number of pages we may dirty without exceeding the dirty limits).
1517 */
1518static unsigned long dirty_poll_interval(unsigned long dirty,
1519					 unsigned long thresh)
1520{
1521	if (thresh > dirty)
1522		return 1UL << (ilog2(thresh - dirty) >> 1);
1523
1524	return 1;
1525}
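/*
 * Illustrative values: a safety margin of thresh - dirty = 4096 pages
 * gives ilog2(4096) == 12, so the task may dirty 1 << 6 == 64 more pages
 * before polling the counters again; a margin of 16 pages gives
 * 1 << 2 == 4 pages.  The interval thus grows roughly as the square root
 * of the remaining headroom.
 */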
1526
1527static unsigned long wb_max_pause(struct bdi_writeback *wb,
1528				  unsigned long wb_dirty)
1529{
1530	unsigned long bw = READ_ONCE(wb->avg_write_bandwidth);
1531	unsigned long t;
1532
1533	/*
1534	 * Limit pause time for small memory systems. If we sleep for too
1535	 * long, a small pool of dirty/writeback pages may go empty and the
1536	 * disk may go idle.
1537	 *
1538	 * 8 serves as the safety ratio.
1539	 */
1540	t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
1541	t++;
1542
1543	return min_t(unsigned long, t, MAX_PAUSE);
1544}
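/*
 * Illustrative numbers (assuming HZ == 1000, so MAX_PAUSE == 200
 * jiffies): roundup_pow_of_two(1 + HZ / 8) == 128, and for a wb doing
 * bw = 25600 pages/s with wb_dirty = 2000 pages:
 *
 *	t = 2000 / (1 + 25600 / 128) + 1 = 2000 / 201 + 1 = 10 jiffies
 *
 * i.e. the pause is capped so that, with the 8x safety ratio, the dirty
 * pool cannot drain completely while the task sleeps.
 */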
1545
1546static long wb_min_pause(struct bdi_writeback *wb,
1547			 long max_pause,
1548			 unsigned long task_ratelimit,
1549			 unsigned long dirty_ratelimit,
1550			 int *nr_dirtied_pause)
1551{
1552	long hi = ilog2(READ_ONCE(wb->avg_write_bandwidth));
1553	long lo = ilog2(READ_ONCE(wb->dirty_ratelimit));
1554	long t;		/* target pause */
1555	long pause;	/* estimated next pause */
1556	int pages;	/* target nr_dirtied_pause */
1557
1558	/* target for 10ms pause on 1-dd case */
1559	t = max(1, HZ / 100);
1560
1561	/*
1562	 * Scale up pause time for concurrent dirtiers in order to reduce CPU
1563	 * overheads.
1564	 *
1565	 * (N * 10ms) on 2^N concurrent tasks.
1566	 */
1567	if (hi > lo)
1568		t += (hi - lo) * (10 * HZ) / 1024;
1569
1570	/*
1571	 * This is a bit convoluted. We try to base the next nr_dirtied_pause
1572	 * on the much more stable dirty_ratelimit. However the next pause time
1573	 * will be computed based on task_ratelimit and the two rate limits may
1574	 * depart considerably at some time. Especially if task_ratelimit goes
1575	 * below dirty_ratelimit/2 and the target pause is max_pause, the next
1576	 * pause time will be max_pause*2 _trimmed down_ to max_pause.  As a
1577	 * result task_ratelimit won't be executed faithfully, which could
1578	 * eventually bring down dirty_ratelimit.
1579	 *
1580	 * We apply two rules to fix it up:
1581	 * 1) try to estimate the next pause time and if necessary, use a lower
1582	 *    nr_dirtied_pause so as not to exceed max_pause. When this happens,
1583	 *    nr_dirtied_pause will be "dancing" with task_ratelimit.
1584	 * 2) limit the target pause time to max_pause/2, so that the normal
1585	 *    small fluctuations of task_ratelimit won't trigger rule (1) and
1586	 *    nr_dirtied_pause will remain as stable as dirty_ratelimit.
1587	 */
1588	t = min(t, 1 + max_pause / 2);
1589	pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
1590
1591	/*
1592	 * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
1593	 * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
1594	 * When the 16 consecutive reads are often interrupted by some dirty
1595	 * throttling pause during the async writes, cfq will go idle
1596	 * (deadline is fine). So push nr_dirtied_pause as high as possible
1597	 * until it reaches DIRTY_POLL_THRESH=32 pages.
1598	 */
1599	if (pages < DIRTY_POLL_THRESH) {
1600		t = max_pause;
1601		pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
1602		if (pages > DIRTY_POLL_THRESH) {
1603			pages = DIRTY_POLL_THRESH;
1604			t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
1605		}
1606	}
1607
1608	pause = HZ * pages / (task_ratelimit + 1);
1609	if (pause > max_pause) {
1610		t = max_pause;
1611		pages = task_ratelimit * t / roundup_pow_of_two(HZ);
1612	}
1613
1614	*nr_dirtied_pause = pages;
1615	/*
1616	 * The minimal pause time will normally be half the target pause time.
1617	 */
1618	return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
1619}
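/*
 * Illustrative scaling of the target pause (assuming HZ == 1000): the
 * base target is HZ / 100 == 10 jiffies.  With 8 concurrent dirtiers,
 * hi - lo == ilog2(8) == 3, adding 3 * (10 * HZ) / 1024 ~= 29 jiffies,
 * so ~40ms total -- larger, rarer pauses keep the per-task CPU overhead
 * of balance_dirty_pages() roughly constant as dirtiers multiply.
 */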
1620
1621static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
1622{
1623	struct bdi_writeback *wb = dtc->wb;
1624	unsigned long wb_reclaimable;
1625
1626	/*
1627	 * wb_thresh is not treated as a limiting factor in the way
1628	 * dirty_thresh is, for these reasons:
1629	 * - in JBOD setup, wb_thresh can fluctuate a lot
1630	 * - in a system with HDD and USB key, the USB key may somehow
1631	 *   go into state (wb_dirty >> wb_thresh) either because
1632	 *   wb_dirty starts high, or because wb_thresh drops low.
1633	 *   In this case we don't want to hard throttle the USB key
1634	 *   dirtiers for 100 seconds until wb_dirty drops under
1635	 *   wb_thresh. Instead the auxiliary wb control line in
1636	 *   wb_position_ratio() will let the dirtier task progress
1637	 *   at some rate <= (write_bw / 2) for bringing down wb_dirty.
1638	 */
1639	dtc->wb_thresh = __wb_calc_thresh(dtc);
1640	dtc->wb_bg_thresh = dtc->thresh ?
1641		div64_u64(dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
1642
1643	/*
1644	 * In order to avoid the stacked BDI deadlock we need
1645	 * to ensure we accurately count the 'dirty' pages when
1646	 * the threshold is low.
1647	 *
1648	 * Otherwise it would be possible to get thresh+n pages
1649	 * reported dirty, even though there are thresh-m pages
1650	 * actually dirty; with m+n sitting in the percpu
1651	 * deltas.
1652	 */
1653	if (dtc->wb_thresh < 2 * wb_stat_error()) {
1654		wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
1655		dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK);
1656	} else {
1657		wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE);
1658		dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK);
1659	}
1660}
1661
1662/*
1663 * balance_dirty_pages() must be called by processes which are generating dirty
1664 * data.  It looks at the number of dirty pages in the machine and will force
1665 * the caller to wait once it crosses (background_thresh + dirty_thresh) / 2.
1666 * If we're over `background_thresh' then the writeback threads are woken to
1667 * perform some writeout.
1668 */
1669static int balance_dirty_pages(struct bdi_writeback *wb,
1670			       unsigned long pages_dirtied, unsigned int flags)
1671{
1672	struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
1673	struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
1674	struct dirty_throttle_control * const gdtc = &gdtc_stor;
1675	struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
1676						     &mdtc_stor : NULL;
1677	struct dirty_throttle_control *sdtc;
1678	unsigned long nr_reclaimable;	/* = file_dirty */
1679	long period;
1680	long pause;
1681	long max_pause;
1682	long min_pause;
1683	int nr_dirtied_pause;
1684	bool dirty_exceeded = false;
1685	unsigned long task_ratelimit;
1686	unsigned long dirty_ratelimit;
1687	struct backing_dev_info *bdi = wb->bdi;
1688	bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
1689	unsigned long start_time = jiffies;
1690	int ret = 0;
1691
1692	for (;;) {
1693		unsigned long now = jiffies;
1694		unsigned long dirty, thresh, bg_thresh;
1695		unsigned long m_dirty = 0;	/* stop bogus uninit warnings */
1696		unsigned long m_thresh = 0;
1697		unsigned long m_bg_thresh = 0;
1698
1699		nr_reclaimable = global_node_page_state(NR_FILE_DIRTY);
1700		gdtc->avail = global_dirtyable_memory();
1701		gdtc->dirty = nr_reclaimable + global_node_page_state(NR_WRITEBACK);
1702
1703		domain_dirty_limits(gdtc);
1704
1705		if (unlikely(strictlimit)) {
1706			wb_dirty_limits(gdtc);
1707
1708			dirty = gdtc->wb_dirty;
1709			thresh = gdtc->wb_thresh;
1710			bg_thresh = gdtc->wb_bg_thresh;
1711		} else {
1712			dirty = gdtc->dirty;
1713			thresh = gdtc->thresh;
1714			bg_thresh = gdtc->bg_thresh;
1715		}
1716
1717		if (mdtc) {
1718			unsigned long filepages, headroom, writeback;
1719
1720			/*
1721			 * If @wb belongs to !root memcg, repeat the same
1722			 * basic calculations for the memcg domain.
1723			 */
1724			mem_cgroup_wb_stats(wb, &filepages, &headroom,
1725					    &mdtc->dirty, &writeback);
1726			mdtc->dirty += writeback;
1727			mdtc_calc_avail(mdtc, filepages, headroom);
1728
1729			domain_dirty_limits(mdtc);
1730
1731			if (unlikely(strictlimit)) {
1732				wb_dirty_limits(mdtc);
1733				m_dirty = mdtc->wb_dirty;
1734				m_thresh = mdtc->wb_thresh;
1735				m_bg_thresh = mdtc->wb_bg_thresh;
1736			} else {
1737				m_dirty = mdtc->dirty;
1738				m_thresh = mdtc->thresh;
1739				m_bg_thresh = mdtc->bg_thresh;
1740			}
1741		}
1742
1743		/*
1744		 * In laptop mode, we wait until hitting the higher threshold
1745		 * before starting background writeout, and then write out all
1746		 * the way down to the lower threshold.  So slow writers cause
1747		 * minimal disk activity.
1748		 *
1749		 * In normal mode, we start background writeout at the lower
1750		 * background_thresh, to keep the amount of dirty memory low.
1751		 */
1752		if (!laptop_mode && nr_reclaimable > gdtc->bg_thresh &&
1753		    !writeback_in_progress(wb))
1754			wb_start_background_writeback(wb);
1755
1756		/*
1757		 * Throttle it only when the background writeback cannot
1758		 * catch up. This avoids (excessively) small writeouts
1759		 * when the wb limits are ramping up in case of !strictlimit.
1760		 *
1761		 * In strictlimit case make decision based on the wb counters
1762		 * and limits. Small writeouts when the wb limits are ramping
1763		 * up are the price we consciously pay for strictlimit-ing.
1764		 *
1765		 * If memcg domain is in effect, @dirty should be under
1766		 * both global and memcg freerun ceilings.
1767		 */
1768		if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh) &&
1769		    (!mdtc ||
1770		     m_dirty <= dirty_freerun_ceiling(m_thresh, m_bg_thresh))) {
1771			unsigned long intv;
1772			unsigned long m_intv;
1773
1774free_running:
1775			intv = dirty_poll_interval(dirty, thresh);
1776			m_intv = ULONG_MAX;
1777
1778			current->dirty_paused_when = now;
1779			current->nr_dirtied = 0;
1780			if (mdtc)
1781				m_intv = dirty_poll_interval(m_dirty, m_thresh);
1782			current->nr_dirtied_pause = min(intv, m_intv);
1783			break;
1784		}
1785
1786		/* Start writeback even when in laptop mode */
1787		if (unlikely(!writeback_in_progress(wb)))
1788			wb_start_background_writeback(wb);
1789
1790		mem_cgroup_flush_foreign(wb);
1791
1792		/*
1793		 * Calculate global domain's pos_ratio and select the
1794		 * global dtc by default.
1795		 */
1796		if (!strictlimit) {
1797			wb_dirty_limits(gdtc);
1798
1799			if ((current->flags & PF_LOCAL_THROTTLE) &&
1800			    gdtc->wb_dirty <
1801			    dirty_freerun_ceiling(gdtc->wb_thresh,
1802						  gdtc->wb_bg_thresh))
1803				/*
1804				 * LOCAL_THROTTLE tasks must not be throttled
1805				 * when below the per-wb freerun ceiling.
1806				 */
1807				goto free_running;
1808		}
1809
1810		dirty_exceeded = (gdtc->wb_dirty > gdtc->wb_thresh) &&
1811			((gdtc->dirty > gdtc->thresh) || strictlimit);
1812
1813		wb_position_ratio(gdtc);
1814		sdtc = gdtc;
1815
1816		if (mdtc) {
1817			/*
1818			 * If memcg domain is in effect, calculate its
1819			 * pos_ratio.  @wb should satisfy constraints from
1820			 * both global and memcg domains.  Choose the one
1821			 * w/ lower pos_ratio.
1822			 */
1823			if (!strictlimit) {
1824				wb_dirty_limits(mdtc);
1825
1826				if ((current->flags & PF_LOCAL_THROTTLE) &&
1827				    mdtc->wb_dirty <
1828				    dirty_freerun_ceiling(mdtc->wb_thresh,
1829							  mdtc->wb_bg_thresh))
1830					/*
1831					 * LOCAL_THROTTLE tasks must not be
1832					 * throttled when below the per-wb
1833					 * freerun ceiling.
1834					 */
1835					goto free_running;
1836			}
1837			dirty_exceeded |= (mdtc->wb_dirty > mdtc->wb_thresh) &&
1838				((mdtc->dirty > mdtc->thresh) || strictlimit);
1839
1840			wb_position_ratio(mdtc);
1841			if (mdtc->pos_ratio < gdtc->pos_ratio)
1842				sdtc = mdtc;
1843		}
1844
1845		if (dirty_exceeded != wb->dirty_exceeded)
1846			wb->dirty_exceeded = dirty_exceeded;
1847
1848		if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
1849					   BANDWIDTH_INTERVAL))
1850			__wb_update_bandwidth(gdtc, mdtc, true);
1851
1852		/* throttle according to the chosen dtc */
1853		dirty_ratelimit = READ_ONCE(wb->dirty_ratelimit);
1854		task_ratelimit = ((u64)dirty_ratelimit * sdtc->pos_ratio) >>
1855							RATELIMIT_CALC_SHIFT;
1856		max_pause = wb_max_pause(wb, sdtc->wb_dirty);
1857		min_pause = wb_min_pause(wb, max_pause,
1858					 task_ratelimit, dirty_ratelimit,
1859					 &nr_dirtied_pause);
1860
1861		if (unlikely(task_ratelimit == 0)) {
1862			period = max_pause;
1863			pause = max_pause;
1864			goto pause;
1865		}
1866		period = HZ * pages_dirtied / task_ratelimit;
1867		pause = period;
1868		if (current->dirty_paused_when)
1869			pause -= now - current->dirty_paused_when;
1870		/*
1871		 * For less than 1s think time (ext3/4 may block the dirtier
1872		 * for up to 800ms from time to time on a single HDD; so does
1873		 * xfs, though much less frequently), try to compensate for it
1874		 * in future periods by updating the virtual time; otherwise
1875		 * just do a reset, as it may be a light dirtier.
1876		 */
1877		if (pause < min_pause) {
1878			trace_balance_dirty_pages(wb,
1879						  sdtc->thresh,
1880						  sdtc->bg_thresh,
1881						  sdtc->dirty,
1882						  sdtc->wb_thresh,
1883						  sdtc->wb_dirty,
1884						  dirty_ratelimit,
1885						  task_ratelimit,
1886						  pages_dirtied,
1887						  period,
1888						  min(pause, 0L),
1889						  start_time);
1890			if (pause < -HZ) {
1891				current->dirty_paused_when = now;
1892				current->nr_dirtied = 0;
1893			} else if (period) {
1894				current->dirty_paused_when += period;
1895				current->nr_dirtied = 0;
1896			} else if (current->nr_dirtied_pause <= pages_dirtied)
1897				current->nr_dirtied_pause += pages_dirtied;
1898			break;
1899		}
1900		if (unlikely(pause > max_pause)) {
1901			/* for occasional dropped task_ratelimit */
1902			now += min(pause - max_pause, max_pause);
1903			pause = max_pause;
1904		}
1905
1906pause:
1907		trace_balance_dirty_pages(wb,
1908					  sdtc->thresh,
1909					  sdtc->bg_thresh,
1910					  sdtc->dirty,
1911					  sdtc->wb_thresh,
1912					  sdtc->wb_dirty,
1913					  dirty_ratelimit,
1914					  task_ratelimit,
1915					  pages_dirtied,
1916					  period,
1917					  pause,
1918					  start_time);
1919		if (flags & BDP_ASYNC) {
1920			ret = -EAGAIN;
1921			break;
1922		}
1923		__set_current_state(TASK_KILLABLE);
1924		bdi->last_bdp_sleep = jiffies;
1925		io_schedule_timeout(pause);
1926
1927		current->dirty_paused_when = now + pause;
1928		current->nr_dirtied = 0;
1929		current->nr_dirtied_pause = nr_dirtied_pause;
1930
1931		/*
1932		 * This is typically equal to (dirty < thresh) and can also
1933		 * keep "1000+ dd on a slow USB stick" under control.
1934		 */
1935		if (task_ratelimit)
1936			break;
1937
1938		/*
1939		 * In the case of an unresponsive NFS server whose dirty
1940		 * pages exceed dirty_thresh, give the other good wbs a pipe
1941		 * to go through, so that tasks on them still remain responsive.
1942		 *
1943		 * In theory 1 page is enough to keep the consumer-producer
1944		 * pipe going: the flusher cleans 1 page => the task dirties 1
1945		 * more page. However wb_dirty has accounting errors.  So use
1946		 * the larger and more IO friendly wb_stat_error.
1947		 */
1948		if (sdtc->wb_dirty <= wb_stat_error())
1949			break;
1950
1951		if (fatal_signal_pending(current))
1952			break;
1953	}
1954	return ret;
1955}
1956
1957static DEFINE_PER_CPU(int, bdp_ratelimits);
1958
1959/*
1960 * Normal tasks are throttled by
1961 *	loop {
1962 *		dirty tsk->nr_dirtied_pause pages;
1963 *		take a snap in balance_dirty_pages();
1964 *	}
1965 * However there is a worst case. If every task exits immediately after dirtying
1966 * (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never be
1967 * called to throttle the page dirties. The solution is to save the not yet
1968 * throttled page dirties in dirty_throttle_leaks on task exit and charge them
1969 * randomly into the running tasks. This works well for the above worst case,
1970 * as the new task will pick up and accumulate the old task's leaked dirty
1971 * count and eventually get throttled.
1972 */
1973DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
1974
1975/**
1976 * balance_dirty_pages_ratelimited_flags - Balance dirty memory state.
1977 * @mapping: address_space which was dirtied.
1978 * @flags: BDP flags.
1979 *
1980 * Processes which are dirtying memory should call in here once for each page
1981 * which was newly dirtied.  The function will periodically check the system's
1982 * dirty state and will initiate writeback if needed.
1983 *
1984 * See balance_dirty_pages_ratelimited() for details.
1985 *
1986 * Return: If @flags contains BDP_ASYNC, it may return -EAGAIN to
1987 * indicate that memory is out of balance and the caller must wait
1988 * for I/O to complete.  Otherwise, it will return 0 to indicate
1989 * that either memory was already in balance, or it was able to sleep
1990 * until the amount of dirty memory returned to balance.
1991 */
1992int balance_dirty_pages_ratelimited_flags(struct address_space *mapping,
1993					unsigned int flags)
1994{
1995	struct inode *inode = mapping->host;
1996	struct backing_dev_info *bdi = inode_to_bdi(inode);
1997	struct bdi_writeback *wb = NULL;
1998	int ratelimit;
1999	int ret = 0;
2000	int *p;
2001
2002	if (!(bdi->capabilities & BDI_CAP_WRITEBACK))
2003		return ret;
2004
2005	if (inode_cgwb_enabled(inode))
2006		wb = wb_get_create_current(bdi, GFP_KERNEL);
2007	if (!wb)
2008		wb = &bdi->wb;
2009
2010	ratelimit = current->nr_dirtied_pause;
2011	if (wb->dirty_exceeded)
2012		ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));
2013
2014	preempt_disable();
2015	/*
2016	 * This prevents one CPU from accumulating too many dirtied pages
2017	 * without calling into balance_dirty_pages(), which can happen when
2018	 * there are 1000+ tasks that all start dirtying pages at exactly the
2019	 * same time and hence all honour a too-large initial task->nr_dirtied_pause.
2020	 */
2021	p =  this_cpu_ptr(&bdp_ratelimits);
2022	if (unlikely(current->nr_dirtied >= ratelimit))
2023		*p = 0;
2024	else if (unlikely(*p >= ratelimit_pages)) {
2025		*p = 0;
2026		ratelimit = 0;
2027	}
2028	/*
2029	 * Pick up the pages dirtied by exited tasks. This prevents lots of
2030	 * short-lived tasks (e.g. gcc invocations in a kernel build) from
2031	 * escaping dirty throttling and livelocking other long-running dirtiers.
2032	 */
2033	p = this_cpu_ptr(&dirty_throttle_leaks);
2034	if (*p > 0 && current->nr_dirtied < ratelimit) {
2035		unsigned long nr_pages_dirtied;
2036		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
2037		*p -= nr_pages_dirtied;
2038		current->nr_dirtied += nr_pages_dirtied;
2039	}
2040	preempt_enable();
2041
2042	if (unlikely(current->nr_dirtied >= ratelimit))
2043		ret = balance_dirty_pages(wb, current->nr_dirtied, flags);
2044
2045	wb_put(wb);
2046	return ret;
2047}
2048EXPORT_SYMBOL_GPL(balance_dirty_pages_ratelimited_flags);
2049
2050/**
2051 * balance_dirty_pages_ratelimited - balance dirty memory state.
2052 * @mapping: address_space which was dirtied.
2053 *
2054 * Processes which are dirtying memory should call in here once for each page
2055 * which was newly dirtied.  The function will periodically check the system's
2056 * dirty state and will initiate writeback if needed.
2057 *
2058 * Once we're over the dirty memory limit we decrease the ratelimiting
2059 * by a lot, to prevent individual processes from overshooting the limit
2060 * by (ratelimit_pages) each.
2061 */
2062void balance_dirty_pages_ratelimited(struct address_space *mapping)
2063{
2064	balance_dirty_pages_ratelimited_flags(mapping, 0);
2065}
2066EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
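/*
 * Typical caller pattern (sketch): a filesystem's buffered write path is
 * expected to call this once per page/folio it dirties, roughly:
 *
 *	while (more data to copy) {
 *		... copy into the page cache and mark the folio dirty ...
 *		balance_dirty_pages_ratelimited(mapping);
 *	}
 *
 * The call is cheap in the common case -- it only enters
 * balance_dirty_pages() once current->nr_dirtied crosses the per-task
 * ratelimit computed above.
 */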
2067
2068/**
2069 * wb_over_bg_thresh - does @wb need to be written back?
2070 * @wb: bdi_writeback of interest
2071 *
2072 * Determines whether background writeback should keep writing @wb or
2073 * whether it is already clean enough.
2074 *
2075 * Return: %true if writeback should continue.
2076 */
2077bool wb_over_bg_thresh(struct bdi_writeback *wb)
2078{
2079	struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
2080	struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
2081	struct dirty_throttle_control * const gdtc = &gdtc_stor;
2082	struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
2083						     &mdtc_stor : NULL;
2084	unsigned long reclaimable;
2085	unsigned long thresh;
2086
2087	/*
2088	 * Similar to balance_dirty_pages() but ignores pages being written
2089	 * as we're trying to decide whether to put more under writeback.
2090	 */
2091	gdtc->avail = global_dirtyable_memory();
2092	gdtc->dirty = global_node_page_state(NR_FILE_DIRTY);
2093	domain_dirty_limits(gdtc);
2094
2095	if (gdtc->dirty > gdtc->bg_thresh)
2096		return true;
2097
2098	thresh = wb_calc_thresh(gdtc->wb, gdtc->bg_thresh);
2099	if (thresh < 2 * wb_stat_error())
2100		reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
2101	else
2102		reclaimable = wb_stat(wb, WB_RECLAIMABLE);
2103
2104	if (reclaimable > thresh)
2105		return true;
2106
2107	if (mdtc) {
2108		unsigned long filepages, headroom, writeback;
2109
2110		mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty,
2111				    &writeback);
2112		mdtc_calc_avail(mdtc, filepages, headroom);
2113		domain_dirty_limits(mdtc);	/* ditto, ignore writeback */
2114
2115		if (mdtc->dirty > mdtc->bg_thresh)
2116			return true;
2117
2118		thresh = wb_calc_thresh(mdtc->wb, mdtc->bg_thresh);
2119		if (thresh < 2 * wb_stat_error())
2120			reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
2121		else
2122			reclaimable = wb_stat(wb, WB_RECLAIMABLE);
2123
2124		if (reclaimable > thresh)
2125			return true;
2126	}
2127
2128	return false;
2129}
2130
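/*
 * Illustrative usage (a sketch of what the flusher in fs/fs-writeback.c
 * does, not a verbatim excerpt): background flushing keeps pushing
 * pages while the predicate above holds:
 *
 *	while (wb_over_bg_thresh(wb))
 *		<write a batch of dirty pages from wb>;
 */
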
2131#ifdef CONFIG_SYSCTL
2132/*
2133 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
2134 */
2135static int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
2136		void *buffer, size_t *length, loff_t *ppos)
2137{
2138	unsigned int old_interval = dirty_writeback_interval;
2139	int ret;
2140
2141	ret = proc_dointvec(table, write, buffer, length, ppos);
2142
2143	/*
2144	 * Writing 0 to dirty_writeback_interval will disable periodic writeback,
2145	 * and a different non-zero value will wake up the writeback threads.
2146	 * wb_wakeup_delayed() would be more appropriate, but it's a pain to
2147	 * iterate over all bdis and wbs.
2148	 * We do it this way so that the change takes effect immediately.
2149	 */
2150	if (!ret && write && dirty_writeback_interval &&
2151		dirty_writeback_interval != old_interval)
2152		wakeup_flusher_threads(WB_REASON_PERIODIC);
2153
2154	return ret;
2155}
2156#endif
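
/*
 * User-visible example: the handler above means that, e.g.,
 *
 *	# echo 250 > /proc/sys/vm/dirty_writeback_centisecs
 *
 * switches periodic writeback to every 2.5 seconds and wakes the
 * flusher threads immediately, while writing 0 disables periodic
 * writeback altogether.
 */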
2157
2158void laptop_mode_timer_fn(struct timer_list *t)
2159{
2160	struct backing_dev_info *backing_dev_info =
2161		from_timer(backing_dev_info, t, laptop_mode_wb_timer);
2162
2163	wakeup_flusher_threads_bdi(backing_dev_info, WB_REASON_LAPTOP_TIMER);
2164}
2165
2166/*
2167 * We've spun up the disk and we're in laptop mode: schedule writeback
2168 * of all dirty data a few seconds from now.  If the flush is already scheduled
2169 * then push it back - the user is still using the disk.
2170 */
2171void laptop_io_completion(struct backing_dev_info *info)
2172{
2173	mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
2174}
2175
2176/*
2177 * We're in laptop mode and we've just synced. The sync's writes will have
2178 * caused another writeback to be scheduled by laptop_io_completion.
2179 * Nothing needs to be written back anymore, so we unschedule the writeback.
2180 */
2181void laptop_sync_completion(void)
2182{
2183	struct backing_dev_info *bdi;
2184
2185	rcu_read_lock();
2186
2187	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
2188		del_timer(&bdi->laptop_mode_wb_timer);
2189
2190	rcu_read_unlock();
2191}
2192
2193/*
2194 * If ratelimit_pages is too high then we can get into dirty-data overload
2195 * if a large number of processes all perform writes at the same time.
2196 *
2197 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
2198 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
2199 * thresholds.
2200 */
2201
2202void writeback_set_ratelimit(void)
2203{
2204	struct wb_domain *dom = &global_wb_domain;
2205	unsigned long background_thresh;
2206	unsigned long dirty_thresh;
2207
2208	global_dirty_limits(&background_thresh, &dirty_thresh);
2209	dom->dirty_limit = dirty_thresh;
2210	ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
2211	if (ratelimit_pages < 16)
2212		ratelimit_pages = 16;
2213}
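
/*
 * Worked example (illustrative numbers): with a dirty threshold of
 * 1048576 pages (4GiB of 4KiB pages) and 8 online CPUs,
 * ratelimit_pages = 1048576 / (8 * 32) = 4096, so each CPU may dirty
 * up to 4096 pages between checks and the collective overshoot stays
 * within 8 * 4096 = 32768 pages, i.e. 1/32 (~3%) of the threshold.
 */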
2214
2215static int page_writeback_cpu_online(unsigned int cpu)
2216{
2217	writeback_set_ratelimit();
2218	return 0;
2219}
2220
2221#ifdef CONFIG_SYSCTL
2222
2223/* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
2224static const unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
2225
2226static struct ctl_table vm_page_writeback_sysctls[] = {
2227	{
2228		.procname   = "dirty_background_ratio",
2229		.data       = &dirty_background_ratio,
2230		.maxlen     = sizeof(dirty_background_ratio),
2231		.mode       = 0644,
2232		.proc_handler   = dirty_background_ratio_handler,
2233		.extra1     = SYSCTL_ZERO,
2234		.extra2     = SYSCTL_ONE_HUNDRED,
2235	},
2236	{
2237		.procname   = "dirty_background_bytes",
2238		.data       = &dirty_background_bytes,
2239		.maxlen     = sizeof(dirty_background_bytes),
2240		.mode       = 0644,
2241		.proc_handler   = dirty_background_bytes_handler,
2242		.extra1     = SYSCTL_LONG_ONE,
2243	},
2244	{
2245		.procname   = "dirty_ratio",
2246		.data       = &vm_dirty_ratio,
2247		.maxlen     = sizeof(vm_dirty_ratio),
2248		.mode       = 0644,
2249		.proc_handler   = dirty_ratio_handler,
2250		.extra1     = SYSCTL_ZERO,
2251		.extra2     = SYSCTL_ONE_HUNDRED,
2252	},
2253	{
2254		.procname   = "dirty_bytes",
2255		.data       = &vm_dirty_bytes,
2256		.maxlen     = sizeof(vm_dirty_bytes),
2257		.mode       = 0644,
2258		.proc_handler   = dirty_bytes_handler,
2259		.extra1     = (void *)&dirty_bytes_min,
2260	},
2261	{
2262		.procname   = "dirty_writeback_centisecs",
2263		.data       = &dirty_writeback_interval,
2264		.maxlen     = sizeof(dirty_writeback_interval),
2265		.mode       = 0644,
2266		.proc_handler   = dirty_writeback_centisecs_handler,
2267	},
2268	{
2269		.procname   = "dirty_expire_centisecs",
2270		.data       = &dirty_expire_interval,
2271		.maxlen     = sizeof(dirty_expire_interval),
2272		.mode       = 0644,
2273		.proc_handler   = proc_dointvec_minmax,
2274		.extra1     = SYSCTL_ZERO,
2275	},
2276#ifdef CONFIG_HIGHMEM
2277	{
2278		.procname	= "highmem_is_dirtyable",
2279		.data		= &vm_highmem_is_dirtyable,
2280		.maxlen		= sizeof(vm_highmem_is_dirtyable),
2281		.mode		= 0644,
2282		.proc_handler	= proc_dointvec_minmax,
2283		.extra1		= SYSCTL_ZERO,
2284		.extra2		= SYSCTL_ONE,
2285	},
2286#endif
2287	{
2288		.procname	= "laptop_mode",
2289		.data		= &laptop_mode,
2290		.maxlen		= sizeof(laptop_mode),
2291		.mode		= 0644,
2292		.proc_handler	= proc_dointvec_jiffies,
2293	},
2294	{}
2295};
2296#endif
2297
2298/*
2299 * Called early on to tune the page writeback dirty limits.
2300 *
2301 * We used to scale dirty pages according to how total memory
2302 * compared to the pages that could be allocated for buffers.
2303 *
2304 * However, that was when we used "dirty_ratio" to scale with
2305 * all memory, and we don't do that any more. "dirty_ratio"
2306 * is now applied to total non-HIGHMEM memory, and as such we can't
2307 * get into the old insane situation any more where we had
2308 * large amounts of dirty pages compared to a small amount of
2309 * non-HIGHMEM memory.
2310 *
2311 * But we might still want to scale the dirty_ratio by how
2312 * much memory the box has.
2313 */
2314void __init page_writeback_init(void)
2315{
2316	BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
2317
2318	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/writeback:online",
2319			  page_writeback_cpu_online, NULL);
2320	cpuhp_setup_state(CPUHP_MM_WRITEBACK_DEAD, "mm/writeback:dead", NULL,
2321			  page_writeback_cpu_online);
2322#ifdef CONFIG_SYSCTL
2323	register_sysctl_init("vm", vm_page_writeback_sysctls);
2324#endif
2325}
2326
2327/**
2328 * tag_pages_for_writeback - tag pages to be written by writeback
2329 * @mapping: address space structure to write
2330 * @start: starting page index
2331 * @end: ending page index (inclusive)
2332 *
2333 * This function scans the page range from @start to @end (inclusive) and tags
2334 * all pages that have DIRTY tag set with a special TOWRITE tag.  The caller
2335 * can then use the TOWRITE tag to identify pages eligible for writeback.
2336 * This mechanism is used to avoid livelocking of writeback by a process
2337 * steadily creating new dirty pages in the file (thus it is important for this
2338 * function to be quick so that it can tag pages faster than a dirtying process
2339 * can create them).
2340 */
2341void tag_pages_for_writeback(struct address_space *mapping,
2342			     pgoff_t start, pgoff_t end)
2343{
2344	XA_STATE(xas, &mapping->i_pages, start);
2345	unsigned int tagged = 0;
2346	void *page;
2347
2348	xas_lock_irq(&xas);
2349	xas_for_each_marked(&xas, page, end, PAGECACHE_TAG_DIRTY) {
2350		xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE);
2351		if (++tagged % XA_CHECK_SCHED)
2352			continue;
2353
2354		xas_pause(&xas);
2355		xas_unlock_irq(&xas);
2356		cond_resched();
2357		xas_lock_irq(&xas);
2358	}
2359	xas_unlock_irq(&xas);
2360}
2361EXPORT_SYMBOL(tag_pages_for_writeback);
2362
2363static bool folio_prepare_writeback(struct address_space *mapping,
2364		struct writeback_control *wbc, struct folio *folio)
2365{
2366	/*
2367	 * Folio truncated or invalidated. We can freely skip it then,
2368	 * even for data integrity operations: the folio has disappeared
2369	 * concurrently, so there could be no real expectation of this
2370	 * data integrity operation even if there is now a new, dirty
2371	 * folio at the same pagecache index.
2372	 */
2373	if (unlikely(folio->mapping != mapping))
2374		return false;
2375
2376	/*
2377	 * Did somebody else write it for us?
2378	 */
2379	if (!folio_test_dirty(folio))
2380		return false;
2381
2382	if (folio_test_writeback(folio)) {
2383		if (wbc->sync_mode == WB_SYNC_NONE)
2384			return false;
2385		folio_wait_writeback(folio);
2386	}
2387	BUG_ON(folio_test_writeback(folio));
2388
2389	if (!folio_clear_dirty_for_io(folio))
2390		return false;
2391
2392	return true;
2393}
2394
2395static xa_mark_t wbc_to_tag(struct writeback_control *wbc)
2396{
2397	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2398		return PAGECACHE_TAG_TOWRITE;
2399	return PAGECACHE_TAG_DIRTY;
2400}
2401
2402static pgoff_t wbc_end(struct writeback_control *wbc)
2403{
2404	if (wbc->range_cyclic)
2405		return -1;
2406	return wbc->range_end >> PAGE_SHIFT;
2407}
2408
2409static struct folio *writeback_get_folio(struct address_space *mapping,
2410		struct writeback_control *wbc)
2411{
2412	struct folio *folio;
2413
2414retry:
2415	folio = folio_batch_next(&wbc->fbatch);
2416	if (!folio) {
2417		folio_batch_release(&wbc->fbatch);
2418		cond_resched();
2419		filemap_get_folios_tag(mapping, &wbc->index, wbc_end(wbc),
2420				wbc_to_tag(wbc), &wbc->fbatch);
2421		folio = folio_batch_next(&wbc->fbatch);
2422		if (!folio)
2423			return NULL;
2424	}
2425
2426	folio_lock(folio);
2427	if (unlikely(!folio_prepare_writeback(mapping, wbc, folio))) {
2428		folio_unlock(folio);
2429		goto retry;
2430	}
2431
2432	trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
2433	return folio;
2434}
2435
2436/**
2437 * writeback_iter - iterate folios of a mapping for writeback
2438 * @mapping: address space structure to write
2439 * @wbc: writeback context
2440 * @folio: previously iterated folio (%NULL to start)
2441 * @error: in-out pointer for writeback errors (see below)
2442 *
2443 * This function returns the next folio for the writeback operation described by
2444 * @wbc on @mapping and should be called in a while loop in the ->writepages
2445 * implementation.
2446 *
2447 * To start the writeback operation, %NULL is passed in the @folio argument, and
2448 * for every subsequent iteration the folio returned previously should be passed
2449 * back in.
2450 *
2451 * If there was an error in the per-folio writeback inside the writeback_iter()
2452 * loop, @error should be set to the error value.
2453 *
2454 * Once the writeback described in @wbc has finished, this function will
2455 * return %NULL and, if any iteration produced an error, restore it to @error.
2456 *
2457 * Note: callers should not manually break out of the loop using break or goto
2458 * but must keep calling writeback_iter() until it returns %NULL.
2459 *
2460 * Return: the folio to write or %NULL if the loop is done.
2461 */
2462struct folio *writeback_iter(struct address_space *mapping,
2463		struct writeback_control *wbc, struct folio *folio, int *error)
2464{
2465	if (!folio) {
2466		folio_batch_init(&wbc->fbatch);
2467		wbc->saved_err = *error = 0;
2468
2469		/*
2470		 * For range cyclic writeback we remember where we stopped so
2471		 * that we can continue from there next time.
2472		 *
2473		 * For non-cyclic writeback we always start at the beginning of
2474		 * the passed in range.
2475		 */
2476		if (wbc->range_cyclic)
2477			wbc->index = mapping->writeback_index;
2478		else
2479			wbc->index = wbc->range_start >> PAGE_SHIFT;
2480
2481		/*
2482		 * To avoid livelocks when other processes dirty new pages, we
2483		 * first tag pages which should be written back and only then
2484		 * start writing them.
2485		 *
2486		 * For data-integrity writeback we have to be careful so that we
2487		 * do not miss some pages (e.g., because some other process has
2488		 * cleared the TOWRITE tag we set).  The rule we follow is that the
2489		 * TOWRITE tag can be cleared only by the process clearing the
2490		 * DIRTY tag (and submitting the page for I/O).
2491		 */
2492		if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2493			tag_pages_for_writeback(mapping, wbc->index,
2494					wbc_end(wbc));
2495	} else {
2496		wbc->nr_to_write -= folio_nr_pages(folio);
2497
2498		WARN_ON_ONCE(*error > 0);
2499
2500		/*
2501		 * For integrity writeback we have to keep going until we have
2502		 * written all the folios we tagged for writeback above, even if
2503		 * we run past wbc->nr_to_write or encounter errors.
2504		 * We stash away the first error we encounter in wbc->saved_err
2505		 * so that it can be retrieved when we're done.  This is because
2506		 * the file system may still have state to clear for each folio.
2507		 *
2508		 * For background writeback we exit as soon as we run past
2509		 * wbc->nr_to_write or encounter the first error.
2510		 */
2511		if (wbc->sync_mode == WB_SYNC_ALL) {
2512			if (*error && !wbc->saved_err)
2513				wbc->saved_err = *error;
2514		} else {
2515			if (*error || wbc->nr_to_write <= 0)
2516				goto done;
2517		}
2518	}
2519
2520	folio = writeback_get_folio(mapping, wbc);
2521	if (!folio) {
2522		/*
2523		 * To avoid deadlocks between range_cyclic writeback and callers
2524		 * that hold pages in PageWriteback to aggregate I/O until
2525		 * the writeback iteration finishes, we do not loop back to the
2526		 * start of the file.  Doing so causes a page lock/page
2527		 * writeback access order inversion - we should only ever lock
2528		 * multiple pages in ascending page->index order, and looping
2529		 * back to the start of the file violates that rule and causes
2530		 * deadlocks.
2531		 */
2532		if (wbc->range_cyclic)
2533			mapping->writeback_index = 0;
2534
2535		/*
2536		 * Return the first error we encountered (if there was any) to
2537		 * the caller.
2538		 */
2539		*error = wbc->saved_err;
2540	}
2541	return folio;
2542
2543done:
2544	if (wbc->range_cyclic)
2545		mapping->writeback_index = folio->index + folio_nr_pages(folio);
2546	folio_batch_release(&wbc->fbatch);
2547	return NULL;
2548}
2549
2550/**
2551 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2552 * @mapping: address space structure to write
2553 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2554 * @writepage: function called for each page
2555 * @data: data passed to writepage function
2556 *
2557 * Return: %0 on success, negative error code otherwise
2558 *
2559 * Note: please use writeback_iter() instead.
2560 */
2561int write_cache_pages(struct address_space *mapping,
2562		      struct writeback_control *wbc, writepage_t writepage,
2563		      void *data)
2564{
2565	struct folio *folio = NULL;
2566	int error;
2567
2568	while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
2569		error = writepage(folio, wbc, data);
2570		if (error == AOP_WRITEPAGE_ACTIVATE) {
2571			folio_unlock(folio);
2572			error = 0;
2573		}
2574	}
2575
2576	return error;
2577}
2578EXPORT_SYMBOL(write_cache_pages);
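
/*
 * Illustrative sketch (not part of this file): a filesystem
 * ->writepages implementation driving writeback_iter() directly, per
 * the note above.  example_write_one_folio() is a hypothetical
 * stand-in for the filesystem's per-folio writeout routine.
 */
static int example_write_one_folio(struct folio *folio,
				   struct writeback_control *wbc)
{
	/*
	 * Hypothetical per-folio writeout: a real filesystem would
	 * submit I/O here.  The callback must unlock the folio.
	 */
	folio_unlock(folio);
	return 0;
}

static int example_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	struct folio *folio = NULL;
	int error = 0;

	while ((folio = writeback_iter(mapping, wbc, folio, &error)))
		error = example_write_one_folio(folio, wbc);

	return error;
}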
2579
2580static int writeback_use_writepage(struct address_space *mapping,
2581		struct writeback_control *wbc)
2582{
2583	struct folio *folio = NULL;
2584	struct blk_plug plug;
2585	int err;
2586
2587	blk_start_plug(&plug);
2588	while ((folio = writeback_iter(mapping, wbc, folio, &err))) {
2589		err = mapping->a_ops->writepage(&folio->page, wbc);
2590		if (err == AOP_WRITEPAGE_ACTIVATE) {
2591			folio_unlock(folio);
2592			err = 0;
2593		}
2594		mapping_set_error(mapping, err);
2595	}
2596	blk_finish_plug(&plug);
2597
2598	return err;
2599}
2600
2601int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
2602{
2603	int ret;
2604	struct bdi_writeback *wb;
2605
2606	if (wbc->nr_to_write <= 0)
2607		return 0;
2608	wb = inode_to_wb_wbc(mapping->host, wbc);
2609	wb_bandwidth_estimate_start(wb);
2610	while (1) {
2611		if (mapping->a_ops->writepages) {
2612			ret = mapping->a_ops->writepages(mapping, wbc);
2613		} else if (mapping->a_ops->writepage) {
2614			ret = writeback_use_writepage(mapping, wbc);
2615		} else {
2616			/* deal with chardevs and other special files */
2617			ret = 0;
2618		}
2619		if (ret != -ENOMEM || wbc->sync_mode != WB_SYNC_ALL)
2620			break;
2621
2622		/*
2623		 * Lacking an allocation context or the locality or writeback
2624		 * state of any of the inode's pages, throttle based on
2625		 * writeback activity on the local node. It's as good a
2626		 * guess as any.
2627		 */
2628		reclaim_throttle(NODE_DATA(numa_node_id()),
2629			VMSCAN_THROTTLE_WRITEBACK);
2630	}
2631	/*
2632	 * Usually few of the pages we just submitted have been written by now,
2633	 * but if there is constant writeback being submitted, this makes sure
2634	 * the writeback bandwidth estimate is updated once in a while.
2635	 */
2636	if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
2637				   BANDWIDTH_INTERVAL))
2638		wb_update_bandwidth(wb);
2639	return ret;
2640}
2641
2642/*
2643 * For address_spaces which do not use buffers nor write back.
2644 */
2645bool noop_dirty_folio(struct address_space *mapping, struct folio *folio)
2646{
2647	if (!folio_test_dirty(folio))
2648		return !folio_test_set_dirty(folio);
2649	return false;
2650}
2651EXPORT_SYMBOL(noop_dirty_folio);
2652
2653/*
2654 * Helper function for set_page_dirty family.
2655 *
2656 * Caller must hold folio_memcg_lock().
2657 *
2658 * NOTE: This relies on being atomic wrt interrupts.
2659 */
2660static void folio_account_dirtied(struct folio *folio,
2661		struct address_space *mapping)
2662{
2663	struct inode *inode = mapping->host;
2664
2665	trace_writeback_dirty_folio(folio, mapping);
2666
2667	if (mapping_can_writeback(mapping)) {
2668		struct bdi_writeback *wb;
2669		long nr = folio_nr_pages(folio);
2670
2671		inode_attach_wb(inode, folio);
2672		wb = inode_to_wb(inode);
2673
2674		__lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr);
2675		__zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
2676		__node_stat_mod_folio(folio, NR_DIRTIED, nr);
2677		wb_stat_mod(wb, WB_RECLAIMABLE, nr);
2678		wb_stat_mod(wb, WB_DIRTIED, nr);
2679		task_io_account_write(nr * PAGE_SIZE);
2680		current->nr_dirtied += nr;
2681		__this_cpu_add(bdp_ratelimits, nr);
2682
2683		mem_cgroup_track_foreign_dirty(folio, wb);
2684	}
2685}
2686
2687/*
2688 * Helper function for de-accounting a dirty page without writeback.
2689 *
2690 * Caller must hold folio_memcg_lock().
2691 */
2692void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
2693{
2694	long nr = folio_nr_pages(folio);
2695
2696	lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
2697	zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
2698	wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
2699	task_io_account_cancelled_write(nr * PAGE_SIZE);
2700}
2701
2702/*
2703 * Mark the folio dirty, and set it dirty in the page cache, and mark
2704 * the inode dirty.
2705 *
2706 * If warn is true, then emit a warning if the folio is not uptodate and has
2707 * not been truncated.
2708 *
2709 * The caller must hold folio_memcg_lock().  Most callers have the folio
2710 * locked.  A few have the folio blocked from truncation through other
2711 * means (e.g. zap_vma_pages() has it mapped and is holding the page table
2712 * lock).  This can also be called from mark_buffer_dirty(), which I
2713 * cannot prove is always protected against truncate.
2714 */
2715void __folio_mark_dirty(struct folio *folio, struct address_space *mapping,
2716			     int warn)
2717{
2718	unsigned long flags;
2719
2720	xa_lock_irqsave(&mapping->i_pages, flags);
2721	if (folio->mapping) {	/* Race with truncate? */
2722		WARN_ON_ONCE(warn && !folio_test_uptodate(folio));
2723		folio_account_dirtied(folio, mapping);
2724		__xa_set_mark(&mapping->i_pages, folio_index(folio),
2725				PAGECACHE_TAG_DIRTY);
2726	}
2727	xa_unlock_irqrestore(&mapping->i_pages, flags);
2728}
2729
2730/**
2731 * filemap_dirty_folio - Mark a folio dirty for filesystems which do not use buffer_heads.
2732 * @mapping: Address space this folio belongs to.
2733 * @folio: Folio to be marked as dirty.
2734 *
2735 * Filesystems which do not use buffer heads should call this function
2736 * from their dirty_folio address space operation.  It ignores the
2737 * contents of folio_get_private(), so if the filesystem marks individual
2738 * blocks as dirty, the filesystem should handle that itself.
2739 *
2740 * This is also sometimes used by filesystems which use buffer_heads when
2741 * a single buffer is being dirtied: we want to set the folio dirty in
2742 * that case, but not all the buffers.  This is a "bottom-up" dirtying,
2743 * whereas block_dirty_folio() is a "top-down" dirtying.
2744 *
2745 * The caller must ensure this doesn't race with truncation.  Most will
2746 * simply hold the folio lock, but e.g. zap_pte_range() calls with the
2747 * folio mapped and the pte lock held, which also locks out truncation.
2748 */
2749bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio)
2750{
2751	folio_memcg_lock(folio);
2752	if (folio_test_set_dirty(folio)) {
2753		folio_memcg_unlock(folio);
2754		return false;
2755	}
2756
2757	__folio_mark_dirty(folio, mapping, !folio_test_private(folio));
2758	folio_memcg_unlock(folio);
2759
2760	if (mapping->host) {
2761		/* !PageAnon && !swapper_space */
2762		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
2763	}
2764	return true;
2765}
2766EXPORT_SYMBOL(filemap_dirty_folio);
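
/*
 * Illustrative wiring (hypothetical filesystem): address_spaces that
 * do not use buffer heads typically plug this straight into their
 * address_space_operations:
 *
 *	static const struct address_space_operations example_aops = {
 *		.dirty_folio	= filemap_dirty_folio,
 *		.writepages	= example_writepages,
 *	};
 */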
2767
2768/**
2769 * folio_redirty_for_writepage - Decline to write a dirty folio.
2770 * @wbc: The writeback control.
2771 * @folio: The folio.
2772 *
2773 * When a writepage implementation decides that it doesn't want to write
2774 * @folio for some reason, it should call this function, unlock @folio and
2775 * return 0.
2776 *
2777 * Return: True if we redirtied the folio.  False if someone else dirtied
2778 * it first.
2779 */
2780bool folio_redirty_for_writepage(struct writeback_control *wbc,
2781		struct folio *folio)
2782{
2783	struct address_space *mapping = folio->mapping;
2784	long nr = folio_nr_pages(folio);
2785	bool ret;
2786
2787	wbc->pages_skipped += nr;
2788	ret = filemap_dirty_folio(mapping, folio);
2789	if (mapping && mapping_can_writeback(mapping)) {
2790		struct inode *inode = mapping->host;
2791		struct bdi_writeback *wb;
2792		struct wb_lock_cookie cookie = {};
2793
2794		wb = unlocked_inode_to_wb_begin(inode, &cookie);
2795		current->nr_dirtied -= nr;
2796		node_stat_mod_folio(folio, NR_DIRTIED, -nr);
2797		wb_stat_mod(wb, WB_DIRTIED, -nr);
2798		unlocked_inode_to_wb_end(inode, &cookie);
2799	}
2800	return ret;
2801}
2802EXPORT_SYMBOL(folio_redirty_for_writepage);
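
/*
 * Illustrative sketch (not part of this file): a per-folio writeout
 * callback declining to write under WB_SYNC_NONE, following the rule
 * in the comment above: redirty, unlock, return 0.
 */
static int example_decline_writepage(struct folio *folio,
				     struct writeback_control *wbc, void *data)
{
	if (wbc->sync_mode == WB_SYNC_NONE) {
		/* Hypothetical reason: writing now would risk deadlock. */
		folio_redirty_for_writepage(wbc, folio);
		folio_unlock(folio);
		return 0;
	}
	/* ... otherwise actually write the folio ... */
	return 0;
}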
2803
2804/**
2805 * folio_mark_dirty - Mark a folio as being modified.
2806 * @folio: The folio.
2807 *
2808 * The folio may not be truncated while this function is running.
2809 * Holding the folio lock is sufficient to prevent truncation, but some
2810 * callers cannot acquire a sleeping lock.  These callers instead hold
2811 * the page table lock for a page table which contains at least one page
2812 * in this folio.  Truncation will block on the page table lock as it
2813 * unmaps pages before removing the folio from its mapping.
2814 *
2815 * Return: True if the folio was newly dirtied, false if it was already dirty.
2816 */
2817bool folio_mark_dirty(struct folio *folio)
2818{
2819	struct address_space *mapping = folio_mapping(folio);
2820
2821	if (likely(mapping)) {
2822		/*
2823		 * readahead/folio_deactivate could leave PG_readahead/PG_reclaim
2824		 * set due to a race with folio_end_writeback().
2825		 * For readahead: if the folio is written, the flag will be
2826		 * reset, so there is no problem.
2827		 * For folio_deactivate: if the folio is redirtied, the flag
2828		 * will be reset, so again no problem; but if the folio is
2829		 * used by readahead, it will confuse readahead and make it
2830		 * restart the size ramp-up process. That is only a trivial
2831		 * problem, though.
2832		 */
2833		if (folio_test_reclaim(folio))
2834			folio_clear_reclaim(folio);
2835		return mapping->a_ops->dirty_folio(mapping, folio);
2836	}
2837
2838	return noop_dirty_folio(mapping, folio);
2839}
2840EXPORT_SYMBOL(folio_mark_dirty);
2841
2842/*
2843 * set_page_dirty() is racy if the caller has no reference against
2844 * page->mapping->host, and if the page is unlocked.  This is because another
2845 * CPU could truncate the page off the mapping and then free the mapping.
2846 *
2847 * Usually, the page _is_ locked, or the caller is a user-space process which
2848 * holds a reference on the inode by having an open file.
2849 *
2850 * In other cases, the page should be locked before running set_page_dirty().
2851 */
2852int set_page_dirty_lock(struct page *page)
2853{
2854	int ret;
2855
2856	lock_page(page);
2857	ret = set_page_dirty(page);
2858	unlock_page(page);
2859	return ret;
2860}
2861EXPORT_SYMBOL(set_page_dirty_lock);
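
/*
 * Illustrative usage (a sketch, not a verbatim caller): code that
 * dirtied user pages without holding the page lock, e.g. a driver
 * completing direct I/O into pinned pages, uses the locked variant:
 *
 *	for (i = 0; i < npages; i++) {
 *		set_page_dirty_lock(pages[i]);
 *		put_page(pages[i]);
 *	}
 */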
2862
2863/*
2864 * This cancels just the dirty bit on the kernel page itself, it does NOT
2865 * actually remove dirty bits on any mmap's that may be around. It also
2866 * leaves the page tagged dirty, so any sync activity will still find it on
2867 * the dirty lists, and in particular, clear_page_dirty_for_io() will still
2868 * look at the dirty bits in the VM.
2869 *
2870 * This should *normally* only ever be done when a page is truncated
2871 * and is not actually mapped anywhere at all. However, fs/buffer.c does
2872 * this when it notices that somebody has cleaned out all the buffers on a
2873 * page without actually doing it through the VM. Can you say "ext3 is
2874 * horribly ugly"? Thought you could.
2875 */
2876void __folio_cancel_dirty(struct folio *folio)
2877{
2878	struct address_space *mapping = folio_mapping(folio);
2879
2880	if (mapping_can_writeback(mapping)) {
2881		struct inode *inode = mapping->host;
2882		struct bdi_writeback *wb;
2883		struct wb_lock_cookie cookie = {};
2884
2885		folio_memcg_lock(folio);
2886		wb = unlocked_inode_to_wb_begin(inode, &cookie);
2887
2888		if (folio_test_clear_dirty(folio))
2889			folio_account_cleaned(folio, wb);
2890
2891		unlocked_inode_to_wb_end(inode, &cookie);
2892		folio_memcg_unlock(folio);
2893	} else {
2894		folio_clear_dirty(folio);
2895	}
2896}
2897EXPORT_SYMBOL(__folio_cancel_dirty);
2898
2899/*
2900 * Clear a folio's dirty flag, while caring for dirty memory accounting.
2901 * Returns true if the folio was previously dirty.
2902 *
2903 * This is for preparing to put the folio under writeout.  We leave
2904 * the folio tagged as dirty in the xarray so that a concurrent
2905 * write-for-sync can discover it via a PAGECACHE_TAG_DIRTY walk.
2906 * The ->writepage implementation will run either folio_start_writeback()
2907 * or folio_mark_dirty(), at which stage we bring the folio's dirty flag
2908 * and xarray dirty tag back into sync.
2909 *
2910 * This incoherency between the folio's dirty flag and xarray tag is
2911 * unfortunate, but it only exists while the folio is locked.
2912 */
2913bool folio_clear_dirty_for_io(struct folio *folio)
2914{
2915	struct address_space *mapping = folio_mapping(folio);
2916	bool ret = false;
2917
2918	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2919
2920	if (mapping && mapping_can_writeback(mapping)) {
2921		struct inode *inode = mapping->host;
2922		struct bdi_writeback *wb;
2923		struct wb_lock_cookie cookie = {};
2924
2925		/*
2926		 * Yes, Virginia, this is indeed insane.
2927		 *
2928		 * We use this sequence to make sure that
2929		 *  (a) we account for dirty stats properly
2930		 *  (b) we tell the low-level filesystem to
2931		 *      mark the whole folio dirty if it was
2932		 *      dirty in a pagetable. Only to then
2933		 *  (c) clean the folio again and return 1 to
2934		 *      cause the writeback.
2935		 *
2936		 * This way we avoid all nasty races with the
2937		 * dirty bit in multiple places and clearing
2938		 * them concurrently from different threads.
2939		 *
2940		 * Note! Normally the "folio_mark_dirty(folio)"
2941		 * has no effect on the actual dirty bit - since
2942		 * that will already usually be set. But we
2943		 * need the side effects, and it can help us
2944		 * avoid races.
2945		 *
2946		 * We basically use the folio "master dirty bit"
2947		 * as a serialization point for all the different
2948		 * threads doing their things.
2949		 */
2950		if (folio_mkclean(folio))
2951			folio_mark_dirty(folio);
2952		/*
2953		 * We carefully synchronise fault handlers against
2954		 * installing a dirty pte and marking the folio dirty
2955		 * at this point.  We do this by having them hold the
2956		 * page lock while dirtying the folio, and folios are
2957		 * always locked coming in here, so we get the desired
2958		 * exclusion.
2959		 */
2960		wb = unlocked_inode_to_wb_begin(inode, &cookie);
2961		if (folio_test_clear_dirty(folio)) {
2962			long nr = folio_nr_pages(folio);
2963			lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
2964			zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
2965			wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
2966			ret = true;
2967		}
2968		unlocked_inode_to_wb_end(inode, &cookie);
2969		return ret;
2970	}
2971	return folio_test_clear_dirty(folio);
2972}
2973EXPORT_SYMBOL(folio_clear_dirty_for_io);
2974
2975static void wb_inode_writeback_start(struct bdi_writeback *wb)
2976{
2977	atomic_inc(&wb->writeback_inodes);
2978}
2979
2980static void wb_inode_writeback_end(struct bdi_writeback *wb)
2981{
2982	unsigned long flags;
2983	atomic_dec(&wb->writeback_inodes);
2984	/*
2985	 * Make sure estimate of writeback throughput gets updated after
2986	 * writeback completed. We delay the update by BANDWIDTH_INTERVAL
2987	 * (which is the interval other bandwidth updates use for batching) so
2988	 * that if multiple inodes end writeback at a similar time, they get
2989	 * batched into one bandwidth update.
2990	 */
2991	spin_lock_irqsave(&wb->work_lock, flags);
2992	if (test_bit(WB_registered, &wb->state))
2993		queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
2994	spin_unlock_irqrestore(&wb->work_lock, flags);
2995}
2996
2997bool __folio_end_writeback(struct folio *folio)
2998{
2999	long nr = folio_nr_pages(folio);
3000	struct address_space *mapping = folio_mapping(folio);
3001	bool ret;
3002
3003	folio_memcg_lock(folio);
3004	if (mapping && mapping_use_writeback_tags(mapping)) {
3005		struct inode *inode = mapping->host;
3006		struct backing_dev_info *bdi = inode_to_bdi(inode);
3007		unsigned long flags;
3008
3009		xa_lock_irqsave(&mapping->i_pages, flags);
3010		ret = folio_xor_flags_has_waiters(folio, 1 << PG_writeback);
3011		__xa_clear_mark(&mapping->i_pages, folio_index(folio),
3012					PAGECACHE_TAG_WRITEBACK);
3013		if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
3014			struct bdi_writeback *wb = inode_to_wb(inode);
3015
3016			wb_stat_mod(wb, WB_WRITEBACK, -nr);
3017			__wb_writeout_add(wb, nr);
3018			if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
3019				wb_inode_writeback_end(wb);
3020		}
3021
3022		if (mapping->host && !mapping_tagged(mapping,
3023						     PAGECACHE_TAG_WRITEBACK))
3024			sb_clear_inode_writeback(mapping->host);
3025
3026		xa_unlock_irqrestore(&mapping->i_pages, flags);
3027	} else {
3028		ret = folio_xor_flags_has_waiters(folio, 1 << PG_writeback);
3029	}
3030
3031	lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
3032	zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
3033	node_stat_mod_folio(folio, NR_WRITTEN, nr);
3034	folio_memcg_unlock(folio);
3035
3036	return ret;
3037}
3038
3039void __folio_start_writeback(struct folio *folio, bool keep_write)
3040{
3041	long nr = folio_nr_pages(folio);
3042	struct address_space *mapping = folio_mapping(folio);
3043	int access_ret;
3044
3045	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
3046
3047	folio_memcg_lock(folio);
3048	if (mapping && mapping_use_writeback_tags(mapping)) {
3049		XA_STATE(xas, &mapping->i_pages, folio_index(folio));
3050		struct inode *inode = mapping->host;
3051		struct backing_dev_info *bdi = inode_to_bdi(inode);
3052		unsigned long flags;
3053		bool on_wblist;
3054
3055		xas_lock_irqsave(&xas, flags);
3056		xas_load(&xas);
3057		folio_test_set_writeback(folio);
3058
3059		on_wblist = mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
3060
3061		xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
3062		if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
3063			struct bdi_writeback *wb = inode_to_wb(inode);
3064
3065			wb_stat_mod(wb, WB_WRITEBACK, nr);
3066			if (!on_wblist)
3067				wb_inode_writeback_start(wb);
3068		}
3069
3070		/*
3071		 * We can come through here when swapping anonymous
3072		 * folios, so we don't necessarily have an inode to
3073		 * track for sync.
3074		 */
3075		if (mapping->host && !on_wblist)
3076			sb_mark_inode_writeback(mapping->host);
3077		if (!folio_test_dirty(folio))
3078			xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
3079		if (!keep_write)
3080			xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
3081		xas_unlock_irqrestore(&xas, flags);
3082	} else {
3083		folio_test_set_writeback(folio);
3084	}
3085
3086	lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
3087	zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
3088	folio_memcg_unlock(folio);
3089
3090	access_ret = arch_make_folio_accessible(folio);
3091	/*
3092	 * If writeback has been triggered on a page that cannot be made
3093	 * accessible, it is too late to recover here.
3094	 */
3095	VM_BUG_ON_FOLIO(access_ret != 0, folio);
3096}
3097EXPORT_SYMBOL(__folio_start_writeback);
3098
3099/**
3100 * folio_wait_writeback - Wait for a folio to finish writeback.
3101 * @folio: The folio to wait for.
3102 *
3103 * If the folio is currently being written back to storage, wait for the
3104 * I/O to complete.
3105 *
3106 * Context: Sleeps.  Must be called in process context and with
3107 * no spinlocks held.  Caller should hold a reference on the folio.
3108 * If the folio is not locked, writeback may start again after writeback
3109 * has finished.
3110 */
3111void folio_wait_writeback(struct folio *folio)
3112{
3113	while (folio_test_writeback(folio)) {
3114		trace_folio_wait_writeback(folio, folio_mapping(folio));
3115		folio_wait_bit(folio, PG_writeback);
3116	}
3117}
3118EXPORT_SYMBOL_GPL(folio_wait_writeback);
3119
3120/**
3121 * folio_wait_writeback_killable - Wait for a folio to finish writeback.
3122 * @folio: The folio to wait for.
3123 *
3124 * If the folio is currently being written back to storage, wait for the
3125 * I/O to complete or a fatal signal to arrive.
3126 *
3127 * Context: Sleeps.  Must be called in process context and with
3128 * no spinlocks held.  Caller should hold a reference on the folio.
3129 * If the folio is not locked, writeback may start again after writeback
3130 * has finished.
3131 * Return: 0 on success, -EINTR if we get a fatal signal while waiting.
3132 */
3133int folio_wait_writeback_killable(struct folio *folio)
3134{
3135	while (folio_test_writeback(folio)) {
3136		trace_folio_wait_writeback(folio, folio_mapping(folio));
3137		if (folio_wait_bit_killable(folio, PG_writeback))
3138			return -EINTR;
3139	}
3140
3141	return 0;
3142}
3143EXPORT_SYMBOL_GPL(folio_wait_writeback_killable);
3144
3145/**
3146 * folio_wait_stable() - wait for writeback to finish, if necessary.
3147 * @folio: The folio to wait on.
3148 *
3149 * This function determines if the given folio is related to a backing
3150 * device that requires folio contents to be held stable during writeback.
3151 * If so, then it will wait for any pending writeback to complete.
3152 *
3153 * Context: Sleeps.  Must be called in process context and with
3154 * no spinlocks held.  Caller should hold a reference on the folio.
3155 * If the folio is not locked, writeback may start again after writeback
3156 * has finished.
3157 */
3158void folio_wait_stable(struct folio *folio)
3159{
3160	if (mapping_stable_writes(folio_mapping(folio)))
3161		folio_wait_writeback(folio);
3162}
3163EXPORT_SYMBOL_GPL(folio_wait_stable);
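
/*
 * Illustrative usage (a sketch of what filemap_page_mkwrite() and
 * similar fault paths do, not a verbatim excerpt): before letting a
 * folio be modified through a writable mapping, wait for any stable
 * writeback:
 *
 *	folio_lock(folio);
 *	folio_wait_stable(folio);
 *	<mark the folio dirty and allow the write>;
 */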