   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/mm/vmstat.c
   4 *
   5 *  Manages VM statistics
   6 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   7 *
   8 *  zoned VM statistics
   9 *  Copyright (C) 2006 Silicon Graphics, Inc.,
  10 *		Christoph Lameter <christoph@lameter.com>
  11 *  Copyright (C) 2008-2014 Christoph Lameter
  12 */
  13#include <linux/fs.h>
  14#include <linux/mm.h>
  15#include <linux/err.h>
  16#include <linux/module.h>
  17#include <linux/slab.h>
  18#include <linux/cpu.h>
  19#include <linux/cpumask.h>
  20#include <linux/vmstat.h>
  21#include <linux/proc_fs.h>
  22#include <linux/seq_file.h>
  23#include <linux/debugfs.h>
  24#include <linux/sched.h>
  25#include <linux/math64.h>
  26#include <linux/writeback.h>
  27#include <linux/compaction.h>
  28#include <linux/mm_inline.h>
  29#include <linux/page_ext.h>
  30#include <linux/page_owner.h>
  31
  32#include "internal.h"
  33
  34#define NUMA_STATS_THRESHOLD (U16_MAX - 2)
  35
  36#ifdef CONFIG_NUMA
  37int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;
  38
  39/* zero numa counters within a zone */
  40static void zero_zone_numa_counters(struct zone *zone)
  41{
  42	int item, cpu;
  43
  44	for (item = 0; item < NR_VM_NUMA_STAT_ITEMS; item++) {
  45		atomic_long_set(&zone->vm_numa_stat[item], 0);
  46		for_each_online_cpu(cpu)
  47			per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item]
  48						= 0;
  49	}
  50}
  51
  52/* zero numa counters of all the populated zones */
  53static void zero_zones_numa_counters(void)
  54{
  55	struct zone *zone;
  56
  57	for_each_populated_zone(zone)
  58		zero_zone_numa_counters(zone);
  59}
  60
  61/* zero global numa counters */
  62static void zero_global_numa_counters(void)
  63{
  64	int item;
  65
  66	for (item = 0; item < NR_VM_NUMA_STAT_ITEMS; item++)
  67		atomic_long_set(&vm_numa_stat[item], 0);
  68}
  69
  70static void invalid_numa_statistics(void)
  71{
  72	zero_zones_numa_counters();
  73	zero_global_numa_counters();
  74}
  75
  76static DEFINE_MUTEX(vm_numa_stat_lock);
  77
  78int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
  79		void *buffer, size_t *length, loff_t *ppos)
  80{
  81	int ret, oldval;
  82
  83	mutex_lock(&vm_numa_stat_lock);
  84	if (write)
  85		oldval = sysctl_vm_numa_stat;
  86	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
  87	if (ret || !write)
  88		goto out;
  89
  90	if (oldval == sysctl_vm_numa_stat)
  91		goto out;
  92	else if (sysctl_vm_numa_stat == ENABLE_NUMA_STAT) {
  93		static_branch_enable(&vm_numa_stat_key);
  94		pr_info("enable numa statistics\n");
  95	} else {
  96		static_branch_disable(&vm_numa_stat_key);
  97		invalid_numa_statistics();
  98		pr_info("disable numa statistics, and clear numa counters\n");
  99	}
 100
 101out:
 102	mutex_unlock(&vm_numa_stat_lock);
 103	return ret;
 104}
 105#endif
 106
 107#ifdef CONFIG_VM_EVENT_COUNTERS
 108DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
 109EXPORT_PER_CPU_SYMBOL(vm_event_states);
 110
 111static void sum_vm_events(unsigned long *ret)
 112{
 113	int cpu;
 114	int i;
 115
 116	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
 117
 118	for_each_online_cpu(cpu) {
 119		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
 120
 121		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
 122			ret[i] += this->event[i];
 123	}
 124}
 125
 126/*
 127 * Accumulate the vm event counters across all CPUs.
 128 * The result is unavoidably approximate - it can change
 129 * during and after execution of this function.
  130 */
 131void all_vm_events(unsigned long *ret)
 132{
 133	get_online_cpus();
 134	sum_vm_events(ret);
 135	put_online_cpus();
 136}
 137EXPORT_SYMBOL_GPL(all_vm_events);
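/*
 * Illustrative caller (not part of this file): snapshot all event counters
 * and index the result with the vm_event_item enum, e.g. for major faults:
 *
 *	unsigned long events[NR_VM_EVENT_ITEMS];
 *
 *	all_vm_events(events);
 *	pr_info("major faults so far: %lu\n", events[PGMAJFAULT]);
 */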
 138
 139/*
 140 * Fold the foreign cpu events into our own.
 141 *
 142 * This is adding to the events on one processor
 143 * but keeps the global counts constant.
 144 */
 145void vm_events_fold_cpu(int cpu)
 146{
 147	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
 148	int i;
 149
 150	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
 151		count_vm_events(i, fold_state->event[i]);
 152		fold_state->event[i] = 0;
 153	}
 154}
 155
 156#endif /* CONFIG_VM_EVENT_COUNTERS */
 157
 158/*
 159 * Manage combined zone based / global counters
 160 *
 161 * vm_stat contains the global counters
 162 */
 163atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
 164atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS] __cacheline_aligned_in_smp;
 165atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
 166EXPORT_SYMBOL(vm_zone_stat);
 167EXPORT_SYMBOL(vm_numa_stat);
 168EXPORT_SYMBOL(vm_node_stat);
 169
 170#ifdef CONFIG_SMP
 171
 172int calculate_pressure_threshold(struct zone *zone)
 173{
 174	int threshold;
 175	int watermark_distance;
 176
 177	/*
 178	 * As vmstats are not up to date, there is drift between the estimated
 179	 * and real values. For high thresholds and a high number of CPUs, it
 180	 * is possible for the min watermark to be breached while the estimated
 181	 * value looks fine. The pressure threshold is a reduced value such
 182	 * that even the maximum amount of drift will not accidentally breach
 183	 * the min watermark
 184	 */
 185	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
 186	threshold = max(1, (int)(watermark_distance / num_online_cpus()));
 187
 188	/*
 189	 * Maximum threshold is 125
 190	 */
 191	threshold = min(125, threshold);
 192
 193	return threshold;
 194}
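/*
 * Worked example (illustrative numbers): with a 512-page gap between the
 * low and min watermarks and 8 online CPUs, the threshold becomes
 * max(1, 512 / 8) = 64, so even if every CPU holds a full 64-page delta,
 * the total drift of 8 * 64 = 512 pages cannot silently breach the min
 * watermark.
 */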
 195
 196int calculate_normal_threshold(struct zone *zone)
 197{
 198	int threshold;
 199	int mem;	/* memory in 128 MB units */
 200
 201	/*
 202	 * The threshold scales with the number of processors and the amount
 203	 * of memory per zone. More memory means that we can defer updates for
 204	 * longer, more processors could lead to more contention.
 205 	 * fls() is used to have a cheap way of logarithmic scaling.
 206	 *
 207	 * Some sample thresholds:
 208	 *
 209	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
 210	 * ------------------------------------------------------------------
 211	 * 8		1		1	0.9-1 GB	4
 212	 * 16		2		2	0.9-1 GB	4
 213	 * 20 		2		2	1-2 GB		5
 214	 * 24		2		2	2-4 GB		6
 215	 * 28		2		2	4-8 GB		7
 216	 * 32		2		2	8-16 GB		8
 217	 * 4		2		2	<128M		1
 218	 * 30		4		3	2-4 GB		5
 219	 * 48		4		3	8-16 GB		8
 220	 * 32		8		4	1-2 GB		4
 221	 * 32		8		4	0.9-1GB		4
 222	 * 10		16		5	<128M		1
 223	 * 40		16		5	900M		4
 224	 * 70		64		7	2-4 GB		5
 225	 * 84		64		7	4-8 GB		6
 226	 * 108		512		9	4-8 GB		6
 227	 * 125		1024		10	8-16 GB		8
 228	 * 125		1024		10	16-32 GB	9
 229	 */
 230
 231	mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);
 232
 233	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
 234
 235	/*
 236	 * Maximum threshold is 125
 237	 */
 238	threshold = min(125, threshold);
 239
 240	return threshold;
 241}
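/*
 * Worked example: on a 4-CPU machine with a 1.5 GB zone,
 * mem = 1536 MB / 128 MB = 12, so threshold = 2 * fls(4) * (1 + fls(12)) =
 * 2 * 3 * 5 = 30. The cap of 125 only matters on very large machines,
 * such as the 1024-processor rows in the table above.
 */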
 242
 243/*
 244 * Refresh the thresholds for each zone.
 245 */
 246void refresh_zone_stat_thresholds(void)
 247{
 248	struct pglist_data *pgdat;
 249	struct zone *zone;
 250	int cpu;
 251	int threshold;
 252
 253	/* Zero current pgdat thresholds */
 254	for_each_online_pgdat(pgdat) {
 255		for_each_online_cpu(cpu) {
 256			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
 257		}
 258	}
 259
 260	for_each_populated_zone(zone) {
 261		struct pglist_data *pgdat = zone->zone_pgdat;
 262		unsigned long max_drift, tolerate_drift;
 263
 264		threshold = calculate_normal_threshold(zone);
 265
 266		for_each_online_cpu(cpu) {
 267			int pgdat_threshold;
 268
 269			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
 270							= threshold;
 271
 272			/* Base nodestat threshold on the largest populated zone. */
 273			pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
 274			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
 275				= max(threshold, pgdat_threshold);
 276		}
 277
 278		/*
 279		 * Only set percpu_drift_mark if there is a danger that
 280		 * NR_FREE_PAGES reports the low watermark is ok when in fact
 281		 * the min watermark could be breached by an allocation
 282		 */
 283		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
 284		max_drift = num_online_cpus() * threshold;
 285		if (max_drift > tolerate_drift)
 286			zone->percpu_drift_mark = high_wmark_pages(zone) +
 287					max_drift;
 288	}
 289}
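/*
 * Worked example (illustrative numbers): 64 online CPUs with a threshold
 * of 125 give max_drift = 8000 pages. If the low-min watermark gap is
 * smaller than that, percpu_drift_mark is set above the high watermark so
 * that callers such as zone_watermark_ok_safe() can switch to the exact
 * zone_page_state_snapshot() before the min watermark is silently breached.
 */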
 290
 291void set_pgdat_percpu_threshold(pg_data_t *pgdat,
 292				int (*calculate_pressure)(struct zone *))
 293{
 294	struct zone *zone;
 295	int cpu;
 296	int threshold;
 297	int i;
 298
 299	for (i = 0; i < pgdat->nr_zones; i++) {
 300		zone = &pgdat->node_zones[i];
 301		if (!zone->percpu_drift_mark)
 302			continue;
 303
 304		threshold = (*calculate_pressure)(zone);
 305		for_each_online_cpu(cpu)
 306			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
 307							= threshold;
 308	}
 309}
 310
 311/*
 312 * For use when we know that interrupts are disabled,
 313 * or when we know that preemption is disabled and that
 314 * particular counter cannot be updated from interrupt context.
 315 */
 316void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 317			   long delta)
 318{
 319	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 320	s8 __percpu *p = pcp->vm_stat_diff + item;
 321	long x;
 322	long t;
 323
 324	x = delta + __this_cpu_read(*p);
 325
 326	t = __this_cpu_read(pcp->stat_threshold);
 327
 328	if (unlikely(x > t || x < -t)) {
 329		zone_page_state_add(x, zone, item);
 330		x = 0;
 331	}
 332	__this_cpu_write(*p, x);
 333}
 334EXPORT_SYMBOL(__mod_zone_page_state);
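/*
 * Sketch of a typical call site (illustrative; real callers live in
 * mm/page_alloc.c and friends): with interrupts already disabled, e.g.
 * under the zone lock, removing a 2^order page from the free lists would
 * be accounted as:
 *
 *	__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
 */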
 335
 336void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
 337				long delta)
 338{
 339	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
 340	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 341	long x;
 342	long t;
 343
 344	if (vmstat_item_in_bytes(item)) {
 345		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
 346		delta >>= PAGE_SHIFT;
 347	}
 348
 349	x = delta + __this_cpu_read(*p);
 350
 351	t = __this_cpu_read(pcp->stat_threshold);
 352
 353	if (unlikely(x > t || x < -t)) {
 354		node_page_state_add(x, pgdat, item);
 355		x = 0;
 356	}
 357	__this_cpu_write(*p, x);
 358}
 359EXPORT_SYMBOL(__mod_node_page_state);
 360
 361/*
 362 * Optimized increment and decrement functions.
 363 *
 364 * These are only for a single page and therefore can take a struct page *
 365 * argument instead of struct zone *. This allows the inclusion of the code
 366 * generated for page_zone(page) into the optimized functions.
 367 *
 368 * No overflow check is necessary and therefore the differential can be
 369 * incremented or decremented in place which may allow the compilers to
 370 * generate better code.
 371 * The increment or decrement is known and therefore one boundary check can
 372 * be omitted.
 373 *
 374 * NOTE: These functions are very performance sensitive. Change only
 375 * with care.
 376 *
 377 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 378 * However, the code must first determine the differential location in a zone
 379 * based on the processor number and then inc/dec the counter. There is no
 380 * guarantee without disabling preemption that the processor will not change
 381 * in between and therefore the atomicity vs. interrupt cannot be exploited
 382 * in a useful way here.
 383 */
 384void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 385{
 386	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 387	s8 __percpu *p = pcp->vm_stat_diff + item;
 388	s8 v, t;
 389
 390	v = __this_cpu_inc_return(*p);
 391	t = __this_cpu_read(pcp->stat_threshold);
 392	if (unlikely(v > t)) {
 393		s8 overstep = t >> 1;
 394
 395		zone_page_state_add(v + overstep, zone, item);
 396		__this_cpu_write(*p, -overstep);
 397	}
 398}
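/*
 * Worked example (illustrative): with a stat_threshold of 32, the
 * increment that takes the per-cpu delta to 33 publishes 33 + 16 = 49 to
 * the zone counter and writes -16 back, so the combined value still reads
 * 49 - 16 = 33 while the deliberate overshoot roughly halves how often
 * the global counter must be touched.
 */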
 399
 400void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 401{
 402	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
 403	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 404	s8 v, t;
 405
 406	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
 407
 408	v = __this_cpu_inc_return(*p);
 409	t = __this_cpu_read(pcp->stat_threshold);
 410	if (unlikely(v > t)) {
 411		s8 overstep = t >> 1;
 412
 413		node_page_state_add(v + overstep, pgdat, item);
 414		__this_cpu_write(*p, -overstep);
 415	}
 416}
 417
 418void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
 419{
 420	__inc_zone_state(page_zone(page), item);
 421}
 422EXPORT_SYMBOL(__inc_zone_page_state);
 423
 424void __inc_node_page_state(struct page *page, enum node_stat_item item)
 425{
 426	__inc_node_state(page_pgdat(page), item);
 427}
 428EXPORT_SYMBOL(__inc_node_page_state);
 429
 430void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 431{
 432	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 433	s8 __percpu *p = pcp->vm_stat_diff + item;
 434	s8 v, t;
 435
 436	v = __this_cpu_dec_return(*p);
 437	t = __this_cpu_read(pcp->stat_threshold);
  438	if (unlikely(v < -t)) {
 439		s8 overstep = t >> 1;
 440
 441		zone_page_state_add(v - overstep, zone, item);
 442		__this_cpu_write(*p, overstep);
 443	}
 444}
 445
 446void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 447{
 448	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
 449	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 450	s8 v, t;
 451
 452	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
 453
 454	v = __this_cpu_dec_return(*p);
 455	t = __this_cpu_read(pcp->stat_threshold);
  456	if (unlikely(v < -t)) {
 457		s8 overstep = t >> 1;
 458
 459		node_page_state_add(v - overstep, pgdat, item);
 460		__this_cpu_write(*p, overstep);
 461	}
 462}
 463
 464void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
 465{
 466	__dec_zone_state(page_zone(page), item);
 467}
 468EXPORT_SYMBOL(__dec_zone_page_state);
 469
 470void __dec_node_page_state(struct page *page, enum node_stat_item item)
 471{
 472	__dec_node_state(page_pgdat(page), item);
 473}
 474EXPORT_SYMBOL(__dec_node_page_state);
 475
 476#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
 477/*
 478 * If we have cmpxchg_local support then we do not need to incur the overhead
 479 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
 480 *
 481 * mod_state() modifies the zone counter state through atomic per cpu
 482 * operations.
 483 *
  484 * Overstep mode specifies how overstep should be handled:
 485 *     0       No overstepping
 486 *     1       Overstepping half of threshold
 487 *     -1      Overstepping minus half of threshold
  488 */
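/*
 * Worked example (illustrative numbers): with t = 125 and overstep_mode = 1,
 * an update that brings the per-cpu delta to n = 126 computes os = 62,
 * publishes z = 126 + 62 = 188 to the zone counter and leaves the delta at
 * -62; the combined value still reads 188 - 62 = 126, and restarting below
 * zero buys extra headroom before the threshold is crossed again.
 */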
 489static inline void mod_zone_state(struct zone *zone,
 490       enum zone_stat_item item, long delta, int overstep_mode)
 491{
 492	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 493	s8 __percpu *p = pcp->vm_stat_diff + item;
 494	long o, n, t, z;
 495
 496	do {
 497		z = 0;  /* overflow to zone counters */
 498
 499		/*
 500		 * The fetching of the stat_threshold is racy. We may apply
  501		 * a counter threshold to the wrong cpu if we get
 502		 * rescheduled while executing here. However, the next
 503		 * counter update will apply the threshold again and
 504		 * therefore bring the counter under the threshold again.
 505		 *
  506		 * Most of the time the thresholds are the same anyway
 507		 * for all cpus in a zone.
 508		 */
 509		t = this_cpu_read(pcp->stat_threshold);
 510
 511		o = this_cpu_read(*p);
 512		n = delta + o;
 513
 514		if (n > t || n < -t) {
  515			int os = overstep_mode * (t >> 1);
 516
 517			/* Overflow must be added to zone counters */
 518			z = n + os;
 519			n = -os;
 520		}
 521	} while (this_cpu_cmpxchg(*p, o, n) != o);
 522
 523	if (z)
 524		zone_page_state_add(z, zone, item);
 525}
 526
 527void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 528			 long delta)
 529{
 530	mod_zone_state(zone, item, delta, 0);
 531}
 532EXPORT_SYMBOL(mod_zone_page_state);
 533
 534void inc_zone_page_state(struct page *page, enum zone_stat_item item)
 535{
 536	mod_zone_state(page_zone(page), item, 1, 1);
 537}
 538EXPORT_SYMBOL(inc_zone_page_state);
 539
 540void dec_zone_page_state(struct page *page, enum zone_stat_item item)
 541{
 542	mod_zone_state(page_zone(page), item, -1, -1);
 543}
 544EXPORT_SYMBOL(dec_zone_page_state);
 545
 546static inline void mod_node_state(struct pglist_data *pgdat,
 547       enum node_stat_item item, int delta, int overstep_mode)
 548{
 549	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
 550	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 551	long o, n, t, z;
 552
 553	if (vmstat_item_in_bytes(item)) {
 554		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
 555		delta >>= PAGE_SHIFT;
 556	}
 557
 558	do {
 559		z = 0;  /* overflow to node counters */
 560
 561		/*
 562		 * The fetching of the stat_threshold is racy. We may apply
  563		 * a counter threshold to the wrong cpu if we get
 564		 * rescheduled while executing here. However, the next
 565		 * counter update will apply the threshold again and
 566		 * therefore bring the counter under the threshold again.
 567		 *
  568		 * Most of the time the thresholds are the same anyway
 569		 * for all cpus in a node.
 570		 */
 571		t = this_cpu_read(pcp->stat_threshold);
 572
 573		o = this_cpu_read(*p);
 574		n = delta + o;
 575
 576		if (n > t || n < -t) {
  577			int os = overstep_mode * (t >> 1);
 578
 579			/* Overflow must be added to node counters */
 580			z = n + os;
 581			n = -os;
 582		}
 583	} while (this_cpu_cmpxchg(*p, o, n) != o);
 584
 585	if (z)
 586		node_page_state_add(z, pgdat, item);
 587}
 588
 589void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
 590					long delta)
 591{
 592	mod_node_state(pgdat, item, delta, 0);
 593}
 594EXPORT_SYMBOL(mod_node_page_state);
 595
 596void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 597{
 598	mod_node_state(pgdat, item, 1, 1);
 599}
 600
 601void inc_node_page_state(struct page *page, enum node_stat_item item)
 602{
 603	mod_node_state(page_pgdat(page), item, 1, 1);
 604}
 605EXPORT_SYMBOL(inc_node_page_state);
 606
 607void dec_node_page_state(struct page *page, enum node_stat_item item)
 608{
 609	mod_node_state(page_pgdat(page), item, -1, -1);
 610}
 611EXPORT_SYMBOL(dec_node_page_state);
 612#else
 613/*
 614 * Use interrupt disable to serialize counter updates
 615 */
 616void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 617			 long delta)
 618{
 619	unsigned long flags;
 620
 621	local_irq_save(flags);
 622	__mod_zone_page_state(zone, item, delta);
 623	local_irq_restore(flags);
 624}
 625EXPORT_SYMBOL(mod_zone_page_state);
 626
 627void inc_zone_page_state(struct page *page, enum zone_stat_item item)
 628{
 629	unsigned long flags;
 630	struct zone *zone;
 631
 632	zone = page_zone(page);
 633	local_irq_save(flags);
 634	__inc_zone_state(zone, item);
 635	local_irq_restore(flags);
 636}
 637EXPORT_SYMBOL(inc_zone_page_state);
 638
 639void dec_zone_page_state(struct page *page, enum zone_stat_item item)
 640{
 641	unsigned long flags;
 642
 643	local_irq_save(flags);
 644	__dec_zone_page_state(page, item);
 645	local_irq_restore(flags);
 646}
 647EXPORT_SYMBOL(dec_zone_page_state);
 648
 649void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 650{
 651	unsigned long flags;
 652
 653	local_irq_save(flags);
 654	__inc_node_state(pgdat, item);
 655	local_irq_restore(flags);
 656}
 657EXPORT_SYMBOL(inc_node_state);
 658
 659void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
 660					long delta)
 661{
 662	unsigned long flags;
 663
 664	local_irq_save(flags);
 665	__mod_node_page_state(pgdat, item, delta);
 666	local_irq_restore(flags);
 667}
 668EXPORT_SYMBOL(mod_node_page_state);
 669
 670void inc_node_page_state(struct page *page, enum node_stat_item item)
 671{
 672	unsigned long flags;
 673	struct pglist_data *pgdat;
 674
 675	pgdat = page_pgdat(page);
 676	local_irq_save(flags);
 677	__inc_node_state(pgdat, item);
 678	local_irq_restore(flags);
 679}
 680EXPORT_SYMBOL(inc_node_page_state);
 681
 682void dec_node_page_state(struct page *page, enum node_stat_item item)
 683{
 684	unsigned long flags;
 685
 686	local_irq_save(flags);
 687	__dec_node_page_state(page, item);
 688	local_irq_restore(flags);
 689}
 690EXPORT_SYMBOL(dec_node_page_state);
 691#endif
 692
 693/*
 694 * Fold a differential into the global counters.
 695 * Returns the number of counters updated.
 696 */
 697#ifdef CONFIG_NUMA
 698static int fold_diff(int *zone_diff, int *numa_diff, int *node_diff)
 699{
 700	int i;
 701	int changes = 0;
 702
 703	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 704		if (zone_diff[i]) {
 705			atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
 706			changes++;
 707	}
 708
 709	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
 710		if (numa_diff[i]) {
 711			atomic_long_add(numa_diff[i], &vm_numa_stat[i]);
 712			changes++;
 713	}
 714
 715	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
 716		if (node_diff[i]) {
 717			atomic_long_add(node_diff[i], &vm_node_stat[i]);
 718			changes++;
 719	}
 720	return changes;
 721}
 722#else
 723static int fold_diff(int *zone_diff, int *node_diff)
 724{
 725	int i;
 726	int changes = 0;
 727
 728	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 729		if (zone_diff[i]) {
 730			atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
 731			changes++;
 732	}
 733
 734	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
 735		if (node_diff[i]) {
 736			atomic_long_add(node_diff[i], &vm_node_stat[i]);
 737			changes++;
 738	}
 739	return changes;
 740}
 741#endif /* CONFIG_NUMA */
 742
 743/*
 744 * Update the zone counters for the current cpu.
 745 *
 746 * Note that refresh_cpu_vm_stats strives to only access
 747 * node local memory. The per cpu pagesets on remote zones are placed
 748 * in the memory local to the processor using that pageset. So the
 749 * loop over all zones will access a series of cachelines local to
 750 * the processor.
 751 *
 752 * The call to zone_page_state_add updates the cachelines with the
 753 * statistics in the remote zone struct as well as the global cachelines
 754 * with the global counters. These could cause remote node cache line
 755 * bouncing and will have to be only done when necessary.
 756 *
 757 * The function returns the number of global counters updated.
 758 */
 759static int refresh_cpu_vm_stats(bool do_pagesets)
 760{
 761	struct pglist_data *pgdat;
 762	struct zone *zone;
 763	int i;
 764	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 765#ifdef CONFIG_NUMA
 766	int global_numa_diff[NR_VM_NUMA_STAT_ITEMS] = { 0, };
 767#endif
 768	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
 769	int changes = 0;
 770
 771	for_each_populated_zone(zone) {
 772		struct per_cpu_pageset __percpu *p = zone->pageset;
 773
 774		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
 775			int v;
 776
 777			v = this_cpu_xchg(p->vm_stat_diff[i], 0);
 778			if (v) {
 779
 780				atomic_long_add(v, &zone->vm_stat[i]);
 781				global_zone_diff[i] += v;
 782#ifdef CONFIG_NUMA
 783				/* 3 seconds idle till flush */
 784				__this_cpu_write(p->expire, 3);
 785#endif
 786			}
 787		}
 788#ifdef CONFIG_NUMA
 789		for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) {
 790			int v;
 791
 792			v = this_cpu_xchg(p->vm_numa_stat_diff[i], 0);
 793			if (v) {
 794
 795				atomic_long_add(v, &zone->vm_numa_stat[i]);
 796				global_numa_diff[i] += v;
 797				__this_cpu_write(p->expire, 3);
 798			}
 799		}
 800
 801		if (do_pagesets) {
 802			cond_resched();
 803			/*
 804			 * Deal with draining the remote pageset of this
 805			 * processor
 806			 *
  807			 * Check if there are pages remaining in this pageset;
  808			 * if not, there is nothing to expire.
 809			 */
 810			if (!__this_cpu_read(p->expire) ||
 811			       !__this_cpu_read(p->pcp.count))
 812				continue;
 813
 814			/*
 815			 * We never drain zones local to this processor.
 816			 */
 817			if (zone_to_nid(zone) == numa_node_id()) {
 818				__this_cpu_write(p->expire, 0);
 819				continue;
 820			}
 821
 822			if (__this_cpu_dec_return(p->expire))
  823				continue;
 824
 825			if (__this_cpu_read(p->pcp.count)) {
 826				drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
 827				changes++;
 828			}
 829		}
 830#endif
 831	}
 832
 833	for_each_online_pgdat(pgdat) {
 834		struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;
 835
 836		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
 837			int v;
 838
 839			v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
 840			if (v) {
 841				atomic_long_add(v, &pgdat->vm_stat[i]);
 842				global_node_diff[i] += v;
 843			}
 844		}
 845	}
 846
 847#ifdef CONFIG_NUMA
 848	changes += fold_diff(global_zone_diff, global_numa_diff,
 849			     global_node_diff);
 850#else
 851	changes += fold_diff(global_zone_diff, global_node_diff);
 852#endif
 853	return changes;
 854}
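/*
 * Note on the expire logic above: the update worker runs roughly once per
 * sysctl_stat_interval (HZ, i.e. one second, by default), so an expire
 * value of 3 lets a remote pageset sit idle for about three refresh
 * cycles before drain_zone_pages() returns its pages to the buddy lists.
 */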
 855
 856/*
 857 * Fold the data for an offline cpu into the global array.
 858 * There cannot be any access by the offline cpu and therefore
 859 * synchronization is simplified.
 860 */
 861void cpu_vm_stats_fold(int cpu)
 862{
 863	struct pglist_data *pgdat;
 864	struct zone *zone;
 865	int i;
 866	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 867#ifdef CONFIG_NUMA
 868	int global_numa_diff[NR_VM_NUMA_STAT_ITEMS] = { 0, };
 869#endif
 870	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
 871
 872	for_each_populated_zone(zone) {
 873		struct per_cpu_pageset *p;
 874
 875		p = per_cpu_ptr(zone->pageset, cpu);
 876
 877		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 878			if (p->vm_stat_diff[i]) {
 879				int v;
 880
 881				v = p->vm_stat_diff[i];
 882				p->vm_stat_diff[i] = 0;
 883				atomic_long_add(v, &zone->vm_stat[i]);
 884				global_zone_diff[i] += v;
 885			}
 886
 887#ifdef CONFIG_NUMA
 888		for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
 889			if (p->vm_numa_stat_diff[i]) {
 890				int v;
 891
 892				v = p->vm_numa_stat_diff[i];
 893				p->vm_numa_stat_diff[i] = 0;
 894				atomic_long_add(v, &zone->vm_numa_stat[i]);
 895				global_numa_diff[i] += v;
 896			}
 897#endif
 898	}
 899
 900	for_each_online_pgdat(pgdat) {
 901		struct per_cpu_nodestat *p;
 902
 903		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
 904
 905		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
 906			if (p->vm_node_stat_diff[i]) {
 907				int v;
 908
 909				v = p->vm_node_stat_diff[i];
 910				p->vm_node_stat_diff[i] = 0;
 911				atomic_long_add(v, &pgdat->vm_stat[i]);
 912				global_node_diff[i] += v;
 913			}
 914	}
 915
 916#ifdef CONFIG_NUMA
 917	fold_diff(global_zone_diff, global_numa_diff, global_node_diff);
 918#else
 919	fold_diff(global_zone_diff, global_node_diff);
 920#endif
 921}
 922
 923/*
  924 * This is only called if !populated_zone(zone), which implies no other users of
  925 * pset->vm_stat_diff[] exist.
 926 */
 927void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
 928{
 929	int i;
 930
 931	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 932		if (pset->vm_stat_diff[i]) {
 933			int v = pset->vm_stat_diff[i];
 934			pset->vm_stat_diff[i] = 0;
 935			atomic_long_add(v, &zone->vm_stat[i]);
 936			atomic_long_add(v, &vm_zone_stat[i]);
 937		}
 938
 939#ifdef CONFIG_NUMA
 940	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
 941		if (pset->vm_numa_stat_diff[i]) {
 942			int v = pset->vm_numa_stat_diff[i];
 943
 944			pset->vm_numa_stat_diff[i] = 0;
 945			atomic_long_add(v, &zone->vm_numa_stat[i]);
 946			atomic_long_add(v, &vm_numa_stat[i]);
 947		}
 948#endif
 949}
 950#endif
 951
 952#ifdef CONFIG_NUMA
 953void __inc_numa_state(struct zone *zone,
 954				 enum numa_stat_item item)
 955{
 956	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 957	u16 __percpu *p = pcp->vm_numa_stat_diff + item;
 958	u16 v;
 959
 960	v = __this_cpu_inc_return(*p);
 961
 962	if (unlikely(v > NUMA_STATS_THRESHOLD)) {
 963		zone_numa_state_add(v, zone, item);
 964		__this_cpu_write(*p, 0);
 965	}
 966}
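/*
 * Note: the per-cpu NUMA deltas are u16 rather than s8, and
 * NUMA_STATS_THRESHOLD is U16_MAX - 2, so the __this_cpu_inc_return()
 * above can reach U16_MAX - 1 without wrapping. These counters only ever
 * increment, so the much larger threshold simply means fewer global
 * updates on this hot path.
 */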
 967
 968/*
 969 * Determine the per node value of a stat item. This function
 970 * is called frequently in a NUMA machine, so try to be as
 971 * frugal as possible.
 972 */
 973unsigned long sum_zone_node_page_state(int node,
 974				 enum zone_stat_item item)
 975{
 976	struct zone *zones = NODE_DATA(node)->node_zones;
 977	int i;
 978	unsigned long count = 0;
 979
 980	for (i = 0; i < MAX_NR_ZONES; i++)
 981		count += zone_page_state(zones + i, item);
 982
 983	return count;
 984}
 985
 986/*
 987 * Determine the per node value of a numa stat item. To avoid deviation,
 988 * the per cpu stat number in vm_numa_stat_diff[] is also included.
 989 */
 990unsigned long sum_zone_numa_state(int node,
 991				 enum numa_stat_item item)
 992{
 993	struct zone *zones = NODE_DATA(node)->node_zones;
 994	int i;
 995	unsigned long count = 0;
 996
 997	for (i = 0; i < MAX_NR_ZONES; i++)
 998		count += zone_numa_state_snapshot(zones + i, item);
 999
1000	return count;
1001}
1002
1003/*
1004 * Determine the per node value of a stat item.
1005 */
1006unsigned long node_page_state_pages(struct pglist_data *pgdat,
1007				    enum node_stat_item item)
1008{
1009	long x = atomic_long_read(&pgdat->vm_stat[item]);
1010#ifdef CONFIG_SMP
1011	if (x < 0)
1012		x = 0;
1013#endif
1014	return x;
1015}
1016
1017unsigned long node_page_state(struct pglist_data *pgdat,
1018			      enum node_stat_item item)
1019{
1020	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
1021
1022	return node_page_state_pages(pgdat, item);
1023}
1024#endif
1025
1026#ifdef CONFIG_COMPACTION
1027
1028struct contig_page_info {
1029	unsigned long free_pages;
1030	unsigned long free_blocks_total;
1031	unsigned long free_blocks_suitable;
1032};
1033
1034/*
1035 * Calculate the number of free pages in a zone, how many contiguous
1036 * pages are free and how many are large enough to satisfy an allocation of
1037 * the target size. Note that this function makes no attempt to estimate
1038 * how many suitable free blocks there *might* be if MOVABLE pages were
1039 * migrated. Calculating that is possible, but expensive and can be
1040 * figured out from userspace
1041 */
1042static void fill_contig_page_info(struct zone *zone,
1043				unsigned int suitable_order,
1044				struct contig_page_info *info)
1045{
1046	unsigned int order;
1047
1048	info->free_pages = 0;
1049	info->free_blocks_total = 0;
1050	info->free_blocks_suitable = 0;
1051
1052	for (order = 0; order < MAX_ORDER; order++) {
1053		unsigned long blocks;
1054
1055		/* Count number of free blocks */
1056		blocks = zone->free_area[order].nr_free;
1057		info->free_blocks_total += blocks;
1058
1059		/* Count free base pages */
1060		info->free_pages += blocks << order;
1061
1062		/* Count the suitable free blocks */
1063		if (order >= suitable_order)
1064			info->free_blocks_suitable += blocks <<
1065						(order - suitable_order);
1066	}
1067}
1068
1069/*
1070 * A fragmentation index only makes sense if an allocation of a requested
1071 * size would fail. If that is true, the fragmentation index indicates
1072 * whether external fragmentation or a lack of memory was the problem.
1073 * The value can be used to determine if page reclaim or compaction
1074 * should be used
1075 */
1076static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
1077{
1078	unsigned long requested = 1UL << order;
1079
1080	if (WARN_ON_ONCE(order >= MAX_ORDER))
1081		return 0;
1082
1083	if (!info->free_blocks_total)
1084		return 0;
1085
1086	/* Fragmentation index only makes sense when a request would fail */
1087	if (info->free_blocks_suitable)
1088		return -1000;
1089
1090	/*
1091	 * Index is between 0 and 1 so return within 3 decimal places
1092	 *
1093	 * 0 => allocation would fail due to lack of memory
1094	 * 1 => allocation would fail due to fragmentation
1095	 */
1096	return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL, requested)), info->free_blocks_total);
1097}
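/*
 * Worked example (illustrative numbers): 1000 free pages spread over 250
 * free blocks, none big enough for an order-4 request (requested = 16):
 * index = 1000 - (1000 + 1000 * 1000 / 16) / 250 = 1000 - 254 = 746,
 * i.e. ~0.746, pointing at external fragmentation rather than a lack of
 * memory.
 */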
1098
1099/*
1100 * Calculates external fragmentation within a zone wrt the given order.
1101 * It is defined as the percentage of pages found in blocks of size
1102 * less than 1 << order. It returns values in range [0, 100].
1103 */
1104unsigned int extfrag_for_order(struct zone *zone, unsigned int order)
1105{
1106	struct contig_page_info info;
1107
1108	fill_contig_page_info(zone, order, &info);
1109	if (info.free_pages == 0)
1110		return 0;
1111
1112	return div_u64((info.free_pages -
1113			(info.free_blocks_suitable << order)) * 100,
1114			info.free_pages);
1115}
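/*
 * Worked example (illustrative numbers): with 1000 free pages of which
 * 512 sit in blocks of at least the requested order
 * (free_blocks_suitable << order = 512), the result is
 * (1000 - 512) * 100 / 1000 = 48, i.e. 48% of free memory is in blocks
 * too small to serve the request.
 */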
1116
1117/* Same as __fragmentation index but allocs contig_page_info on stack */
1118int fragmentation_index(struct zone *zone, unsigned int order)
1119{
1120	struct contig_page_info info;
1121
1122	fill_contig_page_info(zone, order, &info);
1123	return __fragmentation_index(order, &info);
1124}
1125#endif
1126
1127#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || \
1128    defined(CONFIG_NUMA) || defined(CONFIG_MEMCG)
1129#ifdef CONFIG_ZONE_DMA
1130#define TEXT_FOR_DMA(xx) xx "_dma",
1131#else
1132#define TEXT_FOR_DMA(xx)
1133#endif
1134
1135#ifdef CONFIG_ZONE_DMA32
1136#define TEXT_FOR_DMA32(xx) xx "_dma32",
1137#else
1138#define TEXT_FOR_DMA32(xx)
1139#endif
1140
1141#ifdef CONFIG_HIGHMEM
1142#define TEXT_FOR_HIGHMEM(xx) xx "_high",
1143#else
1144#define TEXT_FOR_HIGHMEM(xx)
1145#endif
1146
1147#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
1148					TEXT_FOR_HIGHMEM(xx) xx "_movable",
1149
1150const char * const vmstat_text[] = {
1151	/* enum zone_stat_item counters */
1152	"nr_free_pages",
1153	"nr_zone_inactive_anon",
1154	"nr_zone_active_anon",
1155	"nr_zone_inactive_file",
1156	"nr_zone_active_file",
1157	"nr_zone_unevictable",
1158	"nr_zone_write_pending",
1159	"nr_mlock",
1160	"nr_page_table_pages",
1161	"nr_bounce",
1162#if IS_ENABLED(CONFIG_ZSMALLOC)
1163	"nr_zspages",
1164#endif
1165	"nr_free_cma",
1166
1167	/* enum numa_stat_item counters */
1168#ifdef CONFIG_NUMA
1169	"numa_hit",
1170	"numa_miss",
1171	"numa_foreign",
1172	"numa_interleave",
1173	"numa_local",
1174	"numa_other",
1175#endif
1176
1177	/* enum node_stat_item counters */
1178	"nr_inactive_anon",
1179	"nr_active_anon",
1180	"nr_inactive_file",
1181	"nr_active_file",
1182	"nr_unevictable",
1183	"nr_slab_reclaimable",
1184	"nr_slab_unreclaimable",
1185	"nr_isolated_anon",
1186	"nr_isolated_file",
1187	"workingset_nodes",
1188	"workingset_refault_anon",
1189	"workingset_refault_file",
1190	"workingset_activate_anon",
1191	"workingset_activate_file",
1192	"workingset_restore_anon",
1193	"workingset_restore_file",
1194	"workingset_nodereclaim",
1195	"nr_anon_pages",
1196	"nr_mapped",
1197	"nr_file_pages",
1198	"nr_dirty",
1199	"nr_writeback",
1200	"nr_writeback_temp",
1201	"nr_shmem",
1202	"nr_shmem_hugepages",
1203	"nr_shmem_pmdmapped",
1204	"nr_file_hugepages",
1205	"nr_file_pmdmapped",
1206	"nr_anon_transparent_hugepages",
1207	"nr_vmscan_write",
1208	"nr_vmscan_immediate_reclaim",
1209	"nr_dirtied",
1210	"nr_written",
1211	"nr_kernel_misc_reclaimable",
1212	"nr_foll_pin_acquired",
1213	"nr_foll_pin_released",
1214	"nr_kernel_stack",
1215#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
1216	"nr_shadow_call_stack",
1217#endif
1218
1219	/* enum writeback_stat_item counters */
1220	"nr_dirty_threshold",
1221	"nr_dirty_background_threshold",
1222
1223#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
1224	/* enum vm_event_item counters */
1225	"pgpgin",
1226	"pgpgout",
1227	"pswpin",
1228	"pswpout",
1229
1230	TEXTS_FOR_ZONES("pgalloc")
1231	TEXTS_FOR_ZONES("allocstall")
1232	TEXTS_FOR_ZONES("pgskip")
1233
1234	"pgfree",
1235	"pgactivate",
1236	"pgdeactivate",
1237	"pglazyfree",
1238
1239	"pgfault",
1240	"pgmajfault",
1241	"pglazyfreed",
1242
1243	"pgrefill",
1244	"pgreuse",
1245	"pgsteal_kswapd",
1246	"pgsteal_direct",
1247	"pgscan_kswapd",
1248	"pgscan_direct",
1249	"pgscan_direct_throttle",
1250	"pgscan_anon",
1251	"pgscan_file",
1252	"pgsteal_anon",
1253	"pgsteal_file",
1254
1255#ifdef CONFIG_NUMA
1256	"zone_reclaim_failed",
1257#endif
1258	"pginodesteal",
1259	"slabs_scanned",
1260	"kswapd_inodesteal",
1261	"kswapd_low_wmark_hit_quickly",
1262	"kswapd_high_wmark_hit_quickly",
1263	"pageoutrun",
1264
1265	"pgrotated",
1266
1267	"drop_pagecache",
1268	"drop_slab",
1269	"oom_kill",
1270
1271#ifdef CONFIG_NUMA_BALANCING
1272	"numa_pte_updates",
1273	"numa_huge_pte_updates",
1274	"numa_hint_faults",
1275	"numa_hint_faults_local",
1276	"numa_pages_migrated",
1277#endif
1278#ifdef CONFIG_MIGRATION
1279	"pgmigrate_success",
1280	"pgmigrate_fail",
1281	"thp_migration_success",
1282	"thp_migration_fail",
1283	"thp_migration_split",
1284#endif
1285#ifdef CONFIG_COMPACTION
1286	"compact_migrate_scanned",
1287	"compact_free_scanned",
1288	"compact_isolated",
1289	"compact_stall",
1290	"compact_fail",
1291	"compact_success",
1292	"compact_daemon_wake",
1293	"compact_daemon_migrate_scanned",
1294	"compact_daemon_free_scanned",
1295#endif
1296
1297#ifdef CONFIG_HUGETLB_PAGE
1298	"htlb_buddy_alloc_success",
1299	"htlb_buddy_alloc_fail",
1300#endif
1301	"unevictable_pgs_culled",
1302	"unevictable_pgs_scanned",
1303	"unevictable_pgs_rescued",
1304	"unevictable_pgs_mlocked",
1305	"unevictable_pgs_munlocked",
1306	"unevictable_pgs_cleared",
1307	"unevictable_pgs_stranded",
1308
1309#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1310	"thp_fault_alloc",
1311	"thp_fault_fallback",
1312	"thp_fault_fallback_charge",
1313	"thp_collapse_alloc",
1314	"thp_collapse_alloc_failed",
1315	"thp_file_alloc",
1316	"thp_file_fallback",
1317	"thp_file_fallback_charge",
1318	"thp_file_mapped",
1319	"thp_split_page",
1320	"thp_split_page_failed",
1321	"thp_deferred_split_page",
1322	"thp_split_pmd",
1323#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1324	"thp_split_pud",
1325#endif
1326	"thp_zero_page_alloc",
1327	"thp_zero_page_alloc_failed",
1328	"thp_swpout",
1329	"thp_swpout_fallback",
1330#endif
1331#ifdef CONFIG_MEMORY_BALLOON
1332	"balloon_inflate",
1333	"balloon_deflate",
1334#ifdef CONFIG_BALLOON_COMPACTION
1335	"balloon_migrate",
1336#endif
1337#endif /* CONFIG_MEMORY_BALLOON */
1338#ifdef CONFIG_DEBUG_TLBFLUSH
1339	"nr_tlb_remote_flush",
1340	"nr_tlb_remote_flush_received",
1341	"nr_tlb_local_flush_all",
1342	"nr_tlb_local_flush_one",
1343#endif /* CONFIG_DEBUG_TLBFLUSH */
1344
1345#ifdef CONFIG_DEBUG_VM_VMACACHE
1346	"vmacache_find_calls",
1347	"vmacache_find_hits",
1348#endif
1349#ifdef CONFIG_SWAP
1350	"swap_ra",
1351	"swap_ra_hit",
1352#endif
1353#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
1354};
1355#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */
1356
1357#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
1358     defined(CONFIG_PROC_FS)
1359static void *frag_start(struct seq_file *m, loff_t *pos)
1360{
1361	pg_data_t *pgdat;
1362	loff_t node = *pos;
1363
1364	for (pgdat = first_online_pgdat();
1365	     pgdat && node;
1366	     pgdat = next_online_pgdat(pgdat))
1367		--node;
1368
1369	return pgdat;
1370}
1371
1372static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
1373{
1374	pg_data_t *pgdat = (pg_data_t *)arg;
1375
1376	(*pos)++;
1377	return next_online_pgdat(pgdat);
1378}
1379
1380static void frag_stop(struct seq_file *m, void *arg)
1381{
1382}
1383
1384/*
1385 * Walk zones in a node and print using a callback.
1386 * If @assert_populated is true, only use callback for zones that are populated.
1387 */
1388static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
1389		bool assert_populated, bool nolock,
1390		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
1391{
1392	struct zone *zone;
1393	struct zone *node_zones = pgdat->node_zones;
1394	unsigned long flags;
1395
1396	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
1397		if (assert_populated && !populated_zone(zone))
1398			continue;
1399
1400		if (!nolock)
1401			spin_lock_irqsave(&zone->lock, flags);
1402		print(m, pgdat, zone);
1403		if (!nolock)
1404			spin_unlock_irqrestore(&zone->lock, flags);
1405	}
1406}
1407#endif
1408
1409#ifdef CONFIG_PROC_FS
1410static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
1411						struct zone *zone)
1412{
1413	int order;
1414
1415	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1416	for (order = 0; order < MAX_ORDER; ++order)
1417		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
1418	seq_putc(m, '\n');
1419}
1420
1421/*
1422 * This walks the free areas for each zone.
1423 */
1424static int frag_show(struct seq_file *m, void *arg)
1425{
1426	pg_data_t *pgdat = (pg_data_t *)arg;
1427	walk_zones_in_node(m, pgdat, true, false, frag_show_print);
1428	return 0;
1429}
1430
1431static void pagetypeinfo_showfree_print(struct seq_file *m,
1432					pg_data_t *pgdat, struct zone *zone)
1433{
1434	int order, mtype;
1435
1436	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
1437		seq_printf(m, "Node %4d, zone %8s, type %12s ",
1438					pgdat->node_id,
1439					zone->name,
1440					migratetype_names[mtype]);
1441		for (order = 0; order < MAX_ORDER; ++order) {
1442			unsigned long freecount = 0;
1443			struct free_area *area;
1444			struct list_head *curr;
1445			bool overflow = false;
1446
1447			area = &(zone->free_area[order]);
1448
1449			list_for_each(curr, &area->free_list[mtype]) {
1450				/*
1451				 * Cap the free_list iteration because it might
1452				 * be really large and we are under a spinlock
1453				 * so a long time spent here could trigger a
1454				 * hard lockup detector. Anyway this is a
1455				 * debugging tool so knowing there is a handful
1456				 * of pages of this order should be more than
1457				 * sufficient.
1458				 */
1459				if (++freecount >= 100000) {
1460					overflow = true;
1461					break;
1462				}
1463			}
1464			seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
1465			spin_unlock_irq(&zone->lock);
1466			cond_resched();
1467			spin_lock_irq(&zone->lock);
1468		}
1469		seq_putc(m, '\n');
1470	}
1471}
1472
1473/* Print out the free pages at each order for each migratetype */
1474static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
1475{
1476	int order;
1477	pg_data_t *pgdat = (pg_data_t *)arg;
1478
1479	/* Print header */
1480	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
1481	for (order = 0; order < MAX_ORDER; ++order)
1482		seq_printf(m, "%6d ", order);
1483	seq_putc(m, '\n');
1484
1485	walk_zones_in_node(m, pgdat, true, false, pagetypeinfo_showfree_print);
1486
1487	return 0;
1488}
1489
1490static void pagetypeinfo_showblockcount_print(struct seq_file *m,
1491					pg_data_t *pgdat, struct zone *zone)
1492{
1493	int mtype;
1494	unsigned long pfn;
1495	unsigned long start_pfn = zone->zone_start_pfn;
1496	unsigned long end_pfn = zone_end_pfn(zone);
1497	unsigned long count[MIGRATE_TYPES] = { 0, };
1498
1499	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
1500		struct page *page;
1501
1502		page = pfn_to_online_page(pfn);
1503		if (!page)
1504			continue;
1505
1506		/* Watch for unexpected holes punched in the memmap */
1507		if (!memmap_valid_within(pfn, page, zone))
1508			continue;
1509
1510		if (page_zone(page) != zone)
1511			continue;
1512
1513		mtype = get_pageblock_migratetype(page);
1514
1515		if (mtype < MIGRATE_TYPES)
1516			count[mtype]++;
1517	}
1518
1519	/* Print counts */
1520	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1521	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1522		seq_printf(m, "%12lu ", count[mtype]);
1523	seq_putc(m, '\n');
1524}
1525
1526/* Print out the number of pageblocks for each migratetype */
1527static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
1528{
1529	int mtype;
1530	pg_data_t *pgdat = (pg_data_t *)arg;
1531
1532	seq_printf(m, "\n%-23s", "Number of blocks type ");
1533	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1534		seq_printf(m, "%12s ", migratetype_names[mtype]);
1535	seq_putc(m, '\n');
1536	walk_zones_in_node(m, pgdat, true, false,
1537		pagetypeinfo_showblockcount_print);
1538
1539	return 0;
1540}
1541
1542/*
1543 * Print out the number of pageblocks for each migratetype that contain pages
1544 * of other types. This gives an indication of how well fallbacks are being
1545 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
1546 * to determine what is going on
1547 */
1548static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
1549{
1550#ifdef CONFIG_PAGE_OWNER
1551	int mtype;
1552
1553	if (!static_branch_unlikely(&page_owner_inited))
1554		return;
1555
1556	drain_all_pages(NULL);
1557
1558	seq_printf(m, "\n%-23s", "Number of mixed blocks ");
1559	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1560		seq_printf(m, "%12s ", migratetype_names[mtype]);
1561	seq_putc(m, '\n');
1562
1563	walk_zones_in_node(m, pgdat, true, true,
1564		pagetypeinfo_showmixedcount_print);
1565#endif /* CONFIG_PAGE_OWNER */
1566}
1567
1568/*
1569 * This prints out statistics in relation to grouping pages by mobility.
1570 * It is expensive to collect so do not constantly read the file.
1571 */
1572static int pagetypeinfo_show(struct seq_file *m, void *arg)
1573{
1574	pg_data_t *pgdat = (pg_data_t *)arg;
1575
1576	/* check memoryless node */
1577	if (!node_state(pgdat->node_id, N_MEMORY))
1578		return 0;
1579
1580	seq_printf(m, "Page block order: %d\n", pageblock_order);
1581	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
1582	seq_putc(m, '\n');
1583	pagetypeinfo_showfree(m, pgdat);
1584	pagetypeinfo_showblockcount(m, pgdat);
1585	pagetypeinfo_showmixedcount(m, pgdat);
1586
1587	return 0;
1588}
1589
1590static const struct seq_operations fragmentation_op = {
1591	.start	= frag_start,
1592	.next	= frag_next,
1593	.stop	= frag_stop,
1594	.show	= frag_show,
1595};
1596
1597static const struct seq_operations pagetypeinfo_op = {
1598	.start	= frag_start,
1599	.next	= frag_next,
1600	.stop	= frag_stop,
1601	.show	= pagetypeinfo_show,
1602};
1603
1604static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
1605{
1606	int zid;
1607
1608	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1609		struct zone *compare = &pgdat->node_zones[zid];
1610
1611		if (populated_zone(compare))
1612			return zone == compare;
1613	}
1614
1615	return false;
1616}
1617
1618static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1619							struct zone *zone)
1620{
1621	int i;
1622	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1623	if (is_zone_first_populated(pgdat, zone)) {
1624		seq_printf(m, "\n  per-node stats");
1625		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1626			seq_printf(m, "\n      %-12s %lu", node_stat_name(i),
1627				   node_page_state_pages(pgdat, i));
1628		}
1629	}
1630	seq_printf(m,
1631		   "\n  pages free     %lu"
1632		   "\n        min      %lu"
1633		   "\n        low      %lu"
1634		   "\n        high     %lu"
1635		   "\n        spanned  %lu"
1636		   "\n        present  %lu"
1637		   "\n        managed  %lu",
1638		   zone_page_state(zone, NR_FREE_PAGES),
1639		   min_wmark_pages(zone),
1640		   low_wmark_pages(zone),
1641		   high_wmark_pages(zone),
1642		   zone->spanned_pages,
1643		   zone->present_pages,
1644		   zone_managed_pages(zone));
1645
1646	seq_printf(m,
1647		   "\n        protection: (%ld",
1648		   zone->lowmem_reserve[0]);
1649	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
1650		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
1651	seq_putc(m, ')');
1652
1653	/* If unpopulated, no other information is useful */
1654	if (!populated_zone(zone)) {
1655		seq_putc(m, '\n');
1656		return;
1657	}
1658
1659	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1660		seq_printf(m, "\n      %-12s %lu", zone_stat_name(i),
1661			   zone_page_state(zone, i));
1662
1663#ifdef CONFIG_NUMA
1664	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
1665		seq_printf(m, "\n      %-12s %lu", numa_stat_name(i),
1666			   zone_numa_state_snapshot(zone, i));
1667#endif
1668
1669	seq_printf(m, "\n  pagesets");
1670	for_each_online_cpu(i) {
1671		struct per_cpu_pageset *pageset;
1672
1673		pageset = per_cpu_ptr(zone->pageset, i);
1674		seq_printf(m,
1675			   "\n    cpu: %i"
1676			   "\n              count: %i"
1677			   "\n              high:  %i"
1678			   "\n              batch: %i",
1679			   i,
1680			   pageset->pcp.count,
1681			   pageset->pcp.high,
1682			   pageset->pcp.batch);
1683#ifdef CONFIG_SMP
1684		seq_printf(m, "\n  vm stats threshold: %d",
1685				pageset->stat_threshold);
1686#endif
1687	}
1688	seq_printf(m,
1689		   "\n  node_unreclaimable:  %u"
1690		   "\n  start_pfn:           %lu",
1691		   pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
1692		   zone->zone_start_pfn);
1693	seq_putc(m, '\n');
1694}
1695
1696/*
1697 * Output information about zones in @pgdat.  All zones are printed regardless
1698 * of whether they are populated or not: lowmem_reserve_ratio operates on the
1699 * set of all zones and userspace would not be aware of such zones if they are
1700 * suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio).
1701 */
1702static int zoneinfo_show(struct seq_file *m, void *arg)
1703{
1704	pg_data_t *pgdat = (pg_data_t *)arg;
1705	walk_zones_in_node(m, pgdat, false, false, zoneinfo_show_print);
1706	return 0;
1707}
1708
1709static const struct seq_operations zoneinfo_op = {
1710	.start	= frag_start, /* iterate over all zones. The same as in
1711			       * fragmentation. */
1712	.next	= frag_next,
1713	.stop	= frag_stop,
1714	.show	= zoneinfo_show,
1715};
1716
1717#define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \
1718			 NR_VM_NUMA_STAT_ITEMS + \
1719			 NR_VM_NODE_STAT_ITEMS + \
1720			 NR_VM_WRITEBACK_STAT_ITEMS + \
1721			 (IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \
1722			  NR_VM_EVENT_ITEMS : 0))
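/*
 * The ordering above mirrors vmstat_text[]: zone stats, NUMA stats, node
 * stats, the two writeback thresholds, then (if configured) the event
 * counters. vmstat_start() below fills its snapshot buffer in exactly this
 * order, which is what lets vmstat_show() index vmstat_text[] directly.
 */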
1723
1724static void *vmstat_start(struct seq_file *m, loff_t *pos)
1725{
1726	unsigned long *v;
1727	int i;
1728
1729	if (*pos >= NR_VMSTAT_ITEMS)
1730		return NULL;
1731
1732	BUILD_BUG_ON(ARRAY_SIZE(vmstat_text) < NR_VMSTAT_ITEMS);
1733	v = kmalloc_array(NR_VMSTAT_ITEMS, sizeof(unsigned long), GFP_KERNEL);
1734	m->private = v;
1735	if (!v)
1736		return ERR_PTR(-ENOMEM);
1737	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1738		v[i] = global_zone_page_state(i);
1739	v += NR_VM_ZONE_STAT_ITEMS;
1740
1741#ifdef CONFIG_NUMA
1742	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
1743		v[i] = global_numa_state(i);
1744	v += NR_VM_NUMA_STAT_ITEMS;
1745#endif
1746
1747	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
1748		v[i] = global_node_page_state_pages(i);
1749	v += NR_VM_NODE_STAT_ITEMS;
1750
1751	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1752			    v + NR_DIRTY_THRESHOLD);
1753	v += NR_VM_WRITEBACK_STAT_ITEMS;
1754
1755#ifdef CONFIG_VM_EVENT_COUNTERS
1756	all_vm_events(v);
1757	v[PGPGIN] /= 2;		/* sectors -> kbytes */
1758	v[PGPGOUT] /= 2;
1759#endif
1760	return (unsigned long *)m->private + *pos;
1761}
1762
1763static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1764{
1765	(*pos)++;
1766	if (*pos >= NR_VMSTAT_ITEMS)
1767		return NULL;
1768	return (unsigned long *)m->private + *pos;
1769}
1770
1771static int vmstat_show(struct seq_file *m, void *arg)
1772{
1773	unsigned long *l = arg;
1774	unsigned long off = l - (unsigned long *)m->private;
1775
1776	seq_puts(m, vmstat_text[off]);
1777	seq_put_decimal_ull(m, " ", *l);
1778	seq_putc(m, '\n');
1779
1780	if (off == NR_VMSTAT_ITEMS - 1) {
1781		/*
1782		 * We've come to the end - add any deprecated counters to avoid
1783		 * breaking userspace which might depend on them being present.
1784		 */
1785		seq_puts(m, "nr_unstable 0\n");
1786	}
1787	return 0;
1788}
1789
1790static void vmstat_stop(struct seq_file *m, void *arg)
1791{
1792	kfree(m->private);
1793	m->private = NULL;
1794}
1795
1796static const struct seq_operations vmstat_op = {
1797	.start	= vmstat_start,
1798	.next	= vmstat_next,
1799	.stop	= vmstat_stop,
1800	.show	= vmstat_show,
1801};
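/*
 * A read of /proc/vmstat therefore works from a single snapshot:
 * vmstat_start() allocates and fills the array, vmstat_show() emits one
 * "name value" line per item (e.g. "nr_free_pages 123456"), and
 * vmstat_stop() frees the buffer once the traversal ends.
 */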
1802#endif /* CONFIG_PROC_FS */
1803
1804#ifdef CONFIG_SMP
1805static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1806int sysctl_stat_interval __read_mostly = HZ;
1807
1808#ifdef CONFIG_PROC_FS
1809static void refresh_vm_stats(struct work_struct *work)
1810{
1811	refresh_cpu_vm_stats(true);
1812}
1813
1814int vmstat_refresh(struct ctl_table *table, int write,
1815		   void *buffer, size_t *lenp, loff_t *ppos)
1816{
1817	long val;
1818	int err;
1819	int i;
1820
1821	/*
1822	 * The regular update, every sysctl_stat_interval, may come later
1823	 * than expected: leaving a significant amount in per_cpu buckets.
1824	 * This is particularly misleading when checking a quantity of HUGE
1825	 * pages, immediately after running a test.  /proc/sys/vm/stat_refresh,
1826	 * which can equally be echo'ed to or cat'ted from (by root),
1827	 * can be used to update the stats just before reading them.
1828	 *
1829	 * Oh, and since global_zone_page_state() etc. are so careful to hide
1830	 * transiently negative values, report an error here if any of
1831	 * the stats is negative, so we know to go looking for imbalance.
1832	 */
1833	err = schedule_on_each_cpu(refresh_vm_stats);
1834	if (err)
1835		return err;
1836	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
1837		val = atomic_long_read(&vm_zone_stat[i]);
1838		if (val < 0) {
1839			pr_warn("%s: %s %ld\n",
1840				__func__, zone_stat_name(i), val);
1841			err = -EINVAL;
1842		}
1843	}
1844#ifdef CONFIG_NUMA
1845	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) {
1846		val = atomic_long_read(&vm_numa_stat[i]);
1847		if (val < 0) {
1848			pr_warn("%s: %s %ld\n",
1849				__func__, numa_stat_name(i), val);
1850			err = -EINVAL;
1851		}
1852	}
1853#endif
1854	if (err)
1855		return err;
1856	if (write)
1857		*ppos += *lenp;
1858	else
1859		*lenp = 0;
1860	return 0;
1861}
1862#endif /* CONFIG_PROC_FS */
1863
1864static void vmstat_update(struct work_struct *w)
1865{
1866	if (refresh_cpu_vm_stats(true)) {
1867		/*
1868		 * Counters were updated so we expect more updates
1869		 * to occur in the future. Keep on running the
1870		 * update worker thread.
1871		 */
1872		queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
1873				this_cpu_ptr(&vmstat_work),
1874				round_jiffies_relative(sysctl_stat_interval));
1875	}
1876}
1877
1883/*
1884 * Check if the diffs for a certain cpu indicate that
1885 * an update is needed.
1886 */
1887static bool need_update(int cpu)
1888{
1889	struct zone *zone;
1890
1891	for_each_populated_zone(zone) {
1892		struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);
1893
1894		BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1);
1895#ifdef CONFIG_NUMA
1896		BUILD_BUG_ON(sizeof(p->vm_numa_stat_diff[0]) != 2);
1897#endif
1898
1899		/*
1900		 * The fast way of checking if there are any vmstat diffs.
1901		 */
1902		if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS *
1903			       sizeof(p->vm_stat_diff[0])))
1904			return true;
1905#ifdef CONFIG_NUMA
1906		if (memchr_inv(p->vm_numa_stat_diff, 0, NR_VM_NUMA_STAT_ITEMS *
1907			       sizeof(p->vm_numa_stat_diff[0])))
1908			return true;
1909#endif
1910	}
1911	return false;
1912}
1913
1914/*
1915 * Switch off vmstat processing and then fold all the remaining differentials
1916 * until the diffs stay at zero. The function is used by NOHZ and can only be
1917 * invoked when tick processing is not active.
1918 */
1919void quiet_vmstat(void)
1920{
1921	if (system_state != SYSTEM_RUNNING)
1922		return;
1923
1924	if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
1925		return;
1926
1927	if (!need_update(smp_processor_id()))
1928		return;
1929
1930	/*
1931	 * Just refresh counters and do not care about the pending delayed
 1932	 * vmstat_update. It doesn't fire often enough to matter, and canceling
1933	 * it would be too expensive from this path.
1934	 * vmstat_shepherd will take care about that for us.
1935	 */
1936	refresh_cpu_vm_stats(false);
1937}
1938
1939/*
1940 * Shepherd worker thread that checks the
1941 * differentials of processors that have their worker
1942 * threads for vm statistics updates disabled because of
1943 * inactivity.
1944 */
1945static void vmstat_shepherd(struct work_struct *w);
1946
1947static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
1948
1949static void vmstat_shepherd(struct work_struct *w)
1950{
1951	int cpu;
1952
1953	get_online_cpus();
1954	/* Check processors whose vmstat worker threads have been disabled */
1955	for_each_online_cpu(cpu) {
1956		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
1957
1958		if (!delayed_work_pending(dw) && need_update(cpu))
1959			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
1960	}
1961	put_online_cpus();
1962
1963	schedule_delayed_work(&shepherd,
1964		round_jiffies_relative(sysctl_stat_interval));
1965}
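/*
 * Timing sketch, assuming the default sysctl_stat_interval of HZ (one
 * second): the shepherd wakes about once per second, re-queues
 * vmstat_update only on cpus whose diffs are non-zero, and each such
 * worker stops re-arming itself again once its counters go quiet,
 * leaving the deferrable shepherd as the only periodic activity.
 */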
1966
1967static void __init start_shepherd_timer(void)
1968{
1969	int cpu;
1970
1971	for_each_possible_cpu(cpu)
1972		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
1973			vmstat_update);
1974
1975	schedule_delayed_work(&shepherd,
1976		round_jiffies_relative(sysctl_stat_interval));
1977}
1978
1979static void __init init_cpu_node_state(void)
1980{
1981	int node;
1982
1983	for_each_online_node(node) {
1984		if (cpumask_weight(cpumask_of_node(node)) > 0)
1985			node_set_state(node, N_CPU);
1986	}
1987}
1988
1989static int vmstat_cpu_online(unsigned int cpu)
1990{
1991	refresh_zone_stat_thresholds();
1992	node_set_state(cpu_to_node(cpu), N_CPU);
1993	return 0;
1994}
1995
1996static int vmstat_cpu_down_prep(unsigned int cpu)
1997{
1998	cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
1999	return 0;
2000}
2001
2002static int vmstat_cpu_dead(unsigned int cpu)
2003{
2004	const struct cpumask *node_cpus;
2005	int node;
2006
2007	node = cpu_to_node(cpu);
2008
2009	refresh_zone_stat_thresholds();
2010	node_cpus = cpumask_of_node(node);
2011	if (cpumask_weight(node_cpus) > 0)
2012		return 0;
2013
2014	node_clear_state(node, N_CPU);
2015	return 0;
2016}
2017
2018#endif
2019
2020struct workqueue_struct *mm_percpu_wq;
2021
2022void __init init_mm_internals(void)
2023{
2024	int ret __maybe_unused;
2025
2026	mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);
2027
2028#ifdef CONFIG_SMP
2029	ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
2030					NULL, vmstat_cpu_dead);
2031	if (ret < 0)
2032		pr_err("vmstat: failed to register 'dead' hotplug state\n");
2033
2034	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
2035					vmstat_cpu_online,
2036					vmstat_cpu_down_prep);
2037	if (ret < 0)
2038		pr_err("vmstat: failed to register 'online' hotplug state\n");
2039
2040	get_online_cpus();
2041	init_cpu_node_state();
2042	put_online_cpus();
2043
2044	start_shepherd_timer();
2045#endif
2046#ifdef CONFIG_PROC_FS
2047	proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
2048	proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
2049	proc_create_seq("vmstat", 0444, NULL, &vmstat_op);
2050	proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op);
2051#endif
2052}
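/*
 * Minimal userspace sketch (an illustration, not kernel code) of
 * consuming the "vmstat" file created above: /proc/vmstat is a list of
 * "<name> <value>" lines, one per counter.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[128];
	unsigned long long val;
	FILE *f = fopen("/proc/vmstat", "r");	/* created by init_mm_internals() */

	if (!f)
		return 1;
	while (fscanf(f, "%127s %llu", name, &val) == 2) {
		if (!strcmp(name, "nr_free_pages"))	/* first zone_stat_item */
			printf("nr_free_pages = %llu\n", val);
	}
	fclose(f);
	return 0;
}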
2053
2054#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
2055
2056/*
2057 * Return an index indicating how much of the available free memory is
2058 * unusable for an allocation of the requested size.
2059 */
2060static int unusable_free_index(unsigned int order,
2061				struct contig_page_info *info)
2062{
2063	/* No free memory is interpreted as all free memory is unusable */
2064	if (info->free_pages == 0)
2065		return 1000;
2066
2067	/*
2068	 * Index should be a value between 0 and 1. Return a value to 3
2069	 * decimal places.
2070	 *
2071	 * 0 => no fragmentation
2072	 * 1 => high fragmentation
2073	 */
2074	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
2075
2076}
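/*
 * Worked example: with info->free_pages == 1000, order == 3 and
 * info->free_blocks_suitable == 50, the blocks large enough for the
 * request cover 50 << 3 == 400 pages, so the index is
 * (1000 - 400) * 1000 / 1000 == 600, printed as 0.600 below.
 */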
2077
2078static void unusable_show_print(struct seq_file *m,
2079					pg_data_t *pgdat, struct zone *zone)
2080{
2081	unsigned int order;
2082	int index;
2083	struct contig_page_info info;
2084
2085	seq_printf(m, "Node %d, zone %8s ",
2086				pgdat->node_id,
2087				zone->name);
2088	for (order = 0; order < MAX_ORDER; ++order) {
2089		fill_contig_page_info(zone, order, &info);
2090		index = unusable_free_index(order, &info);
2091		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
2092	}
2093
2094	seq_putc(m, '\n');
2095}
2096
2097/*
2098 * Display unusable free space index
2099 *
2100 * The unusable free space index measures how much of the available free
2101 * memory cannot be used to satisfy an allocation of a given size and is a
2102 * value between 0 and 1. The higher the value, the more of free memory is
2103 * unusable and by implication, the worse the external fragmentation is. This
2104 * can be expressed as a percentage by multiplying by 100.
2105 */
2106static int unusable_show(struct seq_file *m, void *arg)
2107{
2108	pg_data_t *pgdat = (pg_data_t *)arg;
2109
2110	/* check memoryless node */
2111	if (!node_state(pgdat->node_id, N_MEMORY))
2112		return 0;
2113
2114	walk_zones_in_node(m, pgdat, true, false, unusable_show_print);
2115
2116	return 0;
2117}
2118
2119static const struct seq_operations unusable_sops = {
2120	.start	= frag_start,
2121	.next	= frag_next,
2122	.stop	= frag_stop,
2123	.show	= unusable_show,
2124};
2125
2126DEFINE_SEQ_ATTRIBUTE(unusable);
2127
2128static void extfrag_show_print(struct seq_file *m,
2129					pg_data_t *pgdat, struct zone *zone)
2130{
2131	unsigned int order;
2132	int index;
2133
2134	/* Alloc on stack as interrupts are disabled for zone walk */
2135	struct contig_page_info info;
2136
2137	seq_printf(m, "Node %d, zone %8s ",
2138				pgdat->node_id,
2139				zone->name);
2140	for (order = 0; order < MAX_ORDER; ++order) {
2141		fill_contig_page_info(zone, order, &info);
2142		index = __fragmentation_index(order, &info);
2143		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
2144	}
2145
2146	seq_putc(m, '\n');
2147}
2148
2149/*
2150 * Display fragmentation index for orders that allocations would fail for
2151 */
2152static int extfrag_show(struct seq_file *m, void *arg)
2153{
2154	pg_data_t *pgdat = (pg_data_t *)arg;
2155
2156	walk_zones_in_node(m, pgdat, true, false, extfrag_show_print);
2157
2158	return 0;
2159}
2160
2161static const struct seq_operations extfrag_sops = {
2162	.start	= frag_start,
2163	.next	= frag_next,
2164	.stop	= frag_stop,
2165	.show	= extfrag_show,
2166};
2167
2168DEFINE_SEQ_ATTRIBUTE(extfrag);
2169
2170static int __init extfrag_debug_init(void)
2171{
2172	struct dentry *extfrag_debug_root;
2173
2174	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
2175
2176	debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL,
2177			    &unusable_fops);
2178
2179	debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL,
2180			    &extfrag_fops);
2181
2182	return 0;
2183}
2184
2185module_init(extfrag_debug_init);
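/*
 * Example usage, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *   # cat /sys/kernel/debug/extfrag/unusable_index
 *   Node 0, zone      DMA 0.000 0.000 0.125 ...
 *
 * one column per allocation order, as produced by unusable_show_print().
 */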
2186#endif
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/mm/vmstat.c
   4 *
   5 *  Manages VM statistics
   6 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   7 *
   8 *  zoned VM statistics
   9 *  Copyright (C) 2006 Silicon Graphics, Inc.,
  10 *		Christoph Lameter <christoph@lameter.com>
  11 *  Copyright (C) 2008-2014 Christoph Lameter
  12 */
  13#include <linux/fs.h>
  14#include <linux/mm.h>
  15#include <linux/err.h>
  16#include <linux/module.h>
  17#include <linux/slab.h>
  18#include <linux/cpu.h>
  19#include <linux/cpumask.h>
  20#include <linux/vmstat.h>
  21#include <linux/proc_fs.h>
  22#include <linux/seq_file.h>
  23#include <linux/debugfs.h>
  24#include <linux/sched.h>
  25#include <linux/math64.h>
  26#include <linux/writeback.h>
  27#include <linux/compaction.h>
  28#include <linux/mm_inline.h>
  29#include <linux/page_owner.h>
  30#include <linux/sched/isolation.h>
  31
  32#include "internal.h"
  33
  34#ifdef CONFIG_NUMA
  35int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;
  36
  37/* zero numa counters within a zone */
  38static void zero_zone_numa_counters(struct zone *zone)
  39{
  40	int item, cpu;
  41
  42	for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++) {
  43		atomic_long_set(&zone->vm_numa_event[item], 0);
  44		for_each_online_cpu(cpu) {
  45			per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item]
  46						= 0;
  47		}
  48	}
  49}
  50
  51/* zero numa counters of all the populated zones */
  52static void zero_zones_numa_counters(void)
  53{
  54	struct zone *zone;
  55
  56	for_each_populated_zone(zone)
  57		zero_zone_numa_counters(zone);
  58}
  59
  60/* zero global numa counters */
  61static void zero_global_numa_counters(void)
  62{
  63	int item;
  64
  65	for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
  66		atomic_long_set(&vm_numa_event[item], 0);
  67}
  68
  69static void invalid_numa_statistics(void)
  70{
  71	zero_zones_numa_counters();
  72	zero_global_numa_counters();
  73}
  74
  75static DEFINE_MUTEX(vm_numa_stat_lock);
  76
  77int sysctl_vm_numa_stat_handler(const struct ctl_table *table, int write,
  78		void *buffer, size_t *length, loff_t *ppos)
  79{
  80	int ret, oldval;
  81
  82	mutex_lock(&vm_numa_stat_lock);
  83	if (write)
  84		oldval = sysctl_vm_numa_stat;
  85	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
  86	if (ret || !write)
  87		goto out;
  88
  89	if (oldval == sysctl_vm_numa_stat)
  90		goto out;
  91	else if (sysctl_vm_numa_stat == ENABLE_NUMA_STAT) {
  92		static_branch_enable(&vm_numa_stat_key);
  93		pr_info("enable numa statistics\n");
  94	} else {
  95		static_branch_disable(&vm_numa_stat_key);
  96		invalid_numa_statistics();
  97		pr_info("disable numa statistics, and clear numa counters\n");
  98	}
  99
 100out:
 101	mutex_unlock(&vm_numa_stat_lock);
 102	return ret;
 103}
 104#endif
 105
 106#ifdef CONFIG_VM_EVENT_COUNTERS
 107DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
 108EXPORT_PER_CPU_SYMBOL(vm_event_states);
 109
 110static void sum_vm_events(unsigned long *ret)
 111{
 112	int cpu;
 113	int i;
 114
 115	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
 116
 117	for_each_online_cpu(cpu) {
 118		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
 119
 120		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
 121			ret[i] += this->event[i];
 122	}
 123}
 124
 125/*
 126 * Accumulate the vm event counters across all CPUs.
 127 * The result is unavoidably approximate - it can change
 128 * during and after execution of this function.
  129 */
 130void all_vm_events(unsigned long *ret)
 131{
 132	cpus_read_lock();
 133	sum_vm_events(ret);
 134	cpus_read_unlock();
 135}
 136EXPORT_SYMBOL_GPL(all_vm_events);
 137
 138/*
 139 * Fold the foreign cpu events into our own.
 140 *
 141 * This is adding to the events on one processor
 142 * but keeps the global counts constant.
 143 */
 144void vm_events_fold_cpu(int cpu)
 145{
 146	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
 147	int i;
 148
 149	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
 150		count_vm_events(i, fold_state->event[i]);
 151		fold_state->event[i] = 0;
 152	}
 153}
 154
 155#endif /* CONFIG_VM_EVENT_COUNTERS */
 156
 157/*
 158 * Manage combined zone based / global counters
 159 *
 160 * vm_stat contains the global counters
 161 */
 162atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
 163atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
 164atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS] __cacheline_aligned_in_smp;
 165EXPORT_SYMBOL(vm_zone_stat);
 166EXPORT_SYMBOL(vm_node_stat);
 167
 168#ifdef CONFIG_NUMA
 169static void fold_vm_zone_numa_events(struct zone *zone)
 170{
 171	unsigned long zone_numa_events[NR_VM_NUMA_EVENT_ITEMS] = { 0, };
 172	int cpu;
 173	enum numa_stat_item item;
 174
 175	for_each_online_cpu(cpu) {
 176		struct per_cpu_zonestat *pzstats;
 177
 178		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
 179		for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
 180			zone_numa_events[item] += xchg(&pzstats->vm_numa_event[item], 0);
 181	}
 182
 183	for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
 184		zone_numa_event_add(zone_numa_events[item], zone, item);
 185}
 186
 187void fold_vm_numa_events(void)
 188{
 189	struct zone *zone;
 190
 191	for_each_populated_zone(zone)
 192		fold_vm_zone_numa_events(zone);
 193}
 194#endif
 195
 196#ifdef CONFIG_SMP
 197
 198int calculate_pressure_threshold(struct zone *zone)
 199{
 200	int threshold;
 201	int watermark_distance;
 202
 203	/*
 204	 * As vmstats are not up to date, there is drift between the estimated
 205	 * and real values. For high thresholds and a high number of CPUs, it
 206	 * is possible for the min watermark to be breached while the estimated
 207	 * value looks fine. The pressure threshold is a reduced value such
 208	 * that even the maximum amount of drift will not accidentally breach
 209	 * the min watermark
 210	 */
 211	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
 212	threshold = max(1, (int)(watermark_distance / num_online_cpus()));
 213
 214	/*
 215	 * Maximum threshold is 125
 216	 */
 217	threshold = min(125, threshold);
 218
 219	return threshold;
 220}
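/*
 * Worked example: with a low-to-min watermark gap of 256 pages and 8
 * online cpus, the pressure threshold is max(1, 256 / 8) == 32, well
 * under the cap of 125.
 */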
 221
 222int calculate_normal_threshold(struct zone *zone)
 223{
 224	int threshold;
 225	int mem;	/* memory in 128 MB units */
 226
 227	/*
 228	 * The threshold scales with the number of processors and the amount
 229	 * of memory per zone. More memory means that we can defer updates for
 230	 * longer, more processors could lead to more contention.
  231	 * fls() is used to have a cheap way of logarithmic scaling.
 232	 *
 233	 * Some sample thresholds:
 234	 *
 235	 * Threshold	Processors	(fls)	Zonesize	fls(mem)+1
 236	 * ------------------------------------------------------------------
 237	 * 8		1		1	0.9-1 GB	4
 238	 * 16		2		2	0.9-1 GB	4
 239	 * 20 		2		2	1-2 GB		5
 240	 * 24		2		2	2-4 GB		6
 241	 * 28		2		2	4-8 GB		7
 242	 * 32		2		2	8-16 GB		8
 243	 * 4		2		2	<128M		1
 244	 * 30		4		3	2-4 GB		5
 245	 * 48		4		3	8-16 GB		8
 246	 * 32		8		4	1-2 GB		4
 247	 * 32		8		4	0.9-1GB		4
 248	 * 10		16		5	<128M		1
 249	 * 40		16		5	900M		4
 250	 * 70		64		7	2-4 GB		5
 251	 * 84		64		7	4-8 GB		6
 252	 * 108		512		9	4-8 GB		6
 253	 * 125		1024		10	8-16 GB		8
 254	 * 125		1024		10	16-32 GB	9
 255	 */
 256
 257	mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);
 258
 259	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
 260
 261	/*
 262	 * Maximum threshold is 125
 263	 */
 264	threshold = min(125, threshold);
 265
 266	return threshold;
 267}
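/*
 * Worked example: a 1.5 GB zone on a 2-cpu machine gives
 * mem == 1536 MB / 128 MB == 12, so
 * threshold == 2 * fls(2) * (1 + fls(12)) == 2 * 2 * (1 + 4) == 20,
 * matching the 1-2 GB / 2-processor row in the table above.
 */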
 268
 269/*
 270 * Refresh the thresholds for each zone.
 271 */
 272void refresh_zone_stat_thresholds(void)
 273{
 274	struct pglist_data *pgdat;
 275	struct zone *zone;
 276	int cpu;
 277	int threshold;
 278
 279	/* Zero current pgdat thresholds */
 280	for_each_online_pgdat(pgdat) {
 281		for_each_online_cpu(cpu) {
 282			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
 283		}
 284	}
 285
 286	for_each_populated_zone(zone) {
 287		struct pglist_data *pgdat = zone->zone_pgdat;
 288		unsigned long max_drift, tolerate_drift;
 289
 290		threshold = calculate_normal_threshold(zone);
 291
 292		for_each_online_cpu(cpu) {
 293			int pgdat_threshold;
 294
 295			per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
 296							= threshold;
 297
 298			/* Base nodestat threshold on the largest populated zone. */
 299			pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
 300			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
 301				= max(threshold, pgdat_threshold);
 302		}
 303
 304		/*
 305		 * Only set percpu_drift_mark if there is a danger that
 306		 * NR_FREE_PAGES reports the low watermark is ok when in fact
 307		 * the min watermark could be breached by an allocation
 308		 */
 309		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
 310		max_drift = num_online_cpus() * threshold;
 311		if (max_drift > tolerate_drift)
 312			zone->percpu_drift_mark = high_wmark_pages(zone) +
 313					max_drift;
 314	}
 315}
 316
 317void set_pgdat_percpu_threshold(pg_data_t *pgdat,
 318				int (*calculate_pressure)(struct zone *))
 319{
 320	struct zone *zone;
 321	int cpu;
 322	int threshold;
 323	int i;
 324
 325	for (i = 0; i < pgdat->nr_zones; i++) {
 326		zone = &pgdat->node_zones[i];
 327		if (!zone->percpu_drift_mark)
 328			continue;
 329
 330		threshold = (*calculate_pressure)(zone);
 331		for_each_online_cpu(cpu)
 332			per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
 333							= threshold;
 334	}
 335}
 336
 337/*
 338 * For use when we know that interrupts are disabled,
 339 * or when we know that preemption is disabled and that
 340 * particular counter cannot be updated from interrupt context.
 341 */
 342void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 343			   long delta)
 344{
 345	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
 346	s8 __percpu *p = pcp->vm_stat_diff + item;
 347	long x;
 348	long t;
 349
 350	/*
 351	 * Accurate vmstat updates require a RMW. On !PREEMPT_RT kernels,
 352	 * atomicity is provided by IRQs being disabled -- either explicitly
 353	 * or via local_lock_irq. On PREEMPT_RT, local_lock_irq only disables
 354	 * CPU migrations and preemption potentially corrupts a counter so
 355	 * disable preemption.
 356	 */
 357	preempt_disable_nested();
 358
 359	x = delta + __this_cpu_read(*p);
 360
 361	t = __this_cpu_read(pcp->stat_threshold);
 362
 363	if (unlikely(abs(x) > t)) {
 364		zone_page_state_add(x, zone, item);
 365		x = 0;
 366	}
 367	__this_cpu_write(*p, x);
 368
 369	preempt_enable_nested();
 370}
 371EXPORT_SYMBOL(__mod_zone_page_state);
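/*
 * Worked example of the fold above: with stat_threshold == 32 and a
 * per-cpu diff of 30, a delta of +5 gives x == 35 > 32, so 35 is added
 * to the global zone counter and the per-cpu diff resets to 0; a delta
 * of +1 would merely have left x == 31 cached in the per-cpu diff.
 */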
 372
 373void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
 374				long delta)
 375{
 376	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
 377	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 378	long x;
 379	long t;
 380
 381	if (vmstat_item_in_bytes(item)) {
 382		/*
 383		 * Only cgroups use subpage accounting right now; at
 384		 * the global level, these items still change in
 385		 * multiples of whole pages. Store them as pages
 386		 * internally to keep the per-cpu counters compact.
 387		 */
 388		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
 389		delta >>= PAGE_SHIFT;
 390	}
 391
  392	/* See __mod_zone_page_state */
 393	preempt_disable_nested();
 394
 395	x = delta + __this_cpu_read(*p);
 396
 397	t = __this_cpu_read(pcp->stat_threshold);
 398
 399	if (unlikely(abs(x) > t)) {
 400		node_page_state_add(x, pgdat, item);
 401		x = 0;
 402	}
 403	__this_cpu_write(*p, x);
 404
 405	preempt_enable_nested();
 406}
 407EXPORT_SYMBOL(__mod_node_page_state);
 408
 409/*
 410 * Optimized increment and decrement functions.
 411 *
 412 * These are only for a single page and therefore can take a struct page *
 413 * argument instead of struct zone *. This allows the inclusion of the code
 414 * generated for page_zone(page) into the optimized functions.
 415 *
 416 * No overflow check is necessary and therefore the differential can be
 417 * incremented or decremented in place which may allow the compilers to
 418 * generate better code.
 419 * The increment or decrement is known and therefore one boundary check can
 420 * be omitted.
 421 *
 422 * NOTE: These functions are very performance sensitive. Change only
 423 * with care.
 424 *
 425 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 426 * However, the code must first determine the differential location in a zone
 427 * based on the processor number and then inc/dec the counter. There is no
 428 * guarantee without disabling preemption that the processor will not change
 429 * in between and therefore the atomicity vs. interrupt cannot be exploited
 430 * in a useful way here.
 431 */
 432void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 433{
 434	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
 435	s8 __percpu *p = pcp->vm_stat_diff + item;
 436	s8 v, t;
 437
 438	/* See __mod_node_page_state */
 439	preempt_disable_nested();
 440
 441	v = __this_cpu_inc_return(*p);
 442	t = __this_cpu_read(pcp->stat_threshold);
 443	if (unlikely(v > t)) {
 444		s8 overstep = t >> 1;
 445
 446		zone_page_state_add(v + overstep, zone, item);
 447		__this_cpu_write(*p, -overstep);
 448	}
 449
 450	preempt_enable_nested();
 451}
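/*
 * Overstep example (the dec variants below mirror it): with
 * stat_threshold t == 32, the increment taking v to 33 folds
 * v + (t >> 1) == 33 + 16 == 49 into the zone counter and leaves the
 * per-cpu diff at -16, so the next 48 increments are absorbed locally
 * before another fold is needed.
 */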
 452
 453void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 454{
 455	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
 456	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 457	s8 v, t;
 458
 459	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
 460
 461	/* See __mod_node_page_state */
 462	preempt_disable_nested();
 463
 464	v = __this_cpu_inc_return(*p);
 465	t = __this_cpu_read(pcp->stat_threshold);
 466	if (unlikely(v > t)) {
 467		s8 overstep = t >> 1;
 468
 469		node_page_state_add(v + overstep, pgdat, item);
 470		__this_cpu_write(*p, -overstep);
 471	}
 472
 473	preempt_enable_nested();
 474}
 475
 476void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
 477{
 478	__inc_zone_state(page_zone(page), item);
 479}
 480EXPORT_SYMBOL(__inc_zone_page_state);
 481
 482void __inc_node_page_state(struct page *page, enum node_stat_item item)
 483{
 484	__inc_node_state(page_pgdat(page), item);
 485}
 486EXPORT_SYMBOL(__inc_node_page_state);
 487
 488void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 489{
 490	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
 491	s8 __percpu *p = pcp->vm_stat_diff + item;
 492	s8 v, t;
 493
 494	/* See __mod_node_page_state */
 495	preempt_disable_nested();
 496
 497	v = __this_cpu_dec_return(*p);
 498	t = __this_cpu_read(pcp->stat_threshold);
  499	if (unlikely(v < -t)) {
 500		s8 overstep = t >> 1;
 501
 502		zone_page_state_add(v - overstep, zone, item);
 503		__this_cpu_write(*p, overstep);
 504	}
 505
 506	preempt_enable_nested();
 507}
 508
 509void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 510{
 511	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
 512	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 513	s8 v, t;
 514
 515	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
 516
 517	/* See __mod_node_page_state */
 518	preempt_disable_nested();
 519
 520	v = __this_cpu_dec_return(*p);
 521	t = __this_cpu_read(pcp->stat_threshold);
  522	if (unlikely(v < -t)) {
 523		s8 overstep = t >> 1;
 524
 525		node_page_state_add(v - overstep, pgdat, item);
 526		__this_cpu_write(*p, overstep);
 527	}
 528
 529	preempt_enable_nested();
 530}
 531
 532void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
 533{
 534	__dec_zone_state(page_zone(page), item);
 535}
 536EXPORT_SYMBOL(__dec_zone_page_state);
 537
 538void __dec_node_page_state(struct page *page, enum node_stat_item item)
 539{
 540	__dec_node_state(page_pgdat(page), item);
 541}
 542EXPORT_SYMBOL(__dec_node_page_state);
 543
 544#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
 545/*
 546 * If we have cmpxchg_local support then we do not need to incur the overhead
 547 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
 548 *
 549 * mod_state() modifies the zone counter state through atomic per cpu
 550 * operations.
 551 *
  552 * Overstep mode specifies how overstep should be handled:
  553 *     0       No overstepping
  554 *     1       Overstepping half of threshold
  555 *     -1      Overstepping minus half of threshold
  556 */
 557static inline void mod_zone_state(struct zone *zone,
 558       enum zone_stat_item item, long delta, int overstep_mode)
 559{
 560	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
 561	s8 __percpu *p = pcp->vm_stat_diff + item;
 562	long n, t, z;
 563	s8 o;
 564
 565	o = this_cpu_read(*p);
 566	do {
 567		z = 0;  /* overflow to zone counters */
 568
 569		/*
 570		 * The fetching of the stat_threshold is racy. We may apply
  571		 * a counter threshold to the wrong cpu if we get
 572		 * rescheduled while executing here. However, the next
 573		 * counter update will apply the threshold again and
 574		 * therefore bring the counter under the threshold again.
 575		 *
 576		 * Most of the time the thresholds are the same anyways
 577		 * for all cpus in a zone.
 578		 */
 579		t = this_cpu_read(pcp->stat_threshold);
 580
 581		n = delta + (long)o;
 582
 583		if (abs(n) > t) {
  584			int os = overstep_mode * (t >> 1);
 585
 586			/* Overflow must be added to zone counters */
 587			z = n + os;
 588			n = -os;
 589		}
 590	} while (!this_cpu_try_cmpxchg(*p, &o, n));
 591
 592	if (z)
 593		zone_page_state_add(z, zone, item);
 594}
 595
 596void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 597			 long delta)
 598{
 599	mod_zone_state(zone, item, delta, 0);
 600}
 601EXPORT_SYMBOL(mod_zone_page_state);
 602
 603void inc_zone_page_state(struct page *page, enum zone_stat_item item)
 604{
 605	mod_zone_state(page_zone(page), item, 1, 1);
 606}
 607EXPORT_SYMBOL(inc_zone_page_state);
 608
 609void dec_zone_page_state(struct page *page, enum zone_stat_item item)
 610{
 611	mod_zone_state(page_zone(page), item, -1, -1);
 612}
 613EXPORT_SYMBOL(dec_zone_page_state);
 614
 615static inline void mod_node_state(struct pglist_data *pgdat,
 616       enum node_stat_item item, int delta, int overstep_mode)
 617{
 618	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
 619	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 620	long n, t, z;
 621	s8 o;
 622
 623	if (vmstat_item_in_bytes(item)) {
 624		/*
 625		 * Only cgroups use subpage accounting right now; at
 626		 * the global level, these items still change in
 627		 * multiples of whole pages. Store them as pages
 628		 * internally to keep the per-cpu counters compact.
 629		 */
 630		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
 631		delta >>= PAGE_SHIFT;
 632	}
 633
 634	o = this_cpu_read(*p);
 635	do {
 636		z = 0;  /* overflow to node counters */
 637
 638		/*
 639		 * The fetching of the stat_threshold is racy. We may apply
  640		 * a counter threshold to the wrong cpu if we get
 641		 * rescheduled while executing here. However, the next
 642		 * counter update will apply the threshold again and
 643		 * therefore bring the counter under the threshold again.
 644		 *
 645		 * Most of the time the thresholds are the same anyways
 646		 * for all cpus in a node.
 647		 */
 648		t = this_cpu_read(pcp->stat_threshold);
 649
 650		n = delta + (long)o;
 651
 652		if (abs(n) > t) {
  653			int os = overstep_mode * (t >> 1);
 654
 655			/* Overflow must be added to node counters */
 656			z = n + os;
 657			n = -os;
 658		}
 659	} while (!this_cpu_try_cmpxchg(*p, &o, n));
 660
 661	if (z)
 662		node_page_state_add(z, pgdat, item);
 663}
 664
 665void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
 666					long delta)
 667{
 668	mod_node_state(pgdat, item, delta, 0);
 669}
 670EXPORT_SYMBOL(mod_node_page_state);
 671
 672void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 673{
 674	mod_node_state(pgdat, item, 1, 1);
 675}
 676
 677void inc_node_page_state(struct page *page, enum node_stat_item item)
 678{
 679	mod_node_state(page_pgdat(page), item, 1, 1);
 680}
 681EXPORT_SYMBOL(inc_node_page_state);
 682
 683void dec_node_page_state(struct page *page, enum node_stat_item item)
 684{
 685	mod_node_state(page_pgdat(page), item, -1, -1);
 686}
 687EXPORT_SYMBOL(dec_node_page_state);
 688#else
 689/*
 690 * Use interrupt disable to serialize counter updates
 691 */
 692void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 693			 long delta)
 694{
 695	unsigned long flags;
 696
 697	local_irq_save(flags);
 698	__mod_zone_page_state(zone, item, delta);
 699	local_irq_restore(flags);
 700}
 701EXPORT_SYMBOL(mod_zone_page_state);
 702
 703void inc_zone_page_state(struct page *page, enum zone_stat_item item)
 704{
 705	unsigned long flags;
 706	struct zone *zone;
 707
 708	zone = page_zone(page);
 709	local_irq_save(flags);
 710	__inc_zone_state(zone, item);
 711	local_irq_restore(flags);
 712}
 713EXPORT_SYMBOL(inc_zone_page_state);
 714
 715void dec_zone_page_state(struct page *page, enum zone_stat_item item)
 716{
 717	unsigned long flags;
 718
 719	local_irq_save(flags);
 720	__dec_zone_page_state(page, item);
 721	local_irq_restore(flags);
 722}
 723EXPORT_SYMBOL(dec_zone_page_state);
 724
 725void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 726{
 727	unsigned long flags;
 728
 729	local_irq_save(flags);
 730	__inc_node_state(pgdat, item);
 731	local_irq_restore(flags);
 732}
 733EXPORT_SYMBOL(inc_node_state);
 734
 735void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
 736					long delta)
 737{
 738	unsigned long flags;
 739
 740	local_irq_save(flags);
 741	__mod_node_page_state(pgdat, item, delta);
 742	local_irq_restore(flags);
 743}
 744EXPORT_SYMBOL(mod_node_page_state);
 745
 746void inc_node_page_state(struct page *page, enum node_stat_item item)
 747{
 748	unsigned long flags;
 749	struct pglist_data *pgdat;
 750
 751	pgdat = page_pgdat(page);
 752	local_irq_save(flags);
 753	__inc_node_state(pgdat, item);
 754	local_irq_restore(flags);
 755}
 756EXPORT_SYMBOL(inc_node_page_state);
 757
 758void dec_node_page_state(struct page *page, enum node_stat_item item)
 759{
 760	unsigned long flags;
 761
 762	local_irq_save(flags);
 763	__dec_node_page_state(page, item);
 764	local_irq_restore(flags);
 765}
 766EXPORT_SYMBOL(dec_node_page_state);
 767#endif
 768
 769/*
 770 * Fold a differential into the global counters.
 771 * Returns the number of counters updated.
 772 */
 773static int fold_diff(int *zone_diff, int *node_diff)
 774{
 775	int i;
 776	int changes = 0;
 777
 778	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 779		if (zone_diff[i]) {
 780			atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
 781			changes++;
 782	}
 783
 784	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
 785		if (node_diff[i]) {
 786			atomic_long_add(node_diff[i], &vm_node_stat[i]);
 787			changes++;
 788	}
 789	return changes;
 790}
 791
 792/*
 793 * Update the zone counters for the current cpu.
 794 *
 795 * Note that refresh_cpu_vm_stats strives to only access
 796 * node local memory. The per cpu pagesets on remote zones are placed
 797 * in the memory local to the processor using that pageset. So the
 798 * loop over all zones will access a series of cachelines local to
 799 * the processor.
 800 *
 801 * The call to zone_page_state_add updates the cachelines with the
 802 * statistics in the remote zone struct as well as the global cachelines
  803 * with the global counters. These could cause remote node cache line
  804 * bouncing and are therefore only done when necessary.
 805 *
 806 * The function returns the number of global counters updated.
 807 */
 808static int refresh_cpu_vm_stats(bool do_pagesets)
 809{
 810	struct pglist_data *pgdat;
 811	struct zone *zone;
 812	int i;
 813	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 814	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
 815	int changes = 0;
 816
 817	for_each_populated_zone(zone) {
 818		struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
 819		struct per_cpu_pages __percpu *pcp = zone->per_cpu_pageset;
 820
 821		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
 822			int v;
 823
 824			v = this_cpu_xchg(pzstats->vm_stat_diff[i], 0);
 825			if (v) {
 826
 827				atomic_long_add(v, &zone->vm_stat[i]);
 828				global_zone_diff[i] += v;
 829#ifdef CONFIG_NUMA
 830				/* 3 seconds idle till flush */
 831				__this_cpu_write(pcp->expire, 3);
 832#endif
 833			}
 834		}
 835
 836		if (do_pagesets) {
 837			cond_resched();
 838
 839			changes += decay_pcp_high(zone, this_cpu_ptr(pcp));
 840#ifdef CONFIG_NUMA
 841			/*
 842			 * Deal with draining the remote pageset of this
 843			 * processor
 844			 *
 845			 * Check if there are pages remaining in this pageset
 846			 * if not then there is nothing to expire.
 847			 */
 848			if (!__this_cpu_read(pcp->expire) ||
 849			       !__this_cpu_read(pcp->count))
 850				continue;
 851
 852			/*
 853			 * We never drain zones local to this processor.
 854			 */
 855			if (zone_to_nid(zone) == numa_node_id()) {
 856				__this_cpu_write(pcp->expire, 0);
 857				continue;
 858			}
 859
 860			if (__this_cpu_dec_return(pcp->expire)) {
 861				changes++;
 862				continue;
 863			}
 864
 865			if (__this_cpu_read(pcp->count)) {
 866				drain_zone_pages(zone, this_cpu_ptr(pcp));
 867				changes++;
 868			}
 869#endif
 870		}
 871	}
 872
 873	for_each_online_pgdat(pgdat) {
 874		struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;
 875
 876		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
 877			int v;
 878
 879			v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
 880			if (v) {
 881				atomic_long_add(v, &pgdat->vm_stat[i]);
 882				global_node_diff[i] += v;
 883			}
 884		}
 885	}
 886
 887	changes += fold_diff(global_zone_diff, global_node_diff);
 888	return changes;
 889}
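/*
 * Expire example: whenever a zone counter folds, pcp->expire is armed
 * to 3 above. With vmstat_update running at the default one-second
 * interval, every quiet pass decrements it, so a remote pageset is only
 * drained after roughly three consecutive idle seconds.
 */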
 890
 891/*
 892 * Fold the data for an offline cpu into the global array.
 893 * There cannot be any access by the offline cpu and therefore
 894 * synchronization is simplified.
 895 */
 896void cpu_vm_stats_fold(int cpu)
 897{
 898	struct pglist_data *pgdat;
 899	struct zone *zone;
 900	int i;
 901	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 902	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
 903
 904	for_each_populated_zone(zone) {
 905		struct per_cpu_zonestat *pzstats;
 906
 907		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
 908
 909		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
 910			if (pzstats->vm_stat_diff[i]) {
 911				int v;
 912
 913				v = pzstats->vm_stat_diff[i];
 914				pzstats->vm_stat_diff[i] = 0;
 915				atomic_long_add(v, &zone->vm_stat[i]);
 916				global_zone_diff[i] += v;
 917			}
 918		}
 919#ifdef CONFIG_NUMA
 920		for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) {
 921			if (pzstats->vm_numa_event[i]) {
 922				unsigned long v;
 923
 924				v = pzstats->vm_numa_event[i];
 925				pzstats->vm_numa_event[i] = 0;
 926				zone_numa_event_add(v, zone, i);
 927			}
 928		}
 929#endif
 930	}
 931
 932	for_each_online_pgdat(pgdat) {
 933		struct per_cpu_nodestat *p;
 934
 935		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
 936
 937		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
 938			if (p->vm_node_stat_diff[i]) {
 939				int v;
 940
 941				v = p->vm_node_stat_diff[i];
 942				p->vm_node_stat_diff[i] = 0;
 943				atomic_long_add(v, &pgdat->vm_stat[i]);
 944				global_node_diff[i] += v;
 945			}
 946	}
 947
 948	fold_diff(global_zone_diff, global_node_diff);
 949}
 950
 951/*
  952 * This is only called if !populated_zone(zone), which implies no other users of
  953 * pzstats->vm_stat_diff[] exist.
 954 */
 955void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *pzstats)
 956{
 957	unsigned long v;
 958	int i;
 959
 960	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
 961		if (pzstats->vm_stat_diff[i]) {
 962			v = pzstats->vm_stat_diff[i];
 963			pzstats->vm_stat_diff[i] = 0;
 964			zone_page_state_add(v, zone, i);
 965		}
 966	}
 967
 968#ifdef CONFIG_NUMA
 969	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) {
 970		if (pzstats->vm_numa_event[i]) {
 971			v = pzstats->vm_numa_event[i];
 972			pzstats->vm_numa_event[i] = 0;
 973			zone_numa_event_add(v, zone, i);
 974		}
 975	}
 976#endif
 977}
 978#endif
 979
 980#ifdef CONFIG_NUMA
 981/*
 982 * Determine the per node value of a stat item. This function
 983 * is called frequently in a NUMA machine, so try to be as
 984 * frugal as possible.
 985 */
 986unsigned long sum_zone_node_page_state(int node,
 987				 enum zone_stat_item item)
 988{
 989	struct zone *zones = NODE_DATA(node)->node_zones;
 990	int i;
 991	unsigned long count = 0;
 992
 993	for (i = 0; i < MAX_NR_ZONES; i++)
 994		count += zone_page_state(zones + i, item);
 995
 996	return count;
 997}
 998
 999/* Determine the per node value of a numa stat item. */
1000unsigned long sum_zone_numa_event_state(int node,
1001				 enum numa_stat_item item)
1002{
1003	struct zone *zones = NODE_DATA(node)->node_zones;
1004	unsigned long count = 0;
1005	int i;
1006
1007	for (i = 0; i < MAX_NR_ZONES; i++)
1008		count += zone_numa_event_state(zones + i, item);
1009
1010	return count;
1011}
1012
1013/*
1014 * Determine the per node value of a stat item.
1015 */
1016unsigned long node_page_state_pages(struct pglist_data *pgdat,
1017				    enum node_stat_item item)
1018{
1019	long x = atomic_long_read(&pgdat->vm_stat[item]);
1020#ifdef CONFIG_SMP
1021	if (x < 0)
1022		x = 0;
1023#endif
1024	return x;
1025}
1026
1027unsigned long node_page_state(struct pglist_data *pgdat,
1028			      enum node_stat_item item)
1029{
1030	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
1031
1032	return node_page_state_pages(pgdat, item);
1033}
1034#endif
1035
1036/*
1037 * Count number of pages "struct page" and "struct page_ext" consume.
1038 * nr_memmap_boot_pages: # of pages allocated by boot allocator
1039 * nr_memmap_pages: # of pages that were allocated by buddy allocator
1040 */
1041static atomic_long_t nr_memmap_boot_pages = ATOMIC_LONG_INIT(0);
1042static atomic_long_t nr_memmap_pages = ATOMIC_LONG_INIT(0);
1043
1044void memmap_boot_pages_add(long delta)
1045{
1046	atomic_long_add(delta, &nr_memmap_boot_pages);
1047}
1048
1049void memmap_pages_add(long delta)
1050{
1051	atomic_long_add(delta, &nr_memmap_pages);
1052}
1053
1054#ifdef CONFIG_COMPACTION
1055
1056struct contig_page_info {
1057	unsigned long free_pages;
1058	unsigned long free_blocks_total;
1059	unsigned long free_blocks_suitable;
1060};
1061
1062/*
1063 * Calculate the number of free pages in a zone, how many contiguous
1064 * pages are free and how many are large enough to satisfy an allocation of
1065 * the target size. Note that this function makes no attempt to estimate
1066 * how many suitable free blocks there *might* be if MOVABLE pages were
1067 * migrated. Calculating that is possible, but expensive and can be
1068 * figured out from userspace
1069 */
1070static void fill_contig_page_info(struct zone *zone,
1071				unsigned int suitable_order,
1072				struct contig_page_info *info)
1073{
1074	unsigned int order;
1075
1076	info->free_pages = 0;
1077	info->free_blocks_total = 0;
1078	info->free_blocks_suitable = 0;
1079
1080	for (order = 0; order < NR_PAGE_ORDERS; order++) {
1081		unsigned long blocks;
1082
1083		/*
1084		 * Count number of free blocks.
1085		 *
1086		 * Access to nr_free is lockless as nr_free is used only for
1087		 * diagnostic purposes. Use data_race to avoid KCSAN warning.
1088		 */
1089		blocks = data_race(zone->free_area[order].nr_free);
1090		info->free_blocks_total += blocks;
1091
1092		/* Count free base pages */
1093		info->free_pages += blocks << order;
1094
1095		/* Count the suitable free blocks */
1096		if (order >= suitable_order)
1097			info->free_blocks_suitable += blocks <<
1098						(order - suitable_order);
1099	}
1100}
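/*
 * Worked example: a zone with 4 free order-0 pages and 3 free order-2
 * blocks, queried with suitable_order == 1, yields
 * free_pages == 4 + (3 << 2) == 16, free_blocks_total == 7 and
 * free_blocks_suitable == 3 << (2 - 1) == 6, as each order-2 block can
 * serve two order-1 requests.
 */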
1101
1102/*
1103 * A fragmentation index only makes sense if an allocation of a requested
1104 * size would fail. If that is true, the fragmentation index indicates
1105 * whether external fragmentation or a lack of memory was the problem.
1106 * The value can be used to determine if page reclaim or compaction
1107 * should be used
1108 */
1109static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
1110{
1111	unsigned long requested = 1UL << order;
1112
1113	if (WARN_ON_ONCE(order > MAX_PAGE_ORDER))
1114		return 0;
1115
1116	if (!info->free_blocks_total)
1117		return 0;
1118
1119	/* Fragmentation index only makes sense when a request would fail */
1120	if (info->free_blocks_suitable)
1121		return -1000;
1122
1123	/*
1124	 * Index is between 0 and 1 so return within 3 decimal places
1125	 *
1126	 * 0 => allocation would fail due to lack of memory
1127	 * 1 => allocation would fail due to fragmentation
1128	 */
1129	return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total);
1130}
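/*
 * Worked example: for order == 2 (requested == 4) in a zone whose 100
 * free pages are all isolated order-0 blocks (free_blocks_total == 100,
 * free_blocks_suitable == 0), the index is
 * 1000 - (1000 + 100 * 1000 / 4) / 100 == 1000 - 260 == 740, i.e.
 * 0.740: the failure is chiefly due to fragmentation, not lack of
 * memory.
 */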
1131
1132/*
1133 * Calculates external fragmentation within a zone wrt the given order.
1134 * It is defined as the percentage of pages found in blocks of size
1135 * less than 1 << order. It returns values in range [0, 100].
1136 */
1137unsigned int extfrag_for_order(struct zone *zone, unsigned int order)
1138{
1139	struct contig_page_info info;
1140
1141	fill_contig_page_info(zone, order, &info);
1142	if (info.free_pages == 0)
1143		return 0;
1144
1145	return div_u64((info.free_pages -
1146			(info.free_blocks_suitable << order)) * 100,
1147			info.free_pages);
1148}
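/*
 * Continuing the fill_contig_page_info() example above with order == 1:
 * extfrag == (16 - (6 << 1)) * 100 / 16 == 25, i.e. 25% of the free
 * pages sit in blocks too small for an order-1 request.
 */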
1149
1150/* Same as __fragmentation index but allocs contig_page_info on stack */
1151int fragmentation_index(struct zone *zone, unsigned int order)
1152{
1153	struct contig_page_info info;
1154
1155	fill_contig_page_info(zone, order, &info);
1156	return __fragmentation_index(order, &info);
1157}
1158#endif
1159
1160#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || \
1161    defined(CONFIG_NUMA) || defined(CONFIG_MEMCG)
1162#ifdef CONFIG_ZONE_DMA
1163#define TEXT_FOR_DMA(xx) xx "_dma",
1164#else
1165#define TEXT_FOR_DMA(xx)
1166#endif
1167
1168#ifdef CONFIG_ZONE_DMA32
1169#define TEXT_FOR_DMA32(xx) xx "_dma32",
1170#else
1171#define TEXT_FOR_DMA32(xx)
1172#endif
1173
1174#ifdef CONFIG_HIGHMEM
1175#define TEXT_FOR_HIGHMEM(xx) xx "_high",
1176#else
1177#define TEXT_FOR_HIGHMEM(xx)
1178#endif
1179
1180#ifdef CONFIG_ZONE_DEVICE
1181#define TEXT_FOR_DEVICE(xx) xx "_device",
1182#else
1183#define TEXT_FOR_DEVICE(xx)
1184#endif
1185
1186#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
1187					TEXT_FOR_HIGHMEM(xx) xx "_movable", \
1188					TEXT_FOR_DEVICE(xx)
1189
1190const char * const vmstat_text[] = {
1191	/* enum zone_stat_item counters */
1192	"nr_free_pages",
1193	"nr_zone_inactive_anon",
1194	"nr_zone_active_anon",
1195	"nr_zone_inactive_file",
1196	"nr_zone_active_file",
1197	"nr_zone_unevictable",
1198	"nr_zone_write_pending",
1199	"nr_mlock",
1200	"nr_bounce",
1201#if IS_ENABLED(CONFIG_ZSMALLOC)
1202	"nr_zspages",
1203#endif
1204	"nr_free_cma",
1205#ifdef CONFIG_UNACCEPTED_MEMORY
1206	"nr_unaccepted",
1207#endif
1208
1209	/* enum numa_stat_item counters */
1210#ifdef CONFIG_NUMA
1211	"numa_hit",
1212	"numa_miss",
1213	"numa_foreign",
1214	"numa_interleave",
1215	"numa_local",
1216	"numa_other",
1217#endif
1218
1219	/* enum node_stat_item counters */
1220	"nr_inactive_anon",
1221	"nr_active_anon",
1222	"nr_inactive_file",
1223	"nr_active_file",
1224	"nr_unevictable",
1225	"nr_slab_reclaimable",
1226	"nr_slab_unreclaimable",
1227	"nr_isolated_anon",
1228	"nr_isolated_file",
1229	"workingset_nodes",
1230	"workingset_refault_anon",
1231	"workingset_refault_file",
1232	"workingset_activate_anon",
1233	"workingset_activate_file",
1234	"workingset_restore_anon",
1235	"workingset_restore_file",
1236	"workingset_nodereclaim",
1237	"nr_anon_pages",
1238	"nr_mapped",
1239	"nr_file_pages",
1240	"nr_dirty",
1241	"nr_writeback",
1242	"nr_writeback_temp",
1243	"nr_shmem",
1244	"nr_shmem_hugepages",
1245	"nr_shmem_pmdmapped",
1246	"nr_file_hugepages",
1247	"nr_file_pmdmapped",
1248	"nr_anon_transparent_hugepages",
1249	"nr_vmscan_write",
1250	"nr_vmscan_immediate_reclaim",
1251	"nr_dirtied",
1252	"nr_written",
1253	"nr_throttled_written",
1254	"nr_kernel_misc_reclaimable",
1255	"nr_foll_pin_acquired",
1256	"nr_foll_pin_released",
1257	"nr_kernel_stack",
1258#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
1259	"nr_shadow_call_stack",
1260#endif
1261	"nr_page_table_pages",
1262	"nr_sec_page_table_pages",
1263#ifdef CONFIG_IOMMU_SUPPORT
1264	"nr_iommu_pages",
1265#endif
1266#ifdef CONFIG_SWAP
1267	"nr_swapcached",
1268#endif
1269#ifdef CONFIG_NUMA_BALANCING
1270	"pgpromote_success",
1271	"pgpromote_candidate",
1272#endif
1273	"pgdemote_kswapd",
1274	"pgdemote_direct",
1275	"pgdemote_khugepaged",
1276#ifdef CONFIG_HUGETLB_PAGE
1277	"nr_hugetlb",
1278#endif
1279	/* system-wide enum vm_stat_item counters */
1280	"nr_dirty_threshold",
1281	"nr_dirty_background_threshold",
1282	"nr_memmap_pages",
1283	"nr_memmap_boot_pages",
1284
1285#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
1286	/* enum vm_event_item counters */
1287	"pgpgin",
1288	"pgpgout",
1289	"pswpin",
1290	"pswpout",
1291
1292	TEXTS_FOR_ZONES("pgalloc")
1293	TEXTS_FOR_ZONES("allocstall")
1294	TEXTS_FOR_ZONES("pgskip")
1295
1296	"pgfree",
1297	"pgactivate",
1298	"pgdeactivate",
1299	"pglazyfree",
1300
1301	"pgfault",
1302	"pgmajfault",
1303	"pglazyfreed",
1304
1305	"pgrefill",
1306	"pgreuse",
1307	"pgsteal_kswapd",
1308	"pgsteal_direct",
1309	"pgsteal_khugepaged",
1310	"pgscan_kswapd",
1311	"pgscan_direct",
1312	"pgscan_khugepaged",
1313	"pgscan_direct_throttle",
1314	"pgscan_anon",
1315	"pgscan_file",
1316	"pgsteal_anon",
1317	"pgsteal_file",
1318
1319#ifdef CONFIG_NUMA
1320	"zone_reclaim_success",
1321	"zone_reclaim_failed",
1322#endif
1323	"pginodesteal",
1324	"slabs_scanned",
1325	"kswapd_inodesteal",
1326	"kswapd_low_wmark_hit_quickly",
1327	"kswapd_high_wmark_hit_quickly",
1328	"pageoutrun",
1329
1330	"pgrotated",
1331
1332	"drop_pagecache",
1333	"drop_slab",
1334	"oom_kill",
1335
1336#ifdef CONFIG_NUMA_BALANCING
1337	"numa_pte_updates",
1338	"numa_huge_pte_updates",
1339	"numa_hint_faults",
1340	"numa_hint_faults_local",
1341	"numa_pages_migrated",
1342#endif
1343#ifdef CONFIG_MIGRATION
1344	"pgmigrate_success",
1345	"pgmigrate_fail",
1346	"thp_migration_success",
1347	"thp_migration_fail",
1348	"thp_migration_split",
1349#endif
1350#ifdef CONFIG_COMPACTION
1351	"compact_migrate_scanned",
1352	"compact_free_scanned",
1353	"compact_isolated",
1354	"compact_stall",
1355	"compact_fail",
1356	"compact_success",
1357	"compact_daemon_wake",
1358	"compact_daemon_migrate_scanned",
1359	"compact_daemon_free_scanned",
1360#endif
1361
1362#ifdef CONFIG_HUGETLB_PAGE
1363	"htlb_buddy_alloc_success",
1364	"htlb_buddy_alloc_fail",
1365#endif
1366#ifdef CONFIG_CMA
1367	"cma_alloc_success",
1368	"cma_alloc_fail",
1369#endif
1370	"unevictable_pgs_culled",
1371	"unevictable_pgs_scanned",
1372	"unevictable_pgs_rescued",
1373	"unevictable_pgs_mlocked",
1374	"unevictable_pgs_munlocked",
1375	"unevictable_pgs_cleared",
1376	"unevictable_pgs_stranded",
1377
1378#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1379	"thp_fault_alloc",
1380	"thp_fault_fallback",
1381	"thp_fault_fallback_charge",
1382	"thp_collapse_alloc",
1383	"thp_collapse_alloc_failed",
1384	"thp_file_alloc",
1385	"thp_file_fallback",
1386	"thp_file_fallback_charge",
1387	"thp_file_mapped",
1388	"thp_split_page",
1389	"thp_split_page_failed",
1390	"thp_deferred_split_page",
1391	"thp_underused_split_page",
1392	"thp_split_pmd",
1393	"thp_scan_exceed_none_pte",
1394	"thp_scan_exceed_swap_pte",
1395	"thp_scan_exceed_share_pte",
1396#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1397	"thp_split_pud",
1398#endif
1399	"thp_zero_page_alloc",
1400	"thp_zero_page_alloc_failed",
1401	"thp_swpout",
1402	"thp_swpout_fallback",
1403#endif
1404#ifdef CONFIG_MEMORY_BALLOON
1405	"balloon_inflate",
1406	"balloon_deflate",
1407#ifdef CONFIG_BALLOON_COMPACTION
1408	"balloon_migrate",
1409#endif
1410#endif /* CONFIG_MEMORY_BALLOON */
1411#ifdef CONFIG_DEBUG_TLBFLUSH
1412	"nr_tlb_remote_flush",
1413	"nr_tlb_remote_flush_received",
1414	"nr_tlb_local_flush_all",
1415	"nr_tlb_local_flush_one",
1416#endif /* CONFIG_DEBUG_TLBFLUSH */
1417
1418#ifdef CONFIG_SWAP
1419	"swap_ra",
1420	"swap_ra_hit",
1421	"swpin_zero",
1422	"swpout_zero",
1423#ifdef CONFIG_KSM
1424	"ksm_swpin_copy",
1425#endif
1426#endif
1427#ifdef CONFIG_KSM
1428	"cow_ksm",
1429#endif
1430#ifdef CONFIG_ZSWAP
1431	"zswpin",
1432	"zswpout",
1433	"zswpwb",
1434#endif
1435#ifdef CONFIG_X86
1436	"direct_map_level2_splits",
1437	"direct_map_level3_splits",
1438#endif
1439#ifdef CONFIG_PER_VMA_LOCK_STATS
1440	"vma_lock_success",
1441	"vma_lock_abort",
1442	"vma_lock_retry",
1443	"vma_lock_miss",
1444#endif
1445#ifdef CONFIG_DEBUG_STACK_USAGE
1446	"kstack_1k",
1447#if THREAD_SIZE > 1024
1448	"kstack_2k",
1449#endif
1450#if THREAD_SIZE > 2048
1451	"kstack_4k",
1452#endif
1453#if THREAD_SIZE > 4096
1454	"kstack_8k",
1455#endif
1456#if THREAD_SIZE > 8192
1457	"kstack_16k",
1458#endif
1459#if THREAD_SIZE > 16384
1460	"kstack_32k",
1461#endif
1462#if THREAD_SIZE > 32768
1463	"kstack_64k",
1464#endif
1465#if THREAD_SIZE > 65536
1466	"kstack_rest",
1467#endif
1468#endif
1469#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
1470};
1471#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */
1472
1473#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
1474     defined(CONFIG_PROC_FS)
1475static void *frag_start(struct seq_file *m, loff_t *pos)
1476{
1477	pg_data_t *pgdat;
1478	loff_t node = *pos;
1479
1480	for (pgdat = first_online_pgdat();
1481	     pgdat && node;
1482	     pgdat = next_online_pgdat(pgdat))
1483		--node;
1484
1485	return pgdat;
1486}
1487
1488static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
1489{
1490	pg_data_t *pgdat = (pg_data_t *)arg;
1491
1492	(*pos)++;
1493	return next_online_pgdat(pgdat);
1494}
1495
1496static void frag_stop(struct seq_file *m, void *arg)
1497{
1498}
1499
1500/*
1501 * Walk zones in a node and print using a callback.
1502 * If @assert_populated is true, only use callback for zones that are populated.
1503 */
1504static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
1505		bool assert_populated, bool nolock,
1506		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
1507{
1508	struct zone *zone;
1509	struct zone *node_zones = pgdat->node_zones;
1510	unsigned long flags;
1511
1512	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
1513		if (assert_populated && !populated_zone(zone))
1514			continue;
1515
1516		if (!nolock)
1517			spin_lock_irqsave(&zone->lock, flags);
1518		print(m, pgdat, zone);
1519		if (!nolock)
1520			spin_unlock_irqrestore(&zone->lock, flags);
1521	}
1522}
1523#endif
1524
1525#ifdef CONFIG_PROC_FS
1526static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
1527						struct zone *zone)
1528{
1529	int order;
1530
1531	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1532	for (order = 0; order < NR_PAGE_ORDERS; ++order)
1533		/*
1534		 * Access to nr_free is lockless as nr_free is used only for
1535		 * printing purposes. Use data_race to avoid KCSAN warning.
1536		 */
1537		seq_printf(m, "%6lu ", data_race(zone->free_area[order].nr_free));
1538	seq_putc(m, '\n');
1539}
1540
1541/*
1542 * This walks the free areas for each zone.
1543 */
1544static int frag_show(struct seq_file *m, void *arg)
1545{
1546	pg_data_t *pgdat = (pg_data_t *)arg;
1547	walk_zones_in_node(m, pgdat, true, false, frag_show_print);
1548	return 0;
1549}
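/*
 * Example /proc/buddyinfo line as produced by frag_show_print() above
 * (counts are illustrative):
 *
 *   Node 0, zone   Normal    216     55    189     53     18 ...
 *
 * one free-block count per order, formatted with "%6lu ".
 */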
1550
1551static void pagetypeinfo_showfree_print(struct seq_file *m,
1552					pg_data_t *pgdat, struct zone *zone)
1553{
1554	int order, mtype;
1555
1556	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
1557		seq_printf(m, "Node %4d, zone %8s, type %12s ",
1558					pgdat->node_id,
1559					zone->name,
1560					migratetype_names[mtype]);
1561		for (order = 0; order < NR_PAGE_ORDERS; ++order) {
1562			unsigned long freecount = 0;
1563			struct free_area *area;
1564			struct list_head *curr;
1565			bool overflow = false;
1566
1567			area = &(zone->free_area[order]);
1568
1569			list_for_each(curr, &area->free_list[mtype]) {
1570				/*
1571				 * Cap the free_list iteration because it might
1572				 * be really large and we are under a spinlock
1573				 * so a long time spent here could trigger a
1574				 * hard lockup detector. Anyway this is a
1575				 * debugging tool so knowing there is a handful
1576				 * of pages of this order should be more than
1577				 * sufficient.
1578				 */
1579				if (++freecount >= 100000) {
1580					overflow = true;
1581					break;
1582				}
1583			}
1584			seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
1585			spin_unlock_irq(&zone->lock);
1586			cond_resched();
1587			spin_lock_irq(&zone->lock);
1588		}
1589		seq_putc(m, '\n');
1590	}
1591}
1592
1593/* Print out the free pages at each order for each migratetype */
1594static void pagetypeinfo_showfree(struct seq_file *m, void *arg)
1595{
1596	int order;
1597	pg_data_t *pgdat = (pg_data_t *)arg;
1598
1599	/* Print header */
1600	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
1601	for (order = 0; order < NR_PAGE_ORDERS; ++order)
1602		seq_printf(m, "%6d ", order);
1603	seq_putc(m, '\n');
1604
1605	walk_zones_in_node(m, pgdat, true, false, pagetypeinfo_showfree_print);
1606}
1607
1608static void pagetypeinfo_showblockcount_print(struct seq_file *m,
1609					pg_data_t *pgdat, struct zone *zone)
1610{
1611	int mtype;
1612	unsigned long pfn;
1613	unsigned long start_pfn = zone->zone_start_pfn;
1614	unsigned long end_pfn = zone_end_pfn(zone);
1615	unsigned long count[MIGRATE_TYPES] = { 0, };
1616
1617	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
1618		struct page *page;
1619
1620		page = pfn_to_online_page(pfn);
1621		if (!page)
1622			continue;
1623
1624		if (page_zone(page) != zone)
1625			continue;
1626
1627		mtype = get_pageblock_migratetype(page);
1628
1629		if (mtype < MIGRATE_TYPES)
1630			count[mtype]++;
1631	}
1632
1633	/* Print counts */
1634	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1635	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1636		seq_printf(m, "%12lu ", count[mtype]);
1637	seq_putc(m, '\n');
1638}
1639
1640/* Print out the number of pageblocks for each migratetype */
1641static void pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
1642{
1643	int mtype;
1644	pg_data_t *pgdat = (pg_data_t *)arg;
1645
1646	seq_printf(m, "\n%-23s", "Number of blocks type ");
1647	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1648		seq_printf(m, "%12s ", migratetype_names[mtype]);
1649	seq_putc(m, '\n');
1650	walk_zones_in_node(m, pgdat, true, false,
1651		pagetypeinfo_showblockcount_print);
1652}
1653
1654/*
1655 * Print out the number of pageblocks for each migratetype that contain pages
1656 * of other types. This gives an indication of how well fallbacks are being
1657 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
1658 * to determine what is going on
1659 */
1660static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
1661{
1662#ifdef CONFIG_PAGE_OWNER
1663	int mtype;
1664
1665	if (!static_branch_unlikely(&page_owner_inited))
1666		return;
1667
1668	drain_all_pages(NULL);
1669
1670	seq_printf(m, "\n%-23s", "Number of mixed blocks ");
1671	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1672		seq_printf(m, "%12s ", migratetype_names[mtype]);
1673	seq_putc(m, '\n');
1674
1675	walk_zones_in_node(m, pgdat, true, true,
1676		pagetypeinfo_showmixedcount_print);
1677#endif /* CONFIG_PAGE_OWNER */
1678}
1679
1680/*
1681 * This prints out statistics in relation to grouping pages by mobility.
1682 * It is expensive to collect so do not constantly read the file.
1683 */
1684static int pagetypeinfo_show(struct seq_file *m, void *arg)
1685{
1686	pg_data_t *pgdat = (pg_data_t *)arg;
1687
1688	/* check memoryless node */
1689	if (!node_state(pgdat->node_id, N_MEMORY))
1690		return 0;
1691
1692	seq_printf(m, "Page block order: %d\n", pageblock_order);
1693	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
1694	seq_putc(m, '\n');
1695	pagetypeinfo_showfree(m, pgdat);
1696	pagetypeinfo_showblockcount(m, pgdat);
1697	pagetypeinfo_showmixedcount(m, pgdat);
1698
1699	return 0;
1700}
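/*
 * Example /proc/pagetypeinfo header as printed above, assuming
 * pageblock_order == 9 (x86-64 with THP):
 *
 *   Page block order: 9
 *   Pages per block:  512
 *
 * followed by the per-migratetype free counts and pageblock counts.
 */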
1701
1702static const struct seq_operations fragmentation_op = {
1703	.start	= frag_start,
1704	.next	= frag_next,
1705	.stop	= frag_stop,
1706	.show	= frag_show,
1707};
1708
1709static const struct seq_operations pagetypeinfo_op = {
1710	.start	= frag_start,
1711	.next	= frag_next,
1712	.stop	= frag_stop,
1713	.show	= pagetypeinfo_show,
1714};
1715
1716static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
1717{
1718	int zid;
1719
1720	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1721		struct zone *compare = &pgdat->node_zones[zid];
1722
1723		if (populated_zone(compare))
1724			return zone == compare;
1725	}
1726
1727	return false;
1728}
1729
1730static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1731							struct zone *zone)
1732{
1733	int i;
1734	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1735	if (is_zone_first_populated(pgdat, zone)) {
1736		seq_printf(m, "\n  per-node stats");
1737		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1738			unsigned long pages = node_page_state_pages(pgdat, i);
1739
1740			if (vmstat_item_print_in_thp(i))
1741				pages /= HPAGE_PMD_NR;
1742			seq_printf(m, "\n      %-12s %lu", node_stat_name(i),
1743				   pages);
1744		}
1745	}
1746	seq_printf(m,
1747		   "\n  pages free     %lu"
1748		   "\n        boost    %lu"
1749		   "\n        min      %lu"
1750		   "\n        low      %lu"
1751		   "\n        high     %lu"
1752		   "\n        promo    %lu"
1753		   "\n        spanned  %lu"
1754		   "\n        present  %lu"
1755		   "\n        managed  %lu"
1756		   "\n        cma      %lu",
1757		   zone_page_state(zone, NR_FREE_PAGES),
1758		   zone->watermark_boost,
1759		   min_wmark_pages(zone),
1760		   low_wmark_pages(zone),
1761		   high_wmark_pages(zone),
1762		   promo_wmark_pages(zone),
1763		   zone->spanned_pages,
1764		   zone->present_pages,
1765		   zone_managed_pages(zone),
1766		   zone_cma_pages(zone));
1767
1768	seq_printf(m,
1769		   "\n        protection: (%ld",
1770		   zone->lowmem_reserve[0]);
1771	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
1772		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
1773	seq_putc(m, ')');
1774
1775	/* If unpopulated, no other information is useful */
1776	if (!populated_zone(zone)) {
1777		seq_putc(m, '\n');
1778		return;
1779	}
1780
1781	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1782		seq_printf(m, "\n      %-12s %lu", zone_stat_name(i),
1783			   zone_page_state(zone, i));
1784
1785#ifdef CONFIG_NUMA
1786	fold_vm_zone_numa_events(zone);
1787	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
1788		seq_printf(m, "\n      %-12s %lu", numa_stat_name(i),
1789			   zone_numa_event_state(zone, i));
1790#endif
1791
1792	seq_printf(m, "\n  pagesets");
1793	for_each_online_cpu(i) {
1794		struct per_cpu_pages *pcp;
1795		struct per_cpu_zonestat __maybe_unused *pzstats;
1796
1797		pcp = per_cpu_ptr(zone->per_cpu_pageset, i);
1798		seq_printf(m,
1799			   "\n    cpu: %i"
1800			   "\n              count:    %i"
1801			   "\n              high:     %i"
1802			   "\n              batch:    %i"
1803			   "\n              high_min: %i"
1804			   "\n              high_max: %i",
1805			   i,
1806			   pcp->count,
1807			   pcp->high,
1808			   pcp->batch,
1809			   pcp->high_min,
1810			   pcp->high_max);
1811#ifdef CONFIG_SMP
1812		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, i);
1813		seq_printf(m, "\n  vm stats threshold: %d",
1814				pzstats->stat_threshold);
1815#endif
1816	}
1817	seq_printf(m,
1818		   "\n  node_unreclaimable:  %u"
1819		   "\n  start_pfn:           %lu",
1820		   pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
1821		   zone->zone_start_pfn);
1822	seq_putc(m, '\n');
1823}
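
/*
 * An excerpt of the /proc/zoneinfo entry built above (values invented for
 * illustration; per-node stats appear only under the node's first
 * populated zone):
 *
 *	Node 0, zone   Normal
 *	  pages free     232406
 *	        boost    0
 *	        min      12374
 *	        ...
 *	  pagesets
 *	    cpu: 0
 *	              count:    42
 *	              ...
 */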
1824
1825/*
1826 * Output information about zones in @pgdat.  All zones are printed regardless
1827 * of whether they are populated or not: lowmem_reserve_ratio operates on the
1828 * set of all zones and userspace would not be aware of such zones if they are
1829 * suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio).
1830 */
1831static int zoneinfo_show(struct seq_file *m, void *arg)
1832{
1833	pg_data_t *pgdat = (pg_data_t *)arg;
1834	walk_zones_in_node(m, pgdat, false, false, zoneinfo_show_print);
1835	return 0;
1836}
1837
1838static const struct seq_operations zoneinfo_op = {
1839	.start	= frag_start, /* iterate over all zones. The same as in
1840			       * fragmentation. */
1841	.next	= frag_next,
1842	.stop	= frag_stop,
1843	.show	= zoneinfo_show,
1844};
1845
1846#define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \
1847			 NR_VM_NUMA_EVENT_ITEMS + \
1848			 NR_VM_NODE_STAT_ITEMS + \
1849			 NR_VM_STAT_ITEMS + \
1850			 (IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \
1851			  NR_VM_EVENT_ITEMS : 0))
1852
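/*
 * Sketch of how vmstat_start() below lays out its buffer, mirroring the
 * order of vmstat_text[]:
 *
 *	[0 .. NR_VM_ZONE_STAT_ITEMS)	global_zone_page_state()
 *	[+ NR_VM_NUMA_EVENT_ITEMS)	global_numa_event_state()
 *	[+ NR_VM_NODE_STAT_ITEMS)	global_node_page_state_pages()
 *	[+ NR_VM_STAT_ITEMS)		dirty thresholds and memmap pages
 *	[+ NR_VM_EVENT_ITEMS)		all_vm_events(), if configured
 */
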
1853static void *vmstat_start(struct seq_file *m, loff_t *pos)
1854{
1855	unsigned long *v;
1856	int i;
1857
1858	if (*pos >= NR_VMSTAT_ITEMS)
1859		return NULL;
1860
1861	BUILD_BUG_ON(ARRAY_SIZE(vmstat_text) < NR_VMSTAT_ITEMS);
1862	fold_vm_numa_events();
1863	v = kmalloc_array(NR_VMSTAT_ITEMS, sizeof(unsigned long), GFP_KERNEL);
1864	m->private = v;
1865	if (!v)
1866		return ERR_PTR(-ENOMEM);
1867	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1868		v[i] = global_zone_page_state(i);
1869	v += NR_VM_ZONE_STAT_ITEMS;
1870
1871#ifdef CONFIG_NUMA
1872	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
1873		v[i] = global_numa_event_state(i);
1874	v += NR_VM_NUMA_EVENT_ITEMS;
1875#endif
1876
1877	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1878		v[i] = global_node_page_state_pages(i);
1879		if (vmstat_item_print_in_thp(i))
1880			v[i] /= HPAGE_PMD_NR;
1881	}
1882	v += NR_VM_NODE_STAT_ITEMS;
1883
1884	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1885			    v + NR_DIRTY_THRESHOLD);
1886	v[NR_MEMMAP_PAGES] = atomic_long_read(&nr_memmap_pages);
1887	v[NR_MEMMAP_BOOT_PAGES] = atomic_long_read(&nr_memmap_boot_pages);
1888	v += NR_VM_STAT_ITEMS;
1889
1890#ifdef CONFIG_VM_EVENT_COUNTERS
1891	all_vm_events(v);
1892	v[PGPGIN] /= 2;		/* sectors -> kbytes */
1893	v[PGPGOUT] /= 2;
1894#endif
1895	return (unsigned long *)m->private + *pos;
1896}
1897
1898static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1899{
1900	(*pos)++;
1901	if (*pos >= NR_VMSTAT_ITEMS)
1902		return NULL;
1903	return (unsigned long *)m->private + *pos;
1904}
1905
1906static int vmstat_show(struct seq_file *m, void *arg)
1907{
1908	unsigned long *l = arg;
1909	unsigned long off = l - (unsigned long *)m->private;
1910
1911	seq_puts(m, vmstat_text[off]);
1912	seq_put_decimal_ull(m, " ", *l);
1913	seq_putc(m, '\n');
1914
1915	if (off == NR_VMSTAT_ITEMS - 1) {
1916		/*
1917		 * We've come to the end - add any deprecated counters to avoid
1918		 * breaking userspace which might depend on them being present.
1919		 */
1920		seq_puts(m, "nr_unstable 0\n");
1921	}
1922	return 0;
1923}
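
/*
 * Each /proc/vmstat line is "<name> <value>", e.g. (values invented):
 *
 *	nr_free_pages 232406
 *	pgpgin 8421844
 *	...
 *	nr_unstable 0
 */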
1924
1925static void vmstat_stop(struct seq_file *m, void *arg)
1926{
1927	kfree(m->private);
1928	m->private = NULL;
1929}
1930
1931static const struct seq_operations vmstat_op = {
1932	.start	= vmstat_start,
1933	.next	= vmstat_next,
1934	.stop	= vmstat_stop,
1935	.show	= vmstat_show,
1936};
1937#endif /* CONFIG_PROC_FS */
1938
1939#ifdef CONFIG_SMP
1940static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1941int sysctl_stat_interval __read_mostly = HZ;
1942static int vmstat_late_init_done;
1943
1944#ifdef CONFIG_PROC_FS
1945static void refresh_vm_stats(struct work_struct *work)
1946{
1947	refresh_cpu_vm_stats(true);
1948}
1949
1950int vmstat_refresh(const struct ctl_table *table, int write,
1951		   void *buffer, size_t *lenp, loff_t *ppos)
1952{
1953	long val;
1954	int err;
1955	int i;
1956
1957	/*
1958	 * The regular update, every sysctl_stat_interval, may come later
1959	 * than expected: leaving a significant amount in per_cpu buckets.
1960	 * This is particularly misleading when checking a quantity of HUGE
1961	 * pages, immediately after running a test.  /proc/sys/vm/stat_refresh,
1962	 * which can equally be echo'ed to or cat'ted from (by root),
1963	 * can be used to update the stats just before reading them.
1964	 *
1965	 * Oh, and since global_zone_page_state() etc. are so careful to hide
1966	 * transiently negative values, print a warning here if any of the
1967	 * stats is negative, so we know to go looking for imbalance.
1968	 */
1969	err = schedule_on_each_cpu(refresh_vm_stats);
1970	if (err)
1971		return err;
1972	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
1973		/*
1974		 * Skip checking stats known to go negative occasionally.
1975		 */
1976		switch (i) {
1977		case NR_ZONE_WRITE_PENDING:
1978		case NR_FREE_CMA_PAGES:
1979			continue;
1980		}
1981		val = atomic_long_read(&vm_zone_stat[i]);
1982		if (val < 0) {
1983			pr_warn("%s: %s %ld\n",
1984				__func__, zone_stat_name(i), val);
1985		}
1986	}
1987	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1988		/*
1989		 * Skip checking stats known to go negative occasionally.
1990		 */
1991		switch (i) {
1992		case NR_WRITEBACK:
1993			continue;
1994		}
1995		val = atomic_long_read(&vm_node_stat[i]);
1996		if (val < 0) {
1997			pr_warn("%s: %s %ld\n",
1998				__func__, node_stat_name(i), val);
1999		}
2000	}
2001	if (write)
2002		*ppos += *lenp;
2003	else
2004		*lenp = 0;
2005	return 0;
2006}
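
/*
 * Typical use from userspace (root), immediately before sampling the
 * counters after a test; either form triggers the refresh:
 *
 *	echo 1 > /proc/sys/vm/stat_refresh
 *	cat /proc/sys/vm/stat_refresh
 */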
2007#endif /* CONFIG_PROC_FS */
2008
2009static void vmstat_update(struct work_struct *w)
2010{
2011	if (refresh_cpu_vm_stats(true)) {
2012		/*
2013		 * Counters were updated, so we expect more updates to
2014		 * occur in the future. Keep the update worker running
2015		 * on this CPU.
2016		 */
2017		queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
2018				this_cpu_ptr(&vmstat_work),
2019				round_jiffies_relative(sysctl_stat_interval));
2020	}
2021}
2022
2023/*
2024 * Check if the diffs for a certain cpu indicate that
2025 * an update is needed.
2026 */
2027static bool need_update(int cpu)
2028{
2029	pg_data_t *last_pgdat = NULL;
2030	struct zone *zone;
2031
2032	for_each_populated_zone(zone) {
2033		struct per_cpu_zonestat *pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
2034		struct per_cpu_nodestat *n;
2035
2036		/*
2037		 * The fast way of checking if there are any vmstat diffs.
2038		 */
2039		if (memchr_inv(pzstats->vm_stat_diff, 0, sizeof(pzstats->vm_stat_diff)))
2040			return true;
2041
2042		if (last_pgdat == zone->zone_pgdat)
2043			continue;
2044		last_pgdat = zone->zone_pgdat;
2045		n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu);
2046		if (memchr_inv(n->vm_node_stat_diff, 0, sizeof(n->vm_node_stat_diff)))
2047			return true;
2048	}
2049	return false;
2050}
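
/*
 * memchr_inv() above is a fast "is this buffer all zeroes?" test: it
 * returns the address of the first byte that differs from the given
 * value, or NULL when every byte matches. Sketch:
 *
 *	s8 diff[3] = { 0, -1, 0 };
 *	memchr_inv(diff, 0, sizeof(diff));	returns &diff[1]
 */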
2051
2052/*
2053 * Switch off vmstat processing and then fold all the remaining differentials
2054 * until the diffs stay at zero. The function is used by NOHZ and can only be
2055 * invoked when tick processing is not active.
2056 */
2057void quiet_vmstat(void)
2058{
2059	if (system_state != SYSTEM_RUNNING)
2060		return;
2061
2062	if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
2063		return;
2064
2065	if (!need_update(smp_processor_id()))
2066		return;
2067
2068	/*
2069	 * Just refresh the counters and do not care about the pending delayed
2070	 * vmstat_update. It does not fire often enough to matter, and cancelling
2071	 * it would be too expensive from this path.
2072	 * vmstat_shepherd will take care of that for us.
2073	 */
2074	refresh_cpu_vm_stats(false);
2075}
2076
2077/*
2078 * Shepherd work item that checks the differentials of CPUs whose
2079 * per-cpu vmstat update workers have been disabled because of
2080 * inactivity, and requeues a worker wherever updates are still
2081 * pending.
2082 */
2083static void vmstat_shepherd(struct work_struct *w);
2084
2085static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
2086
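/*
 * Note that "shepherd" is deferrable work: on an idle CPU its timer does
 * not force a wakeup, so the periodic check simply waits for the next
 * time the CPU is active.
 */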
2087static void vmstat_shepherd(struct work_struct *w)
2088{
2089	int cpu;
2090
2091	cpus_read_lock();
2092	/* Check processors whose vmstat worker threads have been disabled */
2093	for_each_online_cpu(cpu) {
2094		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
2095
2096		/*
2097		 * In-kernel users of vmstat counters either require the precise
2098		 * value, in which case they use the zone_page_state_snapshot()
2099		 * interface, or they can live with imprecision, as the regular
2100		 * flushing can happen at an arbitrary time and the cumulative
2101		 * error can grow (see calculate_normal_threshold()).
2102		 *
2103		 * Hence, regular flushing can be postponed for CPUs isolated from
2104		 * kernel interference without critical infrastructure noticing;
2105		 * skip them here to avoid disturbing the isolated workload.
2106		 */
2107		if (cpu_is_isolated(cpu))
2108			continue;
2109
2110		if (!delayed_work_pending(dw) && need_update(cpu))
2111			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
2112
2113		cond_resched();
2114	}
2115	cpus_read_unlock();
2116
2117	schedule_delayed_work(&shepherd,
2118		round_jiffies_relative(sysctl_stat_interval));
2119}
2120
2121static void __init start_shepherd_timer(void)
2122{
2123	int cpu;
2124
2125	for_each_possible_cpu(cpu) {
2126		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
2127			vmstat_update);
2128
2129		/*
2130		 * For secondary CPUs brought up later, vmstat_cpu_online()
2131		 * enables the work: the "mm/vmstat:online" hotplug state
2132		 * enables and disables vmstat_work symmetrically across CPU
2133		 * hotplug events.
2134		 */
2135		if (!cpu_online(cpu))
2136			disable_delayed_work_sync(&per_cpu(vmstat_work, cpu));
2137	}
2138
2139	schedule_delayed_work(&shepherd,
2140		round_jiffies_relative(sysctl_stat_interval));
2141}
2142
2143static void __init init_cpu_node_state(void)
2144{
2145	int node;
2146
2147	for_each_online_node(node) {
2148		if (!cpumask_empty(cpumask_of_node(node)))
2149			node_set_state(node, N_CPU);
2150	}
2151}
2152
2153static int vmstat_cpu_online(unsigned int cpu)
2154{
2155	if (vmstat_late_init_done)
2156		refresh_zone_stat_thresholds();
2157
2158	if (!node_state(cpu_to_node(cpu), N_CPU))
2159		node_set_state(cpu_to_node(cpu), N_CPU);
2160
2161	enable_delayed_work(&per_cpu(vmstat_work, cpu));
2162
2163	return 0;
2164}
2165
2166static int vmstat_cpu_down_prep(unsigned int cpu)
2167{
2168	disable_delayed_work_sync(&per_cpu(vmstat_work, cpu));
2169	return 0;
2170}
2171
2172static int vmstat_cpu_dead(unsigned int cpu)
2173{
2174	const struct cpumask *node_cpus;
2175	int node;
2176
2177	node = cpu_to_node(cpu);
2178
2179	refresh_zone_stat_thresholds();
2180	node_cpus = cpumask_of_node(node);
2181	if (!cpumask_empty(node_cpus))
2182		return 0;
2183
2184	node_clear_state(node, N_CPU);
2185
2186	return 0;
2187}
2188
2189static int __init vmstat_late_init(void)
2190{
2191	refresh_zone_stat_thresholds();
2192	vmstat_late_init_done = 1;
2193
2194	return 0;
2195}
2196late_initcall(vmstat_late_init);
2197#endif
2198
2199struct workqueue_struct *mm_percpu_wq;
2200
2201void __init init_mm_internals(void)
2202{
2203	int ret __maybe_unused;
2204
2205	mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);
2206
2207#ifdef CONFIG_SMP
2208	ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
2209					NULL, vmstat_cpu_dead);
2210	if (ret < 0)
2211		pr_err("vmstat: failed to register 'dead' hotplug state\n");
2212
2213	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
2214					vmstat_cpu_online,
2215					vmstat_cpu_down_prep);
2216	if (ret < 0)
2217		pr_err("vmstat: failed to register 'online' hotplug state\n");
2218
2219	cpus_read_lock();
2220	init_cpu_node_state();
2221	cpus_read_unlock();
2222
2223	start_shepherd_timer();
2224#endif
2225#ifdef CONFIG_PROC_FS
2226	proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
2227	proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
2228	proc_create_seq("vmstat", 0444, NULL, &vmstat_op);
2229	proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op);
2230#endif
2231}
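
/*
 * The interfaces registered above are read as plain files, e.g.:
 *
 *	cat /proc/buddyinfo
 *	cat /proc/vmstat
 *	cat /proc/zoneinfo
 *	cat /proc/pagetypeinfo		(0400: root only, and expensive)
 */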
2232
2233#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
2234
2235/*
2236 * Return an index indicating how much of the available free memory is
2237 * unusable for an allocation of the requested size.
2238 */
2239static int unusable_free_index(unsigned int order,
2240				struct contig_page_info *info)
2241{
2242	/* No free memory is interpreted as all free memory is unusable */
2243	if (info->free_pages == 0)
2244		return 1000;
2245
2246	/*
2247	 * The index is conceptually a value between 0 and 1; it is returned
2248	 * as an integer scaled by 1000, i.e. to 3 decimal places.
2249	 *
2250	 * 0    => no free memory is unusable
2251	 * 1000 => all free memory is unusable at this order
2252	 */
2253	return div_u64((info->free_pages -
			(info->free_blocks_suitable << order)) * 1000ULL,
		       info->free_pages);
2254
2255}
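
/*
 * Worked example (numbers invented): with free_pages = 1000 and
 * free_blocks_suitable = 50 at order = 3, suitable blocks cover
 * 50 << 3 = 400 pages, so the index is (1000 - 400) * 1000 / 1000 = 600,
 * i.e. 0.600: 60% of the free memory cannot satisfy an order-3 request.
 */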
2256
2257static void unusable_show_print(struct seq_file *m,
2258					pg_data_t *pgdat, struct zone *zone)
2259{
2260	unsigned int order;
2261	int index;
2262	struct contig_page_info info;
2263
2264	seq_printf(m, "Node %d, zone %8s ",
2265				pgdat->node_id,
2266				zone->name);
2267	for (order = 0; order < NR_PAGE_ORDERS; ++order) {
2268		fill_contig_page_info(zone, order, &info);
2269		index = unusable_free_index(order, &info);
2270		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
2271	}
2272
2273	seq_putc(m, '\n');
2274}
2275
2276/*
2277 * Display unusable free space index
2278 *
2279 * The unusable free space index measures how much of the available free
2280 * memory cannot be used to satisfy an allocation of a given size and is a
2281 * value between 0 and 1. The higher the value, the more of the free memory
2282 * is unusable and, by implication, the worse the external fragmentation.
2283 * This can be expressed as a percentage by multiplying by 100.
2284 */
2285static int unusable_show(struct seq_file *m, void *arg)
2286{
2287	pg_data_t *pgdat = (pg_data_t *)arg;
2288
2289	/* check memoryless node */
2290	if (!node_state(pgdat->node_id, N_MEMORY))
2291		return 0;
2292
2293	walk_zones_in_node(m, pgdat, true, false, unusable_show_print);
2294
2295	return 0;
2296}
2297
2298static const struct seq_operations unusable_sops = {
2299	.start	= frag_start,
2300	.next	= frag_next,
2301	.stop	= frag_stop,
2302	.show	= unusable_show,
2303};
2304
2305DEFINE_SEQ_ATTRIBUTE(unusable);
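
/*
 * DEFINE_SEQ_ATTRIBUTE(unusable) generates unusable_open() and the
 * unusable_fops file_operations around unusable_sops; unusable_fops is
 * what extfrag_debug_init() below passes to debugfs_create_file().
 */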
2306
2307static void extfrag_show_print(struct seq_file *m,
2308					pg_data_t *pgdat, struct zone *zone)
2309{
2310	unsigned int order;
2311	int index;
2312
2313	/* Alloc on stack as interrupts are disabled for zone walk */
2314	struct contig_page_info info;
2315
2316	seq_printf(m, "Node %d, zone %8s ",
2317				pgdat->node_id,
2318				zone->name);
2319	for (order = 0; order < NR_PAGE_ORDERS; ++order) {
2320		fill_contig_page_info(zone, order, &info);
2321		index = __fragmentation_index(order, &info);
2322		seq_printf(m, "%2d.%03d ", index / 1000, index % 1000);
2323	}
2324
2325	seq_putc(m, '\n');
2326}
2327
2328/*
2329 * Display the fragmentation index for orders at which allocations would fail
2330 */
2331static int extfrag_show(struct seq_file *m, void *arg)
2332{
2333	pg_data_t *pgdat = (pg_data_t *)arg;
2334
2335	walk_zones_in_node(m, pgdat, true, false, extfrag_show_print);
2336
2337	return 0;
2338}
2339
2340static const struct seq_operations extfrag_sops = {
2341	.start	= frag_start,
2342	.next	= frag_next,
2343	.stop	= frag_stop,
2344	.show	= extfrag_show,
2345};
2346
2347DEFINE_SEQ_ATTRIBUTE(extfrag);
2348
2349static int __init extfrag_debug_init(void)
2350{
2351	struct dentry *extfrag_debug_root;
2352
2353	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
2354
2355	debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL,
2356			    &unusable_fops);
2357
2358	debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL,
2359			    &extfrag_fops);
2360
2361	return 0;
2362}
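
/*
 * With debugfs mounted in the usual place, the files created above are
 * read directly:
 *
 *	cat /sys/kernel/debug/extfrag/unusable_index
 *	cat /sys/kernel/debug/extfrag/extfrag_index
 */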
2363
2364module_init(extfrag_debug_init);
2365
2366#endif