/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 *  Copyright (C) 2008-2014 Christoph Lameter
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmstat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/writeback.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/page_ext.h>
#include <linux/page_owner.h>

#include "internal.h"

#define NUMA_STATS_THRESHOLD (U16_MAX - 2)

#ifdef CONFIG_NUMA
int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;

/* zero numa counters within a zone */
static void zero_zone_numa_counters(struct zone *zone)
{
	int item, cpu;

	for (item = 0; item < NR_VM_NUMA_STAT_ITEMS; item++) {
		atomic_long_set(&zone->vm_numa_stat[item], 0);
		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item]
						= 0;
	}
}

/* zero numa counters of all the populated zones */
static void zero_zones_numa_counters(void)
{
	struct zone *zone;

	for_each_populated_zone(zone)
		zero_zone_numa_counters(zone);
}

/* zero global numa counters */
static void zero_global_numa_counters(void)
{
	int item;

	for (item = 0; item < NR_VM_NUMA_STAT_ITEMS; item++)
		atomic_long_set(&vm_numa_stat[item], 0);
}

static void invalid_numa_statistics(void)
{
	zero_zones_numa_counters();
	zero_global_numa_counters();
}

static DEFINE_MUTEX(vm_numa_stat_lock);

int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *length, loff_t *ppos)
{
	int ret, oldval;

	mutex_lock(&vm_numa_stat_lock);
	if (write)
		oldval = sysctl_vm_numa_stat;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (ret || !write)
		goto out;

	if (oldval == sysctl_vm_numa_stat)
		goto out;
	else if (sysctl_vm_numa_stat == ENABLE_NUMA_STAT) {
		static_branch_enable(&vm_numa_stat_key);
		pr_info("enable numa statistics\n");
	} else {
		static_branch_disable(&vm_numa_stat_key);
		invalid_numa_statistics();
		pr_info("disable numa statistics, and clear numa counters\n");
	}

out:
	mutex_unlock(&vm_numa_stat_lock);
	return ret;
}
#endif
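
/*
 * Illustrative usage from userspace (not part of this file): the handler
 * above backs the vm.numa_stat sysctl, so NUMA statistics collection can
 * be toggled at run time, e.g.
 *
 *	# echo 0 > /proc/sys/vm/numa_stat	(disable and zero counters)
 *	# echo 1 > /proc/sys/vm/numa_stat	(re-enable collection)
 */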

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret)
{
	int cpu;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	for_each_online_cpu(cpu) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	get_online_cpus();
	sum_vm_events(ret);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(all_vm_events);

/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS] __cacheline_aligned_in_smp;
atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
EXPORT_SYMBOL(vm_zone_stat);
EXPORT_SYMBOL(vm_numa_stat);
EXPORT_SYMBOL(vm_node_stat);

#ifdef CONFIG_SMP

int calculate_pressure_threshold(struct zone *zone)
{
	int threshold;
	int watermark_distance;

	/*
	 * As vmstats are not up to date, there is drift between the estimated
	 * and real values. For high thresholds and a high number of CPUs, it
	 * is possible for the min watermark to be breached while the estimated
	 * value looks fine. The pressure threshold is a reduced value such
	 * that even the maximum amount of drift will not accidentally breach
	 * the min watermark
	 */
	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
	threshold = max(1, (int)(watermark_distance / num_online_cpus()));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
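
/*
 * Worked example (illustrative numbers, not from the source): if the gap
 * between the low and min watermarks is 1024 pages and 64 CPUs are online,
 * the pressure threshold is max(1, 1024 / 64) = 16, so at most
 * 64 * 16 = 1024 pages of drift can accumulate - never more than the
 * watermark gap itself.
 */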

int calculate_normal_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates for
	 * longer, more processors could lead to more contention.
	 * fls() is used to have a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
	 * ------------------------------------------------------------------
	 * 8		1		1	0.9-1 GB	4
	 * 16		2		2	0.9-1 GB	4
	 * 20		2		2	1-2 GB		5
	 * 24		2		2	2-4 GB		6
	 * 28		2		2	4-8 GB		7
	 * 32		2		2	8-16 GB		8
	 * 4		2		2	<128M		1
	 * 30		4		3	2-4 GB		5
	 * 48		4		3	8-16 GB		8
	 * 32		8		4	1-2 GB		4
	 * 32		8		4	0.9-1GB		4
	 * 10		16		5	<128M		1
	 * 40		16		5	900M		4
	 * 70		64		7	2-4 GB		5
	 * 84		64		7	4-8 GB		6
	 * 108		512		9	4-8 GB		6
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */

	mem = zone->managed_pages >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
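
/*
 * Worked example (illustrative, assuming PAGE_SHIFT == 12): a ~1 GB zone
 * has about 2^18 managed pages, so mem = 2^18 >> 15 = 8 (128 MB units)
 * and fls(mem) = 4. With 2 CPUs online, fls(2) = 2, giving
 * threshold = 2 * 2 * (1 + 4) = 20, matching the "1-2 GB" row in the
 * table above.
 */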

/*
 * Refresh the thresholds for each zone.
 */
void refresh_zone_stat_thresholds(void)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int cpu;
	int threshold;

	/* Zero current pgdat thresholds */
	for_each_online_pgdat(pgdat) {
		for_each_online_cpu(cpu) {
			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
		}
	}

	for_each_populated_zone(zone) {
		struct pglist_data *pgdat = zone->zone_pgdat;
		unsigned long max_drift, tolerate_drift;

		threshold = calculate_normal_threshold(zone);

		for_each_online_cpu(cpu) {
			int pgdat_threshold;

			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;

			/* Base nodestat threshold on the largest populated zone. */
			pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
				= max(threshold, pgdat_threshold);
		}

		/*
		 * Only set percpu_drift_mark if there is a danger that
		 * NR_FREE_PAGES reports the low watermark is ok when in fact
		 * the min watermark could be breached by an allocation
		 */
		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
		max_drift = num_online_cpus() * threshold;
		if (max_drift > tolerate_drift)
			zone->percpu_drift_mark = high_wmark_pages(zone) +
					max_drift;
	}
}

void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *))
{
	struct zone *zone;
	int cpu;
	int threshold;
	int i;

	for (i = 0; i < pgdat->nr_zones; i++) {
		zone = &pgdat->node_zones[i];
		if (!zone->percpu_drift_mark)
			continue;

		threshold = (*calculate_pressure)(zone);
		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;
	}
}

/*
 * For use when we know that interrupts are disabled,
 * or when we know that preemption is disabled and that
 * particular counter cannot be updated from interrupt context.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			   long delta)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long x;
	long t;

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(x > t || x < -t)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	__this_cpu_write(*p, x);
}
EXPORT_SYMBOL(__mod_zone_page_state);
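
/*
 * Worked example (illustrative numbers): with stat_threshold t = 32, a
 * per-cpu diff of 30 and a call with delta = +5, x becomes 35 > t, so the
 * full 35 is folded into the zone and global counters and the per-cpu
 * diff is reset to 0. Updates that stay within +/-t touch only the local
 * per-cpu byte.
 */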

void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
				long delta)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	long x;
	long t;

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(x > t || x < -t)) {
		node_page_state_add(x, pgdat, item);
		x = 0;
	}
	__this_cpu_write(*p, x);
}
EXPORT_SYMBOL(__mod_node_page_state);

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
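
/*
 * Overstep example (illustrative numbers): with t = 32 the increment path
 * below folds once the diff exceeds t, e.g. at v = 33 it adds
 * v + t/2 = 49 to the global counter and leaves the per-cpu diff at -16.
 * Starting half a threshold below zero means roughly 1.5 * t further
 * increments can be absorbed locally before the next fold.
 */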
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v + overstep, zone, item);
		__this_cpu_write(*p, -overstep);
	}
}

void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	s8 v, t;

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		node_page_state_add(v + overstep, pgdat, item);
		__this_cpu_write(*p, -overstep);
	}
}

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __inc_node_page_state(struct page *page, enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}
EXPORT_SYMBOL(__inc_node_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < -t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v - overstep, zone, item);
		__this_cpu_write(*p, overstep);
	}
}

void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	s8 v, t;

	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < -t)) {
		s8 overstep = t >> 1;

		node_page_state_add(v - overstep, pgdat, item);
		__this_cpu_write(*p, overstep);
	}
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

void __dec_node_page_state(struct page *page, enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}
EXPORT_SYMBOL(__dec_node_page_state);

#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
/*
 * If we have cmpxchg_local support then we do not need to incur the overhead
 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
 *
 * mod_state() modifies the zone counter state through atomic per cpu
 * operations.
 *
 * Overstep mode specifies how overstep should be handled:
 *     0       No overstepping
 *     1       Overstepping half of threshold
 *     -1      Overstepping minus half of threshold
 */
static inline void mod_zone_state(struct zone *zone,
       enum zone_stat_item item, long delta, int overstep_mode)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long o, n, t, z;

	do {
		z = 0;  /* overflow to zone counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong cpu if we get
		 * rescheduled while executing here. However, the next
		 * counter update will apply the threshold again and
		 * therefore bring the counter under the threshold again.
		 *
		 * Most of the time the thresholds are the same anyways
		 * for all cpus in a zone.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		o = this_cpu_read(*p);
		n = delta + o;

		if (n > t || n < -t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to zone counters */
			z = n + os;
			n = -os;
		}
	} while (this_cpu_cmpxchg(*p, o, n) != o);

	if (z)
		zone_page_state_add(z, zone, item);
}
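
/*
 * Overstep-mode sketch (illustrative): mod_zone_page_state() below uses
 * mode 0, so a fold leaves the per-cpu diff at 0; inc_zone_page_state()
 * uses mode 1, leaving the diff at -t/2; dec_zone_page_state() uses
 * mode -1, leaving it at +t/2. Biasing the diff away from the next
 * expected fold lets a monotonic stream of increments or decrements run
 * about 50% longer before the next global update.
 */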

void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			 long delta)
{
	mod_zone_state(zone, item, delta, 0);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_zone_state(page_zone(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_zone_state(page_zone(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_zone_page_state);

static inline void mod_node_state(struct pglist_data *pgdat,
       enum node_stat_item item, int delta, int overstep_mode)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	long o, n, t, z;

	do {
		z = 0;  /* overflow to node counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong cpu if we get
		 * rescheduled while executing here. However, the next
		 * counter update will apply the threshold again and
		 * therefore bring the counter under the threshold again.
		 *
		 * Most of the time the thresholds are the same anyways
		 * for all cpus in a node.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		o = this_cpu_read(*p);
		n = delta + o;

		if (n > t || n < -t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to node counters */
			z = n + os;
			n = -os;
		}
	} while (this_cpu_cmpxchg(*p, o, n) != o);

	if (z)
		node_page_state_add(z, pgdat, item);
}

void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
					long delta)
{
	mod_node_state(pgdat, item, delta, 0);
}
EXPORT_SYMBOL(mod_node_page_state);

void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	mod_node_state(pgdat, item, 1, 1);
}

void inc_node_page_state(struct page *page, enum node_stat_item item)
{
	mod_node_state(page_pgdat(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_node_page_state);

void dec_node_page_state(struct page *page, enum node_stat_item item)
{
	mod_node_state(page_pgdat(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_node_page_state);
#else
/*
 * Use interrupt disable to serialize counter updates
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			 long delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_node_state(pgdat, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_node_state);

void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
					long delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_node_page_state(pgdat, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_node_page_state);

void inc_node_page_state(struct page *page, enum node_stat_item item)
{
	unsigned long flags;
	struct pglist_data *pgdat;

	pgdat = page_pgdat(page);
	local_irq_save(flags);
	__inc_node_state(pgdat, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_node_page_state);

void dec_node_page_state(struct page *page, enum node_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_node_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_node_page_state);
#endif

/*
 * Fold a differential into the global counters.
 * Returns the number of counters updated.
 */
#ifdef CONFIG_NUMA
static int fold_diff(int *zone_diff, int *numa_diff, int *node_diff)
{
	int i;
	int changes = 0;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (zone_diff[i]) {
			atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
			changes++;
		}

	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
		if (numa_diff[i]) {
			atomic_long_add(numa_diff[i], &vm_numa_stat[i]);
			changes++;
		}

	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
		if (node_diff[i]) {
			atomic_long_add(node_diff[i], &vm_node_stat[i]);
			changes++;
		}
	return changes;
}
#else
static int fold_diff(int *zone_diff, int *node_diff)
{
	int i;
	int changes = 0;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (zone_diff[i]) {
			atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
			changes++;
		}

	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
		if (node_diff[i]) {
			atomic_long_add(node_diff[i], &vm_node_stat[i]);
			changes++;
		}
	return changes;
}
#endif /* CONFIG_NUMA */

/*
 * Update the zone counters for the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These could cause remote node cache line
 * bouncing and will have to be only done when necessary.
 *
 * The function returns the number of global counters updated.
 */
static int refresh_cpu_vm_stats(bool do_pagesets)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int i;
	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
#ifdef CONFIG_NUMA
	int global_numa_diff[NR_VM_NUMA_STAT_ITEMS] = { 0, };
#endif
	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
	int changes = 0;

	for_each_populated_zone(zone) {
		struct per_cpu_pageset __percpu *p = zone->pageset;

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
			int v;

			v = this_cpu_xchg(p->vm_stat_diff[i], 0);
			if (v) {
				atomic_long_add(v, &zone->vm_stat[i]);
				global_zone_diff[i] += v;
#ifdef CONFIG_NUMA
				/* 3 seconds idle till flush */
				__this_cpu_write(p->expire, 3);
#endif
			}
		}
#ifdef CONFIG_NUMA
		for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) {
			int v;

			v = this_cpu_xchg(p->vm_numa_stat_diff[i], 0);
			if (v) {
				atomic_long_add(v, &zone->vm_numa_stat[i]);
				global_numa_diff[i] += v;
				__this_cpu_write(p->expire, 3);
			}
		}

		if (do_pagesets) {
			cond_resched();
			/*
			 * Deal with draining the remote pageset of this
			 * processor
			 *
			 * Check if there are pages remaining in this pageset
			 * if not then there is nothing to expire.
			 */
			if (!__this_cpu_read(p->expire) ||
			       !__this_cpu_read(p->pcp.count))
				continue;

			/*
			 * We never drain zones local to this processor.
			 */
			if (zone_to_nid(zone) == numa_node_id()) {
				__this_cpu_write(p->expire, 0);
				continue;
			}

			if (__this_cpu_dec_return(p->expire))
				continue;

			if (__this_cpu_read(p->pcp.count)) {
				drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
				changes++;
			}
		}
#endif
	}

	for_each_online_pgdat(pgdat) {
		struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;

		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
			int v;

			v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
			if (v) {
				atomic_long_add(v, &pgdat->vm_stat[i]);
				global_node_diff[i] += v;
			}
		}
	}

#ifdef CONFIG_NUMA
	changes += fold_diff(global_zone_diff, global_numa_diff,
			     global_node_diff);
#else
	changes += fold_diff(global_zone_diff, global_node_diff);
#endif
	return changes;
}

/*
 * Fold the data for an offline cpu into the global array.
 * There cannot be any access by the offline cpu and therefore
 * synchronization is simplified.
 */
void cpu_vm_stats_fold(int cpu)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int i;
	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
#ifdef CONFIG_NUMA
	int global_numa_diff[NR_VM_NUMA_STAT_ITEMS] = { 0, };
#endif
	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *p;

		p = per_cpu_ptr(zone->pageset, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (p->vm_stat_diff[i]) {
				int v;

				v = p->vm_stat_diff[i];
				p->vm_stat_diff[i] = 0;
				atomic_long_add(v, &zone->vm_stat[i]);
				global_zone_diff[i] += v;
			}

#ifdef CONFIG_NUMA
		for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
			if (p->vm_numa_stat_diff[i]) {
				int v;

				v = p->vm_numa_stat_diff[i];
				p->vm_numa_stat_diff[i] = 0;
				atomic_long_add(v, &zone->vm_numa_stat[i]);
				global_numa_diff[i] += v;
			}
#endif
	}

	for_each_online_pgdat(pgdat) {
		struct per_cpu_nodestat *p;

		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);

		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
			if (p->vm_node_stat_diff[i]) {
				int v;

				v = p->vm_node_stat_diff[i];
				p->vm_node_stat_diff[i] = 0;
				atomic_long_add(v, &pgdat->vm_stat[i]);
				global_node_diff[i] += v;
			}
	}

#ifdef CONFIG_NUMA
	fold_diff(global_zone_diff, global_numa_diff, global_node_diff);
#else
	fold_diff(global_zone_diff, global_node_diff);
#endif
}

/*
 * This is only called if !populated_zone(zone), which implies no other users of
 * pset->vm_stat_diff[] exist.
 */
void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
{
	int i;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (pset->vm_stat_diff[i]) {
			int v = pset->vm_stat_diff[i];
			pset->vm_stat_diff[i] = 0;
			atomic_long_add(v, &zone->vm_stat[i]);
			atomic_long_add(v, &vm_zone_stat[i]);
		}

#ifdef CONFIG_NUMA
	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
		if (pset->vm_numa_stat_diff[i]) {
			int v = pset->vm_numa_stat_diff[i];

			pset->vm_numa_stat_diff[i] = 0;
			atomic_long_add(v, &zone->vm_numa_stat[i]);
			atomic_long_add(v, &vm_numa_stat[i]);
		}
#endif
}
#endif

#ifdef CONFIG_NUMA
void __inc_numa_state(struct zone *zone,
				 enum numa_stat_item item)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	u16 __percpu *p = pcp->vm_numa_stat_diff + item;
	u16 v;

	v = __this_cpu_inc_return(*p);

	if (unlikely(v > NUMA_STATS_THRESHOLD)) {
		zone_numa_state_add(v, zone, item);
		__this_cpu_write(*p, 0);
	}
}

/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
unsigned long sum_zone_node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;
	int i;
	unsigned long count = 0;

	for (i = 0; i < MAX_NR_ZONES; i++)
		count += zone_page_state(zones + i, item);

	return count;
}

/*
 * Determine the per node value of a numa stat item. To avoid deviation,
 * the per cpu stat number in vm_numa_stat_diff[] is also included.
 */
unsigned long sum_zone_numa_state(int node,
				 enum numa_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;
	int i;
	unsigned long count = 0;

	for (i = 0; i < MAX_NR_ZONES; i++)
		count += zone_numa_state_snapshot(zones + i, item);

	return count;
}

/*
 * Determine the per node value of a stat item.
 */
unsigned long node_page_state(struct pglist_data *pgdat,
				enum node_stat_item item)
{
	long x = atomic_long_read(&pgdat->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
#endif

#ifdef CONFIG_COMPACTION

struct contig_page_info {
	unsigned long free_pages;
	unsigned long free_blocks_total;
	unsigned long free_blocks_suitable;
};

/*
 * Calculate the number of free pages in a zone, how many contiguous
 * pages are free and how many are large enough to satisfy an allocation of
 * the target size. Note that this function makes no attempt to estimate
 * how many suitable free blocks there *might* be if MOVABLE pages were
 * migrated. Calculating that is possible, but expensive and can be
 * figured out from userspace
 */
static void fill_contig_page_info(struct zone *zone,
				unsigned int suitable_order,
				struct contig_page_info *info)
{
	unsigned int order;

	info->free_pages = 0;
	info->free_blocks_total = 0;
	info->free_blocks_suitable = 0;

	for (order = 0; order < MAX_ORDER; order++) {
		unsigned long blocks;

		/* Count number of free blocks */
		blocks = zone->free_area[order].nr_free;
		info->free_blocks_total += blocks;

		/* Count free base pages */
		info->free_pages += blocks << order;

		/* Count the suitable free blocks */
		if (order >= suitable_order)
			info->free_blocks_suitable += blocks <<
						(order - suitable_order);
	}
}

/*
 * A fragmentation index only makes sense if an allocation of a requested
 * size would fail. If that is true, the fragmentation index indicates
 * whether external fragmentation or a lack of memory was the problem.
 * The value can be used to determine if page reclaim or compaction
 * should be used
 */
static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
{
	unsigned long requested = 1UL << order;

	if (WARN_ON_ONCE(order >= MAX_ORDER))
		return 0;

	if (!info->free_blocks_total)
		return 0;

	/* Fragmentation index only makes sense when a request would fail */
	if (info->free_blocks_suitable)
		return -1000;

	/*
	 * Index is between 0 and 1 so return within 3 decimal places
	 *
	 * 0 => allocation would fail due to lack of memory
	 * 1 => allocation would fail due to fragmentation
	 */
	return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL, requested)), info->free_blocks_total);
}
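
/*
 * Worked example (illustrative numbers): 1000 free pages scattered across
 * 1000 order-0 blocks, with an order-2 request (requested = 4):
 * index = 1000 - (1000 + 1000 * 1000 / 4) / 1000 = 1000 - 251 = 749,
 * i.e. 0.749 - plenty of free memory, so the failure is due to external
 * fragmentation and compaction would help more than reclaim.
 */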

/* Same as __fragmentation_index but allocs contig_page_info on stack */
int fragmentation_index(struct zone *zone, unsigned int order)
{
	struct contig_page_info info;

	fill_contig_page_info(zone, order, &info);
	return __fragmentation_index(order, &info);
}
#endif

#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
					TEXT_FOR_HIGHMEM(xx) xx "_movable",

const char * const vmstat_text[] = {
	/* enum zone_stat_item counters */
	"nr_free_pages",
	"nr_zone_inactive_anon",
	"nr_zone_active_anon",
	"nr_zone_inactive_file",
	"nr_zone_active_file",
	"nr_zone_unevictable",
	"nr_zone_write_pending",
	"nr_mlock",
	"nr_page_table_pages",
	"nr_kernel_stack",
	"nr_bounce",
#if IS_ENABLED(CONFIG_ZSMALLOC)
	"nr_zspages",
#endif
	"nr_free_cma",

	/* enum numa_stat_item counters */
#ifdef CONFIG_NUMA
	"numa_hit",
	"numa_miss",
	"numa_foreign",
	"numa_interleave",
	"numa_local",
	"numa_other",
#endif

	/* Node-based counters */
	"nr_inactive_anon",
	"nr_active_anon",
	"nr_inactive_file",
	"nr_active_file",
	"nr_unevictable",
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"nr_isolated_anon",
	"nr_isolated_file",
	"workingset_refault",
	"workingset_activate",
	"workingset_nodereclaim",
	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",
	"nr_dirty",
	"nr_writeback",
	"nr_writeback_temp",
	"nr_shmem",
	"nr_shmem_hugepages",
	"nr_shmem_pmdmapped",
	"nr_anon_transparent_hugepages",
	"nr_unstable",
	"nr_vmscan_write",
	"nr_vmscan_immediate_reclaim",
	"nr_dirtied",
	"nr_written",
	"", /* nr_indirectly_reclaimable */

	/* enum writeback_stat_item counters */
	"nr_dirty_threshold",
	"nr_dirty_background_threshold",

#ifdef CONFIG_VM_EVENT_COUNTERS
	/* enum vm_event_item counters */
	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	TEXTS_FOR_ZONES("pgalloc")
	TEXTS_FOR_ZONES("allocstall")
	TEXTS_FOR_ZONES("pgskip")

	"pgfree",
	"pgactivate",
	"pgdeactivate",
	"pglazyfree",

	"pgfault",
	"pgmajfault",
	"pglazyfreed",

	"pgrefill",
	"pgsteal_kswapd",
	"pgsteal_direct",
	"pgscan_kswapd",
	"pgscan_direct",
	"pgscan_direct_throttle",

#ifdef CONFIG_NUMA
	"zone_reclaim_failed",
#endif
	"pginodesteal",
	"slabs_scanned",
	"kswapd_inodesteal",
	"kswapd_low_wmark_hit_quickly",
	"kswapd_high_wmark_hit_quickly",
	"pageoutrun",

	"pgrotated",

	"drop_pagecache",
	"drop_slab",
	"oom_kill",

#ifdef CONFIG_NUMA_BALANCING
	"numa_pte_updates",
	"numa_huge_pte_updates",
	"numa_hint_faults",
	"numa_hint_faults_local",
	"numa_pages_migrated",
#endif
#ifdef CONFIG_MIGRATION
	"pgmigrate_success",
	"pgmigrate_fail",
#endif
#ifdef CONFIG_COMPACTION
	"compact_migrate_scanned",
	"compact_free_scanned",
	"compact_isolated",
	"compact_stall",
	"compact_fail",
	"compact_success",
	"compact_daemon_wake",
	"compact_daemon_migrate_scanned",
	"compact_daemon_free_scanned",
#endif

#ifdef CONFIG_HUGETLB_PAGE
	"htlb_buddy_alloc_success",
	"htlb_buddy_alloc_fail",
#endif
	"unevictable_pgs_culled",
	"unevictable_pgs_scanned",
	"unevictable_pgs_rescued",
	"unevictable_pgs_mlocked",
	"unevictable_pgs_munlocked",
	"unevictable_pgs_cleared",
	"unevictable_pgs_stranded",

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	"thp_fault_alloc",
	"thp_fault_fallback",
	"thp_collapse_alloc",
	"thp_collapse_alloc_failed",
	"thp_file_alloc",
	"thp_file_mapped",
	"thp_split_page",
	"thp_split_page_failed",
	"thp_deferred_split_page",
	"thp_split_pmd",
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
	"thp_split_pud",
#endif
	"thp_zero_page_alloc",
	"thp_zero_page_alloc_failed",
	"thp_swpout",
	"thp_swpout_fallback",
#endif
#ifdef CONFIG_MEMORY_BALLOON
	"balloon_inflate",
	"balloon_deflate",
#ifdef CONFIG_BALLOON_COMPACTION
	"balloon_migrate",
#endif
#endif /* CONFIG_MEMORY_BALLOON */
#ifdef CONFIG_DEBUG_TLBFLUSH
#ifdef CONFIG_SMP
	"nr_tlb_remote_flush",
	"nr_tlb_remote_flush_received",
#endif /* CONFIG_SMP */
	"nr_tlb_local_flush_all",
	"nr_tlb_local_flush_one",
#endif /* CONFIG_DEBUG_TLBFLUSH */

#ifdef CONFIG_DEBUG_VM_VMACACHE
	"vmacache_find_calls",
	"vmacache_find_hits",
	"vmacache_full_flushes",
#endif
#ifdef CONFIG_SWAP
	"swap_ra",
	"swap_ra_hit",
#endif
#endif /* CONFIG_VM_EVENT_COUNTERS */
};
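
/*
 * Note: the string order above must stay in sync with the enums it
 * mirrors (zone_stat_item, numa_stat_item, node_stat_item,
 * writeback_stat_item, vm_event_item): /proc/vmstat pairs names with
 * values purely by array index (see vmstat_start() and vmstat_show()
 * below).
 */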
#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */

#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
     defined(CONFIG_PROC_FS)
static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;

	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/*
 * Walk zones in a node and print using a callback.
 * If @assert_populated is true, only use callback for zones that are populated.
 */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
		bool assert_populated, bool nolock,
		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (assert_populated && !populated_zone(zone))
			continue;

		if (!nolock)
			spin_lock_irqsave(&zone->lock, flags);
		print(m, pgdat, zone);
		if (!nolock)
			spin_unlock_irqrestore(&zone->lock, flags);
	}
}
#endif

#ifdef CONFIG_PROC_FS
static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
						struct zone *zone)
{
	int order;

	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
	seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, true, false, frag_show_print);
	return 0;
}
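
/*
 * Sample /proc/buddyinfo line produced by frag_show_print() above
 * (illustrative values), one free-block count per order 0..MAX_ORDER-1:
 *
 *	Node 0, zone   Normal    210     85     43     20      9      4 ...
 */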

static void pagetypeinfo_showfree_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int order, mtype;

	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
		seq_printf(m, "Node %4d, zone %8s, type %12s ",
					pgdat->node_id,
					zone->name,
					migratetype_names[mtype]);
		for (order = 0; order < MAX_ORDER; ++order) {
			unsigned long freecount = 0;
			struct free_area *area;
			struct list_head *curr;

			area = &(zone->free_area[order]);

			list_for_each(curr, &area->free_list[mtype])
				freecount++;
			seq_printf(m, "%6lu ", freecount);
		}
		seq_putc(m, '\n');
	}
}

/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
	int order;
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* Print header */
	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6d ", order);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, true, false, pagetypeinfo_showfree_print);

	return 0;
}

static void pagetypeinfo_showblockcount_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int mtype;
	unsigned long pfn;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count[MIGRATE_TYPES] = { 0, };

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		page = pfn_to_online_page(pfn);
		if (!page)
			continue;

		/* Watch for unexpected holes punched in the memmap */
		if (!memmap_valid_within(pfn, page, zone))
			continue;

		if (page_zone(page) != zone)
			continue;

		mtype = get_pageblock_migratetype(page);

		if (mtype < MIGRATE_TYPES)
			count[mtype]++;
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12lu ", count[mtype]);
	seq_putc(m, '\n');
}

/* Print out the number of pageblocks for each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
	int mtype;
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "\n%-23s", "Number of blocks type ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');
	walk_zones_in_node(m, pgdat, true, false,
		pagetypeinfo_showblockcount_print);

	return 0;
}

/*
 * Print out the number of pageblocks for each migratetype that contain pages
 * of other types. This gives an indication of how well fallbacks are being
 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
 * to determine what is going on
 */
static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
{
#ifdef CONFIG_PAGE_OWNER
	int mtype;

	if (!static_branch_unlikely(&page_owner_inited))
		return;

	drain_all_pages(NULL);

	seq_printf(m, "\n%-23s", "Number of mixed blocks ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, true, true,
		pagetypeinfo_showmixedcount_print);
#endif /* CONFIG_PAGE_OWNER */
}

/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_MEMORY))
		return 0;

	seq_printf(m, "Page block order: %d\n", pageblock_order);
	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
	seq_putc(m, '\n');
	pagetypeinfo_showfree(m, pgdat);
	pagetypeinfo_showblockcount(m, pgdat);
	pagetypeinfo_showmixedcount(m, pgdat);

	return 0;
}

static const struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};

static int fragmentation_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &fragmentation_op);
}

static const struct file_operations buddyinfo_file_operations = {
	.open		= fragmentation_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct seq_operations pagetypeinfo_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= pagetypeinfo_show,
};

static int pagetypeinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pagetypeinfo_op);
}

static const struct file_operations pagetypeinfo_file_operations = {
	.open		= pagetypeinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
{
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *compare = &pgdat->node_zones[zid];

		if (populated_zone(compare))
			return zone == compare;
	}

	return false;
}

static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
							struct zone *zone)
{
	int i;
	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
	if (is_zone_first_populated(pgdat, zone)) {
		seq_printf(m, "\n  per-node stats");
		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
			seq_printf(m, "\n      %-12s %lu",
				vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
				NR_VM_NUMA_STAT_ITEMS],
				node_page_state(pgdat, i));
		}
	}
	seq_printf(m,
		   "\n  pages free     %lu"
		   "\n        min      %lu"
		   "\n        low      %lu"
		   "\n        high     %lu"
		   "\n        spanned  %lu"
		   "\n        present  %lu"
		   "\n        managed  %lu",
		   zone_page_state(zone, NR_FREE_PAGES),
		   min_wmark_pages(zone),
		   low_wmark_pages(zone),
		   high_wmark_pages(zone),
		   zone->spanned_pages,
		   zone->present_pages,
		   zone->managed_pages);

	seq_printf(m,
		   "\n        protection: (%ld",
		   zone->lowmem_reserve[0]);
	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
	seq_putc(m, ')');

	/* If unpopulated, no other information is useful */
	if (!populated_zone(zone)) {
		seq_putc(m, '\n');
		return;
	}

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		seq_printf(m, "\n      %-12s %lu", vmstat_text[i],
				zone_page_state(zone, i));

#ifdef CONFIG_NUMA
	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
		seq_printf(m, "\n      %-12s %lu",
				vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
				zone_numa_state_snapshot(zone, i));
#endif

	seq_printf(m, "\n  pagesets");
	for_each_online_cpu(i) {
		struct per_cpu_pageset *pageset;

		pageset = per_cpu_ptr(zone->pageset, i);
		seq_printf(m,
			   "\n    cpu: %i"
			   "\n              count: %i"
			   "\n              high:  %i"
			   "\n              batch: %i",
			   i,
			   pageset->pcp.count,
			   pageset->pcp.high,
			   pageset->pcp.batch);
#ifdef CONFIG_SMP
		seq_printf(m, "\n  vm stats threshold: %d",
				pageset->stat_threshold);
#endif
	}
	seq_printf(m,
		   "\n  node_unreclaimable:  %u"
		   "\n  start_pfn:           %lu",
		   pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
		   zone->zone_start_pfn);
	seq_putc(m, '\n');
}

/*
 * Output information about zones in @pgdat.  All zones are printed regardless
 * of whether they are populated or not: lowmem_reserve_ratio operates on the
 * set of all zones and userspace would not be aware of such zones if they are
 * suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio).
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, false, false, zoneinfo_show_print);
	return 0;
}

static const struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};

static int zoneinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &zoneinfo_op);
}

static const struct file_operations zoneinfo_file_operations = {
	.open		= zoneinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

enum writeback_stat_item {
	NR_DIRTY_THRESHOLD,
	NR_DIRTY_BG_THRESHOLD,
	NR_VM_WRITEBACK_STAT_ITEMS,
};

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
	int i, stat_items_size;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
			  NR_VM_NUMA_STAT_ITEMS * sizeof(unsigned long) +
			  NR_VM_NODE_STAT_ITEMS * sizeof(unsigned long) +
			  NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);

#ifdef CONFIG_VM_EVENT_COUNTERS
	stat_items_size += sizeof(struct vm_event_state);
#endif

	v = kmalloc(stat_items_size, GFP_KERNEL);
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_zone_page_state(i);
	v += NR_VM_ZONE_STAT_ITEMS;

#ifdef CONFIG_NUMA
	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
		v[i] = global_numa_state(i);
	v += NR_VM_NUMA_STAT_ITEMS;
#endif

	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
		v[i] = global_node_page_state(i);
	v += NR_VM_NODE_STAT_ITEMS;

	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
			    v + NR_DIRTY_THRESHOLD);
	v += NR_VM_WRITEBACK_STAT_ITEMS;

#ifdef CONFIG_VM_EVENT_COUNTERS
	all_vm_events(v);
	v[PGPGIN] /= 2;		/* sectors -> kbytes */
	v[PGPGOUT] /= 2;
#endif
	return (unsigned long *)m->private + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	/* Skip hidden vmstat items. */
	if (*vmstat_text[off] == '\0')
		return 0;

	seq_puts(m, vmstat_text[off]);
	seq_put_decimal_ull(m, " ", *l);
	seq_putc(m, '\n');
	return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

static const struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};

static int vmstat_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &vmstat_op);
}

static const struct file_operations vmstat_file_operations = {
	.open		= vmstat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;

#ifdef CONFIG_PROC_FS
static void refresh_vm_stats(struct work_struct *work)
{
	refresh_cpu_vm_stats(true);
}

int vmstat_refresh(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	long val;
	int err;
	int i;

	/*
	 * The regular update, every sysctl_stat_interval, may come later
	 * than expected: leaving a significant amount in per_cpu buckets.
	 * This is particularly misleading when checking a quantity of HUGE
	 * pages, immediately after running a test.  /proc/sys/vm/stat_refresh,
	 * which can equally be echo'ed to or cat'ted from (by root),
	 * can be used to update the stats just before reading them.
	 *
	 * Oh, and since global_zone_page_state() etc. are so careful to hide
	 * transiently negative values, report an error here if any of
	 * the stats is negative, so we know to go looking for imbalance.
	 */
	err = schedule_on_each_cpu(refresh_vm_stats);
	if (err)
		return err;
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
		val = atomic_long_read(&vm_zone_stat[i]);
		if (val < 0) {
			pr_warn("%s: %s %ld\n",
				__func__, vmstat_text[i], val);
			err = -EINVAL;
		}
	}
#ifdef CONFIG_NUMA
	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) {
		val = atomic_long_read(&vm_numa_stat[i]);
		if (val < 0) {
			pr_warn("%s: %s %ld\n",
				__func__, vmstat_text[i + NR_VM_ZONE_STAT_ITEMS], val);
			err = -EINVAL;
		}
	}
#endif
	if (err)
		return err;
	if (write)
		*ppos += *lenp;
	else
		*lenp = 0;
	return 0;
}
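
/*
 * Illustrative usage from userspace (as described in the comment above):
 *
 *	# echo 1 > /proc/sys/vm/stat_refresh	(fold per-cpu diffs now)
 *	# cat /proc/vmstat			(then read fresh values)
 */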
#endif /* CONFIG_PROC_FS */

static void vmstat_update(struct work_struct *w)
{
	if (refresh_cpu_vm_stats(true)) {
		/*
		 * Counters were updated so we expect more updates
		 * to occur in the future. Keep on running the
		 * update worker thread.
		 */
		preempt_disable();
		queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
				this_cpu_ptr(&vmstat_work),
				round_jiffies_relative(sysctl_stat_interval));
		preempt_enable();
	}
}

/*
 * Check if the diffs for a certain cpu indicate that
 * an update is needed.
 */
static bool need_update(int cpu)
{
	struct zone *zone;

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);

		BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1);
#ifdef CONFIG_NUMA
		BUILD_BUG_ON(sizeof(p->vm_numa_stat_diff[0]) != 2);
#endif

		/*
		 * The fast way of checking if there are any vmstat diffs.
		 * A bytewise scan over the full size of each diff array
		 * catches any nonzero entry, whether items are 1 or 2
		 * bytes wide.
		 */
		if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS *
			       sizeof(p->vm_stat_diff[0])))
			return true;
#ifdef CONFIG_NUMA
		if (memchr_inv(p->vm_numa_stat_diff, 0, NR_VM_NUMA_STAT_ITEMS *
			       sizeof(p->vm_numa_stat_diff[0])))
			return true;
#endif
	}
	return false;
}
1889
1890/*
1891 * Switch off vmstat processing and then fold all the remaining differentials
1892 * until the diffs stay at zero. The function is used by NOHZ and can only be
1893 * invoked when tick processing is not active.
1894 */
1895void quiet_vmstat(void)
1896{
1897	if (system_state != SYSTEM_RUNNING)
1898		return;
1899
1900	if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
1901		return;
1902
1903	if (!need_update(smp_processor_id()))
1904		return;
1905
1906	/*
1907	 * Just refresh counters and do not care about the pending delayed
1908	 * vmstat_update. It doesn't fire that often to matter and canceling
1909	 * it would be too expensive from this path.
1910	 * vmstat_shepherd will take care about that for us.
1911	 */
1912	refresh_cpu_vm_stats(false);
1913}
1914
1915/*
1916 * Shepherd worker thread that checks the
1917 * differentials of processors that have their worker
1918 * threads for vm statistics updates disabled because of
1919 * inactivity.
1920 */
1921static void vmstat_shepherd(struct work_struct *w);
1922
1923static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
1924
1925static void vmstat_shepherd(struct work_struct *w)
1926{
1927	int cpu;
1928
1929	get_online_cpus();
1930	/* Check processors whose vmstat worker threads have been disabled */
1931	for_each_online_cpu(cpu) {
1932		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
1933
1934		if (!delayed_work_pending(dw) && need_update(cpu))
1935			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
1936	}
1937	put_online_cpus();
1938
1939	schedule_delayed_work(&shepherd,
1940		round_jiffies_relative(sysctl_stat_interval));
1941}
1942
1943static void __init start_shepherd_timer(void)
1944{
1945	int cpu;
1946
1947	for_each_possible_cpu(cpu)
1948		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
1949			vmstat_update);
1950
1951	schedule_delayed_work(&shepherd,
1952		round_jiffies_relative(sysctl_stat_interval));
1953}
1954
1955static void __init init_cpu_node_state(void)
1956{
1957	int node;
1958
1959	for_each_online_node(node) {
1960		if (cpumask_weight(cpumask_of_node(node)) > 0)
1961			node_set_state(node, N_CPU);
1962	}
1963}
1964
1965static int vmstat_cpu_online(unsigned int cpu)
1966{
1967	refresh_zone_stat_thresholds();
1968	node_set_state(cpu_to_node(cpu), N_CPU);
1969	return 0;
1970}
1971
1972static int vmstat_cpu_down_prep(unsigned int cpu)
1973{
1974	cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
1975	return 0;
1976}
1977
1978static int vmstat_cpu_dead(unsigned int cpu)
1979{
1980	const struct cpumask *node_cpus;
1981	int node;
1982
1983	node = cpu_to_node(cpu);
1984
1985	refresh_zone_stat_thresholds();
1986	node_cpus = cpumask_of_node(node);
1987	if (cpumask_weight(node_cpus) > 0)
1988		return 0;
1989
1990	node_clear_state(node, N_CPU);
1991	return 0;
1992}
1993
1994#endif
1995
1996struct workqueue_struct *mm_percpu_wq;
1997
1998void __init init_mm_internals(void)
1999{
2000	int ret __maybe_unused;
2001
2002	mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);
2003
2004#ifdef CONFIG_SMP
2005	ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
2006					NULL, vmstat_cpu_dead);
2007	if (ret < 0)
2008		pr_err("vmstat: failed to register 'dead' hotplug state\n");
2009
2010	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
2011					vmstat_cpu_online,
2012					vmstat_cpu_down_prep);
2013	if (ret < 0)
2014		pr_err("vmstat: failed to register 'online' hotplug state\n");
2015
2016	get_online_cpus();
2017	init_cpu_node_state();
2018	put_online_cpus();
2019
2020	start_shepherd_timer();
2021#endif
2022#ifdef CONFIG_PROC_FS
2023	proc_create("buddyinfo", 0444, NULL, &buddyinfo_file_operations);
2024	proc_create("pagetypeinfo", 0444, NULL, &pagetypeinfo_file_operations);
2025	proc_create("vmstat", 0444, NULL, &vmstat_file_operations);
2026	proc_create("zoneinfo", 0444, NULL, &zoneinfo_file_operations);
2027#endif
2028}
2029
2030#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
2031
2032/*
2033 * Return an index indicating how much of the available free memory is
2034 * unusable for an allocation of the requested size.
2035 */
2036static int unusable_free_index(unsigned int order,
2037				struct contig_page_info *info)
2038{
2039	/* No free memory is interpreted as all free memory is unusable */
2040	if (info->free_pages == 0)
2041		return 1000;
2042
2043	/*
2044	 * The index is a value between 0 and 1, returned to 3 decimal
2045	 * places (i.e. scaled by 1000).
2046	 *
2047	 * 0 => no fragmentation
2048	 * 1 => high fragmentation
2049	 */
2050	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
2051
2052}
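/*
 * Worked example (editorial illustration, not part of the source): for
 * a zone with info->free_pages = 1000 and info->free_blocks_suitable = 2
 * at order = 8 (two 256-page blocks, 512 usable pages), the index is
 * (1000 - (2 << 8)) * 1000 / 1000 = 488, shown as 0.488 by the callers.
 */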
2053
2054static void unusable_show_print(struct seq_file *m,
2055					pg_data_t *pgdat, struct zone *zone)
2056{
2057	unsigned int order;
2058	int index;
2059	struct contig_page_info info;
2060
2061	seq_printf(m, "Node %d, zone %8s ",
2062				pgdat->node_id,
2063				zone->name);
2064	for (order = 0; order < MAX_ORDER; ++order) {
2065		fill_contig_page_info(zone, order, &info);
2066		index = unusable_free_index(order, &info);
2067		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
2068	}
2069
2070	seq_putc(m, '\n');
2071}
2072
2073/*
2074 * Display unusable free space index
2075 *
2076 * The unusable free space index measures how much of the available free
2077 * memory cannot be used to satisfy an allocation of a given size and is a
2078 * value between 0 and 1. The higher the value, the more of free memory is
2079 * unusable and by implication, the worse the external fragmentation is. This
2080 * can be expressed as a percentage by multiplying by 100.
2081 */
2082static int unusable_show(struct seq_file *m, void *arg)
2083{
2084	pg_data_t *pgdat = (pg_data_t *)arg;
2085
2086	/* check memoryless node */
2087	if (!node_state(pgdat->node_id, N_MEMORY))
2088		return 0;
2089
2090	walk_zones_in_node(m, pgdat, true, false, unusable_show_print);
2091
2092	return 0;
2093}
2094
2095static const struct seq_operations unusable_op = {
2096	.start	= frag_start,
2097	.next	= frag_next,
2098	.stop	= frag_stop,
2099	.show	= unusable_show,
2100};
2101
2102static int unusable_open(struct inode *inode, struct file *file)
2103{
2104	return seq_open(file, &unusable_op);
2105}
2106
2107static const struct file_operations unusable_file_ops = {
2108	.open		= unusable_open,
2109	.read		= seq_read,
2110	.llseek		= seq_lseek,
2111	.release	= seq_release,
2112};
2113
2114static void extfrag_show_print(struct seq_file *m,
2115					pg_data_t *pgdat, struct zone *zone)
2116{
2117	unsigned int order;
2118	int index;
2119
2120	/* Alloc on stack as interrupts are disabled for zone walk */
2121	struct contig_page_info info;
2122
2123	seq_printf(m, "Node %d, zone %8s ",
2124				pgdat->node_id,
2125				zone->name);
2126	for (order = 0; order < MAX_ORDER; ++order) {
2127		fill_contig_page_info(zone, order, &info);
2128		index = __fragmentation_index(order, &info);
2129		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
2130	}
2131
2132	seq_putc(m, '\n');
2133}
2134
2135/*
2136 * Display fragmentation index for orders that allocations would fail for
2137 */
2138static int extfrag_show(struct seq_file *m, void *arg)
2139{
2140	pg_data_t *pgdat = (pg_data_t *)arg;
2141
2142	walk_zones_in_node(m, pgdat, true, false, extfrag_show_print);
2143
2144	return 0;
2145}
2146
2147static const struct seq_operations extfrag_op = {
2148	.start	= frag_start,
2149	.next	= frag_next,
2150	.stop	= frag_stop,
2151	.show	= extfrag_show,
2152};
2153
2154static int extfrag_open(struct inode *inode, struct file *file)
2155{
2156	return seq_open(file, &extfrag_op);
2157}
2158
2159static const struct file_operations extfrag_file_ops = {
2160	.open		= extfrag_open,
2161	.read		= seq_read,
2162	.llseek		= seq_lseek,
2163	.release	= seq_release,
2164};
2165
2166static int __init extfrag_debug_init(void)
2167{
2168	struct dentry *extfrag_debug_root;
2169
2170	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
2171	if (!extfrag_debug_root)
2172		return -ENOMEM;
2173
2174	if (!debugfs_create_file("unusable_index", 0444,
2175			extfrag_debug_root, NULL, &unusable_file_ops))
2176		goto fail;
2177
2178	if (!debugfs_create_file("extfrag_index", 0444,
2179			extfrag_debug_root, NULL, &extfrag_file_ops))
2180		goto fail;
2181
2182	return 0;
2183fail:
2184	debugfs_remove_recursive(extfrag_debug_root);
2185	return -ENOMEM;
2186}
2187
2188module_init(extfrag_debug_init);
2189#endif
v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/mm/vmstat.c
   4 *
   5 *  Manages VM statistics
   6 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   7 *
   8 *  zoned VM statistics
   9 *  Copyright (C) 2006 Silicon Graphics, Inc.,
  10 *		Christoph Lameter <christoph@lameter.com>
  11 *  Copyright (C) 2008-2014 Christoph Lameter
  12 */
  13#include <linux/fs.h>
  14#include <linux/mm.h>
  15#include <linux/err.h>
  16#include <linux/module.h>
  17#include <linux/slab.h>
  18#include <linux/cpu.h>
  19#include <linux/cpumask.h>
  20#include <linux/vmstat.h>
  21#include <linux/proc_fs.h>
  22#include <linux/seq_file.h>
  23#include <linux/debugfs.h>
  24#include <linux/sched.h>
  25#include <linux/math64.h>
  26#include <linux/writeback.h>
  27#include <linux/compaction.h>
  28#include <linux/mm_inline.h>
  29#include <linux/page_ext.h>
  30#include <linux/page_owner.h>
  31
  32#include "internal.h"
  33
  34#define NUMA_STATS_THRESHOLD (U16_MAX - 2)
  35
  36#ifdef CONFIG_NUMA
  37int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;
  38
  39/* zero numa counters within a zone */
  40static void zero_zone_numa_counters(struct zone *zone)
  41{
  42	int item, cpu;
  43
  44	for (item = 0; item < NR_VM_NUMA_STAT_ITEMS; item++) {
  45		atomic_long_set(&zone->vm_numa_stat[item], 0);
  46		for_each_online_cpu(cpu)
  47			per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item]
  48						= 0;
  49	}
  50}
  51
  52/* zero numa counters of all the populated zones */
  53static void zero_zones_numa_counters(void)
  54{
  55	struct zone *zone;
  56
  57	for_each_populated_zone(zone)
  58		zero_zone_numa_counters(zone);
  59}
  60
  61/* zero global numa counters */
  62static void zero_global_numa_counters(void)
  63{
  64	int item;
  65
  66	for (item = 0; item < NR_VM_NUMA_STAT_ITEMS; item++)
  67		atomic_long_set(&vm_numa_stat[item], 0);
  68}
  69
  70static void invalid_numa_statistics(void)
  71{
  72	zero_zones_numa_counters();
  73	zero_global_numa_counters();
  74}
  75
  76static DEFINE_MUTEX(vm_numa_stat_lock);
  77
  78int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
  79		void __user *buffer, size_t *length, loff_t *ppos)
  80{
  81	int ret, oldval;
  82
  83	mutex_lock(&vm_numa_stat_lock);
  84	if (write)
  85		oldval = sysctl_vm_numa_stat;
  86	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
  87	if (ret || !write)
  88		goto out;
  89
  90	if (oldval == sysctl_vm_numa_stat)
  91		goto out;
  92	else if (sysctl_vm_numa_stat == ENABLE_NUMA_STAT) {
  93		static_branch_enable(&vm_numa_stat_key);
  94		pr_info("enable numa statistics\n");
  95	} else {
  96		static_branch_disable(&vm_numa_stat_key);
  97		invalid_numa_statistics();
  98		pr_info("disable numa statistics, and clear numa counters\n");
  99	}
 100
 101out:
 102	mutex_unlock(&vm_numa_stat_lock);
 103	return ret;
 104}
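/*
 * Hypothetical usage sketch (editorial, assuming the knob is exposed as
 * /proc/sys/vm/numa_stat):
 *
 *	echo 0 > /proc/sys/vm/numa_stat		# disable and zero counters
 *	echo 1 > /proc/sys/vm/numa_stat		# re-enable collection
 */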
 105#endif
 106
 107#ifdef CONFIG_VM_EVENT_COUNTERS
 108DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
 109EXPORT_PER_CPU_SYMBOL(vm_event_states);
 110
 111static void sum_vm_events(unsigned long *ret)
 112{
 113	int cpu;
 114	int i;
 115
 116	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
 117
 118	for_each_online_cpu(cpu) {
 119		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
 120
 121		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
 122			ret[i] += this->event[i];
 123	}
 124}
 125
 126/*
 127 * Accumulate the vm event counters across all CPUs.
 128 * The result is unavoidably approximate - it can change
 129 * during and after execution of this function.
 130 */
 131void all_vm_events(unsigned long *ret)
 132{
 133	get_online_cpus();
 134	sum_vm_events(ret);
 135	put_online_cpus();
 136}
 137EXPORT_SYMBOL_GPL(all_vm_events);
 138
 139/*
 140 * Fold the foreign cpu events into our own.
 141 *
 142 * This is adding to the events on one processor
 143 * but keeps the global counts constant.
 144 */
 145void vm_events_fold_cpu(int cpu)
 146{
 147	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
 148	int i;
 149
 150	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
 151		count_vm_events(i, fold_state->event[i]);
 152		fold_state->event[i] = 0;
 153	}
 154}
 155
 156#endif /* CONFIG_VM_EVENT_COUNTERS */
 157
 158/*
 159 * Manage combined zone based / global counters
 160 *
 161 * vm_stat contains the global counters
 162 */
 163atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
 164atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS] __cacheline_aligned_in_smp;
 165atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
 166EXPORT_SYMBOL(vm_zone_stat);
 167EXPORT_SYMBOL(vm_numa_stat);
 168EXPORT_SYMBOL(vm_node_stat);
 169
 170#ifdef CONFIG_SMP
 171
 172int calculate_pressure_threshold(struct zone *zone)
 173{
 174	int threshold;
 175	int watermark_distance;
 176
 177	/*
 178	 * As vmstats are not up to date, there is drift between the estimated
 179	 * and real values. For high thresholds and a high number of CPUs, it
 180	 * is possible for the min watermark to be breached while the estimated
 181	 * value looks fine. The pressure threshold is a reduced value such
 182	 * that even the maximum amount of drift will not accidentally breach
 183	 * the min watermark
 184	 */
 185	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
 186	threshold = max(1, (int)(watermark_distance / num_online_cpus()));
 187
 188	/*
 189	 * Maximum threshold is 125
 190	 */
 191	threshold = min(125, threshold);
 192
 193	return threshold;
 194}
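/*
 * Worked example (editorial illustration): with a low watermark of 1024
 * pages, a min watermark of 768 and 16 online CPUs, the pressure
 * threshold is max(1, (1024 - 768) / 16) = 16, well under the 125 cap.
 */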
 195
 196int calculate_normal_threshold(struct zone *zone)
 197{
 198	int threshold;
 199	int mem;	/* memory in 128 MB units */
 200
 201	/*
 202	 * The threshold scales with the number of processors and the amount
 203	 * of memory per zone. More memory means that we can defer updates for
 204	 * longer, while more processors could lead to more contention.
 205	 * fls() is used to have a cheap way of logarithmic scaling.
 206	 *
 207	 * Some sample thresholds:
 208	 *
 209	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
 210	 * ------------------------------------------------------------------
 211	 * 8		1		1	0.9-1 GB	4
 212	 * 16		2		2	0.9-1 GB	4
 213	 * 20 		2		2	1-2 GB		5
 214	 * 24		2		2	2-4 GB		6
 215	 * 28		2		2	4-8 GB		7
 216	 * 32		2		2	8-16 GB		8
 217	 * 4		2		2	<128M		1
 218	 * 30		4		3	2-4 GB		5
 219	 * 48		4		3	8-16 GB		8
 220	 * 32		8		4	1-2 GB		4
 221	 * 32		8		4	0.9-1GB		4
 222	 * 10		16		5	<128M		1
 223	 * 40		16		5	900M		4
 224	 * 70		64		7	2-4 GB		5
 225	 * 84		64		7	4-8 GB		6
 226	 * 108		512		9	4-8 GB		6
 227	 * 125		1024		10	8-16 GB		8
 228	 * 125		1024		10	16-32 GB	9
 229	 */
 230
 231	mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);
 232
 233	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
 234
 235	/*
 236	 * Maximum threshold is 125
 237	 */
 238	threshold = min(125, threshold);
 239
 240	return threshold;
 241}
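/*
 * Worked example (editorial illustration, assuming 4 KB pages): for a
 * 4 GB zone, mem = 1048576 >> 15 = 32 units of 128 MB, so with 4 online
 * CPUs threshold = 2 * fls(4) * (1 + fls(32)) = 2 * 3 * 7 = 42.
 */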
 242
 243/*
 244 * Refresh the thresholds for each zone.
 245 */
 246void refresh_zone_stat_thresholds(void)
 247{
 248	struct pglist_data *pgdat;
 249	struct zone *zone;
 250	int cpu;
 251	int threshold;
 252
 253	/* Zero current pgdat thresholds */
 254	for_each_online_pgdat(pgdat) {
 255		for_each_online_cpu(cpu) {
 256			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
 257		}
 258	}
 259
 260	for_each_populated_zone(zone) {
 261		struct pglist_data *pgdat = zone->zone_pgdat;
 262		unsigned long max_drift, tolerate_drift;
 263
 264		threshold = calculate_normal_threshold(zone);
 265
 266		for_each_online_cpu(cpu) {
 267			int pgdat_threshold;
 268
 269			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
 270							= threshold;
 271
 272			/* Base nodestat threshold on the largest populated zone. */
 273			pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
 274			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
 275				= max(threshold, pgdat_threshold);
 276		}
 277
 278		/*
 279		 * Only set percpu_drift_mark if there is a danger that
 280		 * NR_FREE_PAGES reports the low watermark is ok when in fact
 281		 * the min watermark could be breached by an allocation
 282		 */
 283		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
 284		max_drift = num_online_cpus() * threshold;
 285		if (max_drift > tolerate_drift)
 286			zone->percpu_drift_mark = high_wmark_pages(zone) +
 287					max_drift;
 288	}
 289}
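/*
 * Illustrative note (editorial): with 64 online CPUs at the maximum
 * threshold of 125, up to 64 * 125 = 8000 pages may be hidden in the
 * per-cpu diffs, so percpu_drift_mark is set that far above the high
 * watermark to flag when NR_FREE_PAGES needs a precise read.
 */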
 290
 291void set_pgdat_percpu_threshold(pg_data_t *pgdat,
 292				int (*calculate_pressure)(struct zone *))
 293{
 294	struct zone *zone;
 295	int cpu;
 296	int threshold;
 297	int i;
 298
 299	for (i = 0; i < pgdat->nr_zones; i++) {
 300		zone = &pgdat->node_zones[i];
 301		if (!zone->percpu_drift_mark)
 302			continue;
 303
 304		threshold = (*calculate_pressure)(zone);
 305		for_each_online_cpu(cpu)
 306			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
 307							= threshold;
 308	}
 309}
 310
 311/*
 312 * For use when we know that interrupts are disabled,
 313 * or when we know that preemption is disabled and that
 314 * particular counter cannot be updated from interrupt context.
 315 */
 316void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 317			   long delta)
 318{
 319	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 320	s8 __percpu *p = pcp->vm_stat_diff + item;
 321	long x;
 322	long t;
 323
 324	x = delta + __this_cpu_read(*p);
 325
 326	t = __this_cpu_read(pcp->stat_threshold);
 327
 328	if (unlikely(x > t || x < -t)) {
 329		zone_page_state_add(x, zone, item);
 330		x = 0;
 331	}
 332	__this_cpu_write(*p, x);
 333}
 334EXPORT_SYMBOL(__mod_zone_page_state);
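/*
 * Hypothetical usage sketch (editorial, not from the tree): callers
 * must already have interrupts disabled, e.g.
 *
 *	spin_lock_irqsave(&zone->lock, flags);
 *	__mod_zone_page_state(zone, NR_MLOCK, -nr_pages);
 *	spin_unlock_irqrestore(&zone->lock, flags);
 */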
 335
 336void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
 337				long delta)
 338{
 339	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
 340	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 341	long x;
 342	long t;
 343
 344	x = delta + __this_cpu_read(*p);
 345
 346	t = __this_cpu_read(pcp->stat_threshold);
 347
 348	if (unlikely(x > t || x < -t)) {
 349		node_page_state_add(x, pgdat, item);
 350		x = 0;
 351	}
 352	__this_cpu_write(*p, x);
 353}
 354EXPORT_SYMBOL(__mod_node_page_state);
 355
 356/*
 357 * Optimized increment and decrement functions.
 358 *
 359 * These are only for a single page and therefore can take a struct page *
 360 * argument instead of struct zone *. This allows the inclusion of the code
 361 * generated for page_zone(page) into the optimized functions.
 362 *
 363 * No overflow check is necessary and therefore the differential can be
 364 * incremented or decremented in place which may allow the compilers to
 365 * generate better code.
 366 * The increment or decrement is known and therefore one boundary check can
 367 * be omitted.
 368 *
 369 * NOTE: These functions are very performance sensitive. Change only
 370 * with care.
 371 *
 372 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 373 * However, the code must first determine the differential location in a zone
 374 * based on the processor number and then inc/dec the counter. There is no
 375 * guarantee without disabling preemption that the processor will not change
 376 * in between and therefore the atomicity vs. interrupt cannot be exploited
 377 * in a useful way here.
 378 */
 379void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 380{
 381	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 382	s8 __percpu *p = pcp->vm_stat_diff + item;
 383	s8 v, t;
 384
 385	v = __this_cpu_inc_return(*p);
 386	t = __this_cpu_read(pcp->stat_threshold);
 387	if (unlikely(v > t)) {
 388		s8 overstep = t >> 1;
 389
 390		zone_page_state_add(v + overstep, zone, item);
 391		__this_cpu_write(*p, -overstep);
 392	}
 393}
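/*
 * Worked example (editorial illustration): with stat_threshold t = 32
 * and a per-cpu diff that just reached v = 33, overstep = 16, so 49 is
 * folded into the zone counter and the diff restarts at -16, leaving
 * about one and a half thresholds of headroom before the next fold.
 */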
 394
 395void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 396{
 397	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
 398	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 399	s8 v, t;
 400
 401	v = __this_cpu_inc_return(*p);
 402	t = __this_cpu_read(pcp->stat_threshold);
 403	if (unlikely(v > t)) {
 404		s8 overstep = t >> 1;
 405
 406		node_page_state_add(v + overstep, pgdat, item);
 407		__this_cpu_write(*p, -overstep);
 408	}
 409}
 410
 411void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
 412{
 413	__inc_zone_state(page_zone(page), item);
 414}
 415EXPORT_SYMBOL(__inc_zone_page_state);
 416
 417void __inc_node_page_state(struct page *page, enum node_stat_item item)
 418{
 419	__inc_node_state(page_pgdat(page), item);
 420}
 421EXPORT_SYMBOL(__inc_node_page_state);
 422
 423void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 424{
 425	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 426	s8 __percpu *p = pcp->vm_stat_diff + item;
 427	s8 v, t;
 428
 429	v = __this_cpu_dec_return(*p);
 430	t = __this_cpu_read(pcp->stat_threshold);
 431	if (unlikely(v < -t)) {
 432		s8 overstep = t >> 1;
 433
 434		zone_page_state_add(v - overstep, zone, item);
 435		__this_cpu_write(*p, overstep);
 436	}
 437}
 438
 439void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 440{
 441	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
 442	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 443	s8 v, t;
 444
 445	v = __this_cpu_dec_return(*p);
 446	t = __this_cpu_read(pcp->stat_threshold);
 447	if (unlikely(v < -t)) {
 448		s8 overstep = t >> 1;
 449
 450		node_page_state_add(v - overstep, pgdat, item);
 451		__this_cpu_write(*p, overstep);
 452	}
 453}
 454
 455void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
 456{
 457	__dec_zone_state(page_zone(page), item);
 458}
 459EXPORT_SYMBOL(__dec_zone_page_state);
 460
 461void __dec_node_page_state(struct page *page, enum node_stat_item item)
 462{
 463	__dec_node_state(page_pgdat(page), item);
 464}
 465EXPORT_SYMBOL(__dec_node_page_state);
 466
 467#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
 468/*
 469 * If we have cmpxchg_local support then we do not need to incur the overhead
 470 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
 471 *
 472 * mod_state() modifies the zone counter state through atomic per cpu
 473 * operations.
 474 *
 475 * Overstep mode specifies how overstep should be handled:
 476 *     0       No overstepping
 477 *     1       Overstepping half of threshold
 478 *     -1      Overstepping minus half of threshold
 479 */
 480static inline void mod_zone_state(struct zone *zone,
 481       enum zone_stat_item item, long delta, int overstep_mode)
 482{
 483	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 484	s8 __percpu *p = pcp->vm_stat_diff + item;
 485	long o, n, t, z;
 486
 487	do {
 488		z = 0;  /* overflow to zone counters */
 489
 490		/*
 491		 * The fetching of the stat_threshold is racy. We may apply
 492		 * a counter threshold to the wrong cpu if we get
 493		 * rescheduled while executing here. However, the next
 494		 * counter update will apply the threshold again and
 495		 * therefore bring the counter under the threshold again.
 496		 *
 497		 * Most of the time the thresholds are the same anyways
 498		 * for all cpus in a zone.
 499		 */
 500		t = this_cpu_read(pcp->stat_threshold);
 501
 502		o = this_cpu_read(*p);
 503		n = delta + o;
 504
 505		if (n > t || n < -t) {
 506			int os = overstep_mode * (t >> 1);
 507
 508			/* Overflow must be added to zone counters */
 509			z = n + os;
 510			n = -os;
 511		}
 512	} while (this_cpu_cmpxchg(*p, o, n) != o);
 513
 514	if (z)
 515		zone_page_state_add(z, zone, item);
 516}
 517
 518void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 519			 long delta)
 520{
 521	mod_zone_state(zone, item, delta, 0);
 522}
 523EXPORT_SYMBOL(mod_zone_page_state);
 524
 525void inc_zone_page_state(struct page *page, enum zone_stat_item item)
 526{
 527	mod_zone_state(page_zone(page), item, 1, 1);
 528}
 529EXPORT_SYMBOL(inc_zone_page_state);
 530
 531void dec_zone_page_state(struct page *page, enum zone_stat_item item)
 532{
 533	mod_zone_state(page_zone(page), item, -1, -1);
 534}
 535EXPORT_SYMBOL(dec_zone_page_state);
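/*
 * Hypothetical usage sketch (editorial): unlike the __-prefixed
 * variants above, these are safe without disabling interrupts first:
 *
 *	inc_zone_page_state(page, NR_MLOCK);
 *	...
 *	dec_zone_page_state(page, NR_MLOCK);
 */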
 536
 537static inline void mod_node_state(struct pglist_data *pgdat,
 538       enum node_stat_item item, int delta, int overstep_mode)
 539{
 540	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
 541	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 542	long o, n, t, z;
 543
 544	do {
 545		z = 0;  /* overflow to node counters */
 546
 547		/*
 548		 * The fetching of the stat_threshold is racy. We may apply
 549		 * a counter threshold to the wrong cpu if we get
 550		 * rescheduled while executing here. However, the next
 551		 * counter update will apply the threshold again and
 552		 * therefore bring the counter under the threshold again.
 553		 *
 554		 * Most of the time the thresholds are the same anyways
 555		 * for all cpus in a node.
 556		 */
 557		t = this_cpu_read(pcp->stat_threshold);
 558
 559		o = this_cpu_read(*p);
 560		n = delta + o;
 561
 562		if (n > t || n < -t) {
 563			int os = overstep_mode * (t >> 1);
 564
 565			/* Overflow must be added to node counters */
 566			z = n + os;
 567			n = -os;
 568		}
 569	} while (this_cpu_cmpxchg(*p, o, n) != o);
 570
 571	if (z)
 572		node_page_state_add(z, pgdat, item);
 573}
 574
 575void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
 576					long delta)
 577{
 578	mod_node_state(pgdat, item, delta, 0);
 579}
 580EXPORT_SYMBOL(mod_node_page_state);
 581
 582void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 583{
 584	mod_node_state(pgdat, item, 1, 1);
 585}
 586
 587void inc_node_page_state(struct page *page, enum node_stat_item item)
 588{
 589	mod_node_state(page_pgdat(page), item, 1, 1);
 590}
 591EXPORT_SYMBOL(inc_node_page_state);
 592
 593void dec_node_page_state(struct page *page, enum node_stat_item item)
 594{
 595	mod_node_state(page_pgdat(page), item, -1, -1);
 596}
 597EXPORT_SYMBOL(dec_node_page_state);
 598#else
 599/*
 600 * Use interrupt disable to serialize counter updates
 601 */
 602void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 603			 long delta)
 604{
 605	unsigned long flags;
 606
 607	local_irq_save(flags);
 608	__mod_zone_page_state(zone, item, delta);
 609	local_irq_restore(flags);
 610}
 611EXPORT_SYMBOL(mod_zone_page_state);
 612
 613void inc_zone_page_state(struct page *page, enum zone_stat_item item)
 614{
 615	unsigned long flags;
 616	struct zone *zone;
 617
 618	zone = page_zone(page);
 619	local_irq_save(flags);
 620	__inc_zone_state(zone, item);
 621	local_irq_restore(flags);
 622}
 623EXPORT_SYMBOL(inc_zone_page_state);
 624
 625void dec_zone_page_state(struct page *page, enum zone_stat_item item)
 626{
 627	unsigned long flags;
 628
 629	local_irq_save(flags);
 630	__dec_zone_page_state(page, item);
 631	local_irq_restore(flags);
 632}
 633EXPORT_SYMBOL(dec_zone_page_state);
 634
 635void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 636{
 637	unsigned long flags;
 638
 639	local_irq_save(flags);
 640	__inc_node_state(pgdat, item);
 641	local_irq_restore(flags);
 642}
 643EXPORT_SYMBOL(inc_node_state);
 644
 645void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
 646					long delta)
 647{
 648	unsigned long flags;
 649
 650	local_irq_save(flags);
 651	__mod_node_page_state(pgdat, item, delta);
 652	local_irq_restore(flags);
 653}
 654EXPORT_SYMBOL(mod_node_page_state);
 655
 656void inc_node_page_state(struct page *page, enum node_stat_item item)
 657{
 658	unsigned long flags;
 659	struct pglist_data *pgdat;
 660
 661	pgdat = page_pgdat(page);
 662	local_irq_save(flags);
 663	__inc_node_state(pgdat, item);
 664	local_irq_restore(flags);
 665}
 666EXPORT_SYMBOL(inc_node_page_state);
 667
 668void dec_node_page_state(struct page *page, enum node_stat_item item)
 669{
 670	unsigned long flags;
 671
 672	local_irq_save(flags);
 673	__dec_node_page_state(page, item);
 674	local_irq_restore(flags);
 675}
 676EXPORT_SYMBOL(dec_node_page_state);
 677#endif
 678
 679/*
 680 * Fold a differential into the global counters.
 681 * Returns the number of counters updated.
 682 */
 683#ifdef CONFIG_NUMA
 684static int fold_diff(int *zone_diff, int *numa_diff, int *node_diff)
 685{
 686	int i;
 687	int changes = 0;
 688
 689	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 690		if (zone_diff[i]) {
 691			atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
 692			changes++;
 693		}
 694
 695	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
 696		if (numa_diff[i]) {
 697			atomic_long_add(numa_diff[i], &vm_numa_stat[i]);
 698			changes++;
 699		}
 700
 701	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
 702		if (node_diff[i]) {
 703			atomic_long_add(node_diff[i], &vm_node_stat[i]);
 704			changes++;
 705		}
 706	return changes;
 707}
 708#else
 709static int fold_diff(int *zone_diff, int *node_diff)
 710{
 711	int i;
 712	int changes = 0;
 713
 714	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 715		if (zone_diff[i]) {
 716			atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
 717			changes++;
 718		}
 719
 720	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
 721		if (node_diff[i]) {
 722			atomic_long_add(node_diff[i], &vm_node_stat[i]);
 723			changes++;
 724		}
 725	return changes;
 726}
 727#endif /* CONFIG_NUMA */
 728
 729/*
 730 * Update the zone counters for the current cpu.
 731 *
 732 * Note that refresh_cpu_vm_stats strives to only access
 733 * node local memory. The per cpu pagesets on remote zones are placed
 734 * in the memory local to the processor using that pageset. So the
 735 * loop over all zones will access a series of cachelines local to
 736 * the processor.
 737 *
 738 * The call to zone_page_state_add updates the cachelines with the
 739 * statistics in the remote zone struct as well as the global cachelines
 740 * with the global counters. These could cause remote node cache line
 741 * bouncing and will have to be only done when necessary.
 742 *
 743 * The function returns the number of global counters updated.
 744 */
 745static int refresh_cpu_vm_stats(bool do_pagesets)
 746{
 747	struct pglist_data *pgdat;
 748	struct zone *zone;
 749	int i;
 750	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 751#ifdef CONFIG_NUMA
 752	int global_numa_diff[NR_VM_NUMA_STAT_ITEMS] = { 0, };
 753#endif
 754	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
 755	int changes = 0;
 756
 757	for_each_populated_zone(zone) {
 758		struct per_cpu_pageset __percpu *p = zone->pageset;
 759
 760		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
 761			int v;
 762
 763			v = this_cpu_xchg(p->vm_stat_diff[i], 0);
 764			if (v) {
 765
 766				atomic_long_add(v, &zone->vm_stat[i]);
 767				global_zone_diff[i] += v;
 768#ifdef CONFIG_NUMA
 769				/* 3 seconds idle till flush */
 770				__this_cpu_write(p->expire, 3);
 771#endif
 772			}
 773		}
 774#ifdef CONFIG_NUMA
 775		for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) {
 776			int v;
 777
 778			v = this_cpu_xchg(p->vm_numa_stat_diff[i], 0);
 779			if (v) {
 780
 781				atomic_long_add(v, &zone->vm_numa_stat[i]);
 782				global_numa_diff[i] += v;
 783				__this_cpu_write(p->expire, 3);
 784			}
 785		}
 786
 787		if (do_pagesets) {
 788			cond_resched();
 789			/*
 790			 * Deal with draining the remote pageset of this
 791			 * processor
 792			 *
 793			 * Check if there are pages remaining in this pageset
 794			 * if not then there is nothing to expire.
 795			 */
 796			if (!__this_cpu_read(p->expire) ||
 797			       !__this_cpu_read(p->pcp.count))
 798				continue;
 799
 800			/*
 801			 * We never drain zones local to this processor.
 802			 */
 803			if (zone_to_nid(zone) == numa_node_id()) {
 804				__this_cpu_write(p->expire, 0);
 805				continue;
 806			}
 807
 808			if (__this_cpu_dec_return(p->expire))
 809				continue;
 810
 811			if (__this_cpu_read(p->pcp.count)) {
 812				drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
 813				changes++;
 814			}
 815		}
 816#endif
 817	}
 818
 819	for_each_online_pgdat(pgdat) {
 820		struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;
 821
 822		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
 823			int v;
 824
 825			v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
 826			if (v) {
 827				atomic_long_add(v, &pgdat->vm_stat[i]);
 828				global_node_diff[i] += v;
 829			}
 830		}
 831	}
 832
 833#ifdef CONFIG_NUMA
 834	changes += fold_diff(global_zone_diff, global_numa_diff,
 835			     global_node_diff);
 836#else
 837	changes += fold_diff(global_zone_diff, global_node_diff);
 838#endif
 839	return changes;
 840}
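/*
 * Illustrative note (editorial): with vmstat_update running roughly
 * every sysctl_stat_interval (HZ, i.e. one second) and p->expire reset
 * to 3 on every local update, a remote pageset is only drained after
 * about three quiet seconds.
 */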
 841
 842/*
 843 * Fold the data for an offline cpu into the global array.
 844 * There cannot be any access by the offline cpu and therefore
 845 * synchronization is simplified.
 846 */
 847void cpu_vm_stats_fold(int cpu)
 848{
 849	struct pglist_data *pgdat;
 850	struct zone *zone;
 851	int i;
 852	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 853#ifdef CONFIG_NUMA
 854	int global_numa_diff[NR_VM_NUMA_STAT_ITEMS] = { 0, };
 855#endif
 856	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
 857
 858	for_each_populated_zone(zone) {
 859		struct per_cpu_pageset *p;
 860
 861		p = per_cpu_ptr(zone->pageset, cpu);
 862
 863		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 864			if (p->vm_stat_diff[i]) {
 865				int v;
 866
 867				v = p->vm_stat_diff[i];
 868				p->vm_stat_diff[i] = 0;
 869				atomic_long_add(v, &zone->vm_stat[i]);
 870				global_zone_diff[i] += v;
 871			}
 872
 873#ifdef CONFIG_NUMA
 874		for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
 875			if (p->vm_numa_stat_diff[i]) {
 876				int v;
 877
 878				v = p->vm_numa_stat_diff[i];
 879				p->vm_numa_stat_diff[i] = 0;
 880				atomic_long_add(v, &zone->vm_numa_stat[i]);
 881				global_numa_diff[i] += v;
 882			}
 883#endif
 884	}
 885
 886	for_each_online_pgdat(pgdat) {
 887		struct per_cpu_nodestat *p;
 888
 889		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
 890
 891		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
 892			if (p->vm_node_stat_diff[i]) {
 893				int v;
 894
 895				v = p->vm_node_stat_diff[i];
 896				p->vm_node_stat_diff[i] = 0;
 897				atomic_long_add(v, &pgdat->vm_stat[i]);
 898				global_node_diff[i] += v;
 899			}
 900	}
 901
 902#ifdef CONFIG_NUMA
 903	fold_diff(global_zone_diff, global_numa_diff, global_node_diff);
 904#else
 905	fold_diff(global_zone_diff, global_node_diff);
 906#endif
 907}
 908
 909/*
 910 * This is only called if !populated_zone(zone), which implies no other users of
 911 * pset->vm_stat_diff[] exist.
 912 */
 913void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
 914{
 915	int i;
 916
 917	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 918		if (pset->vm_stat_diff[i]) {
 919			int v = pset->vm_stat_diff[i];
 920			pset->vm_stat_diff[i] = 0;
 921			atomic_long_add(v, &zone->vm_stat[i]);
 922			atomic_long_add(v, &vm_zone_stat[i]);
 923		}
 924
 925#ifdef CONFIG_NUMA
 926	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
 927		if (pset->vm_numa_stat_diff[i]) {
 928			int v = pset->vm_numa_stat_diff[i];
 929
 930			pset->vm_numa_stat_diff[i] = 0;
 931			atomic_long_add(v, &zone->vm_numa_stat[i]);
 932			atomic_long_add(v, &vm_numa_stat[i]);
 933		}
 934#endif
 935}
 936#endif
 937
 938#ifdef CONFIG_NUMA
 939void __inc_numa_state(struct zone *zone,
 940				 enum numa_stat_item item)
 941{
 942	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 943	u16 __percpu *p = pcp->vm_numa_stat_diff + item;
 944	u16 v;
 945
 946	v = __this_cpu_inc_return(*p);
 947
 948	if (unlikely(v > NUMA_STATS_THRESHOLD)) {
 949		zone_numa_state_add(v, zone, item);
 950		__this_cpu_write(*p, 0);
 951	}
 952}
 953
 954/*
 955 * Determine the per node value of a stat item. This function
 956 * is called frequently in a NUMA machine, so try to be as
 957 * frugal as possible.
 958 */
 959unsigned long sum_zone_node_page_state(int node,
 960				 enum zone_stat_item item)
 961{
 962	struct zone *zones = NODE_DATA(node)->node_zones;
 963	int i;
 964	unsigned long count = 0;
 965
 966	for (i = 0; i < MAX_NR_ZONES; i++)
 967		count += zone_page_state(zones + i, item);
 968
 969	return count;
 970}
 971
 972/*
 973 * Determine the per node value of a numa stat item. To avoid deviation,
 974 * the per cpu stat number in vm_numa_stat_diff[] is also included.
 975 */
 976unsigned long sum_zone_numa_state(int node,
 977				 enum numa_stat_item item)
 978{
 979	struct zone *zones = NODE_DATA(node)->node_zones;
 980	int i;
 981	unsigned long count = 0;
 982
 983	for (i = 0; i < MAX_NR_ZONES; i++)
 984		count += zone_numa_state_snapshot(zones + i, item);
 985
 986	return count;
 987}
 988
 989/*
 990 * Determine the per node value of a stat item.
 991 */
 992unsigned long node_page_state(struct pglist_data *pgdat,
 993				enum node_stat_item item)
 994{
 995	long x = atomic_long_read(&pgdat->vm_stat[item]);
 996#ifdef CONFIG_SMP
 997	if (x < 0)
 998		x = 0;
 999#endif
1000	return x;
1001}
1002#endif
1003
1004#ifdef CONFIG_COMPACTION
1005
1006struct contig_page_info {
1007	unsigned long free_pages;
1008	unsigned long free_blocks_total;
1009	unsigned long free_blocks_suitable;
1010};
1011
1012/*
1013 * Calculate the number of free pages in a zone, how many contiguous
1014 * pages are free and how many are large enough to satisfy an allocation of
1015 * the target size. Note that this function makes no attempt to estimate
1016 * how many suitable free blocks there *might* be if MOVABLE pages were
1017 * migrated. Calculating that is possible, but expensive and can be
1018 * figured out from userspace
1019 */
1020static void fill_contig_page_info(struct zone *zone,
1021				unsigned int suitable_order,
1022				struct contig_page_info *info)
1023{
1024	unsigned int order;
1025
1026	info->free_pages = 0;
1027	info->free_blocks_total = 0;
1028	info->free_blocks_suitable = 0;
1029
1030	for (order = 0; order < MAX_ORDER; order++) {
1031		unsigned long blocks;
1032
1033		/* Count number of free blocks */
1034		blocks = zone->free_area[order].nr_free;
1035		info->free_blocks_total += blocks;
1036
1037		/* Count free base pages */
1038		info->free_pages += blocks << order;
1039
1040		/* Count the suitable free blocks */
1041		if (order >= suitable_order)
1042			info->free_blocks_suitable += blocks <<
1043						(order - suitable_order);
1044	}
1045}
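/*
 * Worked example (editorial illustration): a zone with 4 free order-0
 * pages and one free order-2 block yields free_pages = 4 + 4 = 8 and
 * free_blocks_total = 5; for suitable_order = 1 the order-2 block
 * contributes free_blocks_suitable = 1 << (2 - 1) = 2.
 */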
1046
1047/*
1048 * A fragmentation index only makes sense if an allocation of a requested
1049 * size would fail. If that is true, the fragmentation index indicates
1050 * whether external fragmentation or a lack of memory was the problem.
1051 * The value can be used to determine if page reclaim or compaction
1052 * should be used
1053 */
1054static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
1055{
1056	unsigned long requested = 1UL << order;
1057
1058	if (WARN_ON_ONCE(order >= MAX_ORDER))
1059		return 0;
1060
1061	if (!info->free_blocks_total)
1062		return 0;
1063
1064	/* Fragmentation index only makes sense when a request would fail */
1065	if (info->free_blocks_suitable)
1066		return -1000;
1067
1068	/*
1069	 * The index is between 0 and 1, so return it to within 3 decimal places
1070	 *
1071	 * 0 => allocation would fail due to lack of memory
1072	 * 1 => allocation would fail due to fragmentation
1073	 */
1074	return 1000 - div_u64(1000 + div_u64(info->free_pages * 1000ULL, requested), info->free_blocks_total);
1075}
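/*
 * Worked example (editorial illustration): requesting order = 2 (4
 * pages) from a zone with 60 free pages in 30 order-1 blocks and no
 * suitable block gives 1000 - (1000 + 60 * 1000 / 4) / 30 = 467, i.e.
 * 0.467, closer to a memory shortage than to pure fragmentation.
 */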
1076
1077/* Same as __fragmentation index but allocs contig_page_info on stack */
1078int fragmentation_index(struct zone *zone, unsigned int order)
1079{
1080	struct contig_page_info info;
1081
1082	fill_contig_page_info(zone, order, &info);
1083	return __fragmentation_index(order, &info);
1084}
1085#endif
1086
1087#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
1088#ifdef CONFIG_ZONE_DMA
1089#define TEXT_FOR_DMA(xx) xx "_dma",
1090#else
1091#define TEXT_FOR_DMA(xx)
1092#endif
1093
1094#ifdef CONFIG_ZONE_DMA32
1095#define TEXT_FOR_DMA32(xx) xx "_dma32",
1096#else
1097#define TEXT_FOR_DMA32(xx)
1098#endif
1099
1100#ifdef CONFIG_HIGHMEM
1101#define TEXT_FOR_HIGHMEM(xx) xx "_high",
1102#else
1103#define TEXT_FOR_HIGHMEM(xx)
1104#endif
1105
1106#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
1107					TEXT_FOR_HIGHMEM(xx) xx "_movable",
1108
1109const char * const vmstat_text[] = {
1110	/* enum zone_stat_item counters */
1111	"nr_free_pages",
1112	"nr_zone_inactive_anon",
1113	"nr_zone_active_anon",
1114	"nr_zone_inactive_file",
1115	"nr_zone_active_file",
1116	"nr_zone_unevictable",
1117	"nr_zone_write_pending",
1118	"nr_mlock",
1119	"nr_page_table_pages",
1120	"nr_kernel_stack",
1121	"nr_bounce",
1122#if IS_ENABLED(CONFIG_ZSMALLOC)
1123	"nr_zspages",
1124#endif
1125	"nr_free_cma",
1126
1127	/* enum numa_stat_item counters */
1128#ifdef CONFIG_NUMA
1129	"numa_hit",
1130	"numa_miss",
1131	"numa_foreign",
1132	"numa_interleave",
1133	"numa_local",
1134	"numa_other",
1135#endif
1136
1137	/* Node-based counters */
1138	"nr_inactive_anon",
1139	"nr_active_anon",
1140	"nr_inactive_file",
1141	"nr_active_file",
1142	"nr_unevictable",
1143	"nr_slab_reclaimable",
1144	"nr_slab_unreclaimable",
1145	"nr_isolated_anon",
1146	"nr_isolated_file",
1147	"workingset_nodes",
1148	"workingset_refault",
1149	"workingset_activate",
1150	"workingset_restore",
1151	"workingset_nodereclaim",
1152	"nr_anon_pages",
1153	"nr_mapped",
1154	"nr_file_pages",
1155	"nr_dirty",
1156	"nr_writeback",
1157	"nr_writeback_temp",
1158	"nr_shmem",
1159	"nr_shmem_hugepages",
1160	"nr_shmem_pmdmapped",
1161	"nr_file_hugepages",
1162	"nr_file_pmdmapped",
1163	"nr_anon_transparent_hugepages",
1164	"nr_unstable",
1165	"nr_vmscan_write",
1166	"nr_vmscan_immediate_reclaim",
1167	"nr_dirtied",
1168	"nr_written",
1169	"nr_kernel_misc_reclaimable",
1170
1171	/* enum writeback_stat_item counters */
1172	"nr_dirty_threshold",
1173	"nr_dirty_background_threshold",
1174
1175#ifdef CONFIG_VM_EVENT_COUNTERS
1176	/* enum vm_event_item counters */
1177	"pgpgin",
1178	"pgpgout",
1179	"pswpin",
1180	"pswpout",
1181
1182	TEXTS_FOR_ZONES("pgalloc")
1183	TEXTS_FOR_ZONES("allocstall")
1184	TEXTS_FOR_ZONES("pgskip")
1185
1186	"pgfree",
1187	"pgactivate",
1188	"pgdeactivate",
1189	"pglazyfree",
1190
1191	"pgfault",
1192	"pgmajfault",
1193	"pglazyfreed",
1194
1195	"pgrefill",
1196	"pgsteal_kswapd",
1197	"pgsteal_direct",
1198	"pgscan_kswapd",
1199	"pgscan_direct",
1200	"pgscan_direct_throttle",
1201
1202#ifdef CONFIG_NUMA
1203	"zone_reclaim_failed",
1204#endif
1205	"pginodesteal",
1206	"slabs_scanned",
1207	"kswapd_inodesteal",
1208	"kswapd_low_wmark_hit_quickly",
1209	"kswapd_high_wmark_hit_quickly",
1210	"pageoutrun",
1211
1212	"pgrotated",
1213
1214	"drop_pagecache",
1215	"drop_slab",
1216	"oom_kill",
1217
1218#ifdef CONFIG_NUMA_BALANCING
1219	"numa_pte_updates",
1220	"numa_huge_pte_updates",
1221	"numa_hint_faults",
1222	"numa_hint_faults_local",
1223	"numa_pages_migrated",
1224#endif
1225#ifdef CONFIG_MIGRATION
1226	"pgmigrate_success",
1227	"pgmigrate_fail",
1228#endif
1229#ifdef CONFIG_COMPACTION
1230	"compact_migrate_scanned",
1231	"compact_free_scanned",
1232	"compact_isolated",
1233	"compact_stall",
1234	"compact_fail",
1235	"compact_success",
1236	"compact_daemon_wake",
1237	"compact_daemon_migrate_scanned",
1238	"compact_daemon_free_scanned",
1239#endif
1240
1241#ifdef CONFIG_HUGETLB_PAGE
1242	"htlb_buddy_alloc_success",
1243	"htlb_buddy_alloc_fail",
1244#endif
1245	"unevictable_pgs_culled",
1246	"unevictable_pgs_scanned",
1247	"unevictable_pgs_rescued",
1248	"unevictable_pgs_mlocked",
1249	"unevictable_pgs_munlocked",
1250	"unevictable_pgs_cleared",
1251	"unevictable_pgs_stranded",
1252
1253#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1254	"thp_fault_alloc",
1255	"thp_fault_fallback",
1256	"thp_collapse_alloc",
1257	"thp_collapse_alloc_failed",
1258	"thp_file_alloc",
1259	"thp_file_mapped",
1260	"thp_split_page",
1261	"thp_split_page_failed",
1262	"thp_deferred_split_page",
1263	"thp_split_pmd",
1264#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1265	"thp_split_pud",
1266#endif
1267	"thp_zero_page_alloc",
1268	"thp_zero_page_alloc_failed",
1269	"thp_swpout",
1270	"thp_swpout_fallback",
1271#endif
1272#ifdef CONFIG_MEMORY_BALLOON
1273	"balloon_inflate",
1274	"balloon_deflate",
1275#ifdef CONFIG_BALLOON_COMPACTION
1276	"balloon_migrate",
1277#endif
1278#endif /* CONFIG_MEMORY_BALLOON */
1279#ifdef CONFIG_DEBUG_TLBFLUSH
1280	"nr_tlb_remote_flush",
1281	"nr_tlb_remote_flush_received",
1282	"nr_tlb_local_flush_all",
1283	"nr_tlb_local_flush_one",
1284#endif /* CONFIG_DEBUG_TLBFLUSH */
1285
1286#ifdef CONFIG_DEBUG_VM_VMACACHE
1287	"vmacache_find_calls",
1288	"vmacache_find_hits",
1289#endif
1290#ifdef CONFIG_SWAP
1291	"swap_ra",
1292	"swap_ra_hit",
1293#endif
1294#endif /* CONFIG_VM_EVENT_COUNTERS */
1295};
1296#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
1297
1298#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
1299     defined(CONFIG_PROC_FS)
1300static void *frag_start(struct seq_file *m, loff_t *pos)
1301{
1302	pg_data_t *pgdat;
1303	loff_t node = *pos;
1304
1305	for (pgdat = first_online_pgdat();
1306	     pgdat && node;
1307	     pgdat = next_online_pgdat(pgdat))
1308		--node;
1309
1310	return pgdat;
1311}
1312
1313static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
1314{
1315	pg_data_t *pgdat = (pg_data_t *)arg;
1316
1317	(*pos)++;
1318	return next_online_pgdat(pgdat);
1319}
1320
1321static void frag_stop(struct seq_file *m, void *arg)
1322{
1323}
1324
1325/*
1326 * Walk zones in a node and print using a callback.
1327 * If @assert_populated is true, only use callback for zones that are populated.
1328 */
1329static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
1330		bool assert_populated, bool nolock,
1331		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
1332{
1333	struct zone *zone;
1334	struct zone *node_zones = pgdat->node_zones;
1335	unsigned long flags;
1336
1337	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
1338		if (assert_populated && !populated_zone(zone))
1339			continue;
1340
1341		if (!nolock)
1342			spin_lock_irqsave(&zone->lock, flags);
1343		print(m, pgdat, zone);
1344		if (!nolock)
1345			spin_unlock_irqrestore(&zone->lock, flags);
1346	}
1347}
1348#endif
1349
1350#ifdef CONFIG_PROC_FS
1351static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
1352						struct zone *zone)
1353{
1354	int order;
1355
1356	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1357	for (order = 0; order < MAX_ORDER; ++order)
1358		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
1359	seq_putc(m, '\n');
1360}
1361
1362/*
1363 * This walks the free areas for each zone.
1364 */
1365static int frag_show(struct seq_file *m, void *arg)
1366{
1367	pg_data_t *pgdat = (pg_data_t *)arg;
1368	walk_zones_in_node(m, pgdat, true, false, frag_show_print);
1369	return 0;
1370}
1371
1372static void pagetypeinfo_showfree_print(struct seq_file *m,
1373					pg_data_t *pgdat, struct zone *zone)
1374{
1375	int order, mtype;
1376
1377	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
1378		seq_printf(m, "Node %4d, zone %8s, type %12s ",
1379					pgdat->node_id,
1380					zone->name,
1381					migratetype_names[mtype]);
1382		for (order = 0; order < MAX_ORDER; ++order) {
1383			unsigned long freecount = 0;
1384			struct free_area *area;
1385			struct list_head *curr;
1386			bool overflow = false;
1387
1388			area = &(zone->free_area[order]);
1389
1390			list_for_each(curr, &area->free_list[mtype]) {
1391				/*
1392				 * Cap the free_list iteration because it might
1393				 * be really large and we are under a spinlock
1394				 * so a long time spent here could trigger a
1395				 * hard lockup detector. Anyway this is a
1396				 * debugging tool so knowing there is a handful
1397				 * of pages of this order should be more than
1398				 * sufficient.
1399				 */
1400				if (++freecount >= 100000) {
1401					overflow = true;
1402					break;
1403				}
1404			}
1405			seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
1406			spin_unlock_irq(&zone->lock);
1407			cond_resched();
1408			spin_lock_irq(&zone->lock);
1409		}
1410		seq_putc(m, '\n');
1411	}
1412}
1413
1414/* Print out the free pages at each order for each migratetype */
1415static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
1416{
1417	int order;
1418	pg_data_t *pgdat = (pg_data_t *)arg;
1419
1420	/* Print header */
1421	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
1422	for (order = 0; order < MAX_ORDER; ++order)
1423		seq_printf(m, "%6d ", order);
1424	seq_putc(m, '\n');
1425
1426	walk_zones_in_node(m, pgdat, true, false, pagetypeinfo_showfree_print);
1427
1428	return 0;
1429}
1430
1431static void pagetypeinfo_showblockcount_print(struct seq_file *m,
1432					pg_data_t *pgdat, struct zone *zone)
1433{
1434	int mtype;
1435	unsigned long pfn;
1436	unsigned long start_pfn = zone->zone_start_pfn;
1437	unsigned long end_pfn = zone_end_pfn(zone);
1438	unsigned long count[MIGRATE_TYPES] = { 0, };
1439
1440	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
1441		struct page *page;
1442
1443		page = pfn_to_online_page(pfn);
1444		if (!page)
1445			continue;
1446
1447		/* Watch for unexpected holes punched in the memmap */
1448		if (!memmap_valid_within(pfn, page, zone))
1449			continue;
1450
1451		if (page_zone(page) != zone)
1452			continue;
1453
1454		mtype = get_pageblock_migratetype(page);
1455
1456		if (mtype < MIGRATE_TYPES)
1457			count[mtype]++;
1458	}
1459
1460	/* Print counts */
1461	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1462	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1463		seq_printf(m, "%12lu ", count[mtype]);
1464	seq_putc(m, '\n');
1465}
1466
1467/* Print out the number of pageblocks for each migratetype */
1468static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
1469{
1470	int mtype;
1471	pg_data_t *pgdat = (pg_data_t *)arg;
1472
1473	seq_printf(m, "\n%-23s", "Number of blocks type ");
1474	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1475		seq_printf(m, "%12s ", migratetype_names[mtype]);
1476	seq_putc(m, '\n');
1477	walk_zones_in_node(m, pgdat, true, false,
1478		pagetypeinfo_showblockcount_print);
1479
1480	return 0;
1481}
1482
1483/*
1484 * Print out the number of pageblocks for each migratetype that contain pages
1485 * of other types. This gives an indication of how well fallbacks are being
1486 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
1487 * to determine what is going on
1488 */
1489static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
1490{
1491#ifdef CONFIG_PAGE_OWNER
1492	int mtype;
1493
1494	if (!static_branch_unlikely(&page_owner_inited))
1495		return;
1496
1497	drain_all_pages(NULL);
1498
1499	seq_printf(m, "\n%-23s", "Number of mixed blocks ");
1500	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1501		seq_printf(m, "%12s ", migratetype_names[mtype]);
1502	seq_putc(m, '\n');
1503
1504	walk_zones_in_node(m, pgdat, true, true,
1505		pagetypeinfo_showmixedcount_print);
1506#endif /* CONFIG_PAGE_OWNER */
1507}
1508
1509/*
1510 * This prints out statistics in relation to grouping pages by mobility.
1511 * It is expensive to collect so do not constantly read the file.
1512 */
1513static int pagetypeinfo_show(struct seq_file *m, void *arg)
1514{
1515	pg_data_t *pgdat = (pg_data_t *)arg;
1516
1517	/* check memoryless node */
1518	if (!node_state(pgdat->node_id, N_MEMORY))
1519		return 0;
1520
1521	seq_printf(m, "Page block order: %d\n", pageblock_order);
1522	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
1523	seq_putc(m, '\n');
1524	pagetypeinfo_showfree(m, pgdat);
1525	pagetypeinfo_showblockcount(m, pgdat);
1526	pagetypeinfo_showmixedcount(m, pgdat);
1527
1528	return 0;
1529}
1530
1531static const struct seq_operations fragmentation_op = {
1532	.start	= frag_start,
1533	.next	= frag_next,
1534	.stop	= frag_stop,
1535	.show	= frag_show,
1536};
1537
1538static const struct seq_operations pagetypeinfo_op = {
1539	.start	= frag_start,
1540	.next	= frag_next,
1541	.stop	= frag_stop,
1542	.show	= pagetypeinfo_show,
1543};
1544
1545static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
1546{
1547	int zid;
1548
1549	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1550		struct zone *compare = &pgdat->node_zones[zid];
1551
1552		if (populated_zone(compare))
1553			return zone == compare;
1554	}
1555
1556	return false;
1557}
1558
1559static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1560							struct zone *zone)
1561{
1562	int i;
1563	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1564	if (is_zone_first_populated(pgdat, zone)) {
1565		seq_printf(m, "\n  per-node stats");
1566		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1567			seq_printf(m, "\n      %-12s %lu",
1568				vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
1569				NR_VM_NUMA_STAT_ITEMS],
1570				node_page_state(pgdat, i));
1571		}
1572	}
1573	seq_printf(m,
1574		   "\n  pages free     %lu"
1575		   "\n        min      %lu"
1576		   "\n        low      %lu"
1577		   "\n        high     %lu"
1578		   "\n        spanned  %lu"
1579		   "\n        present  %lu"
1580		   "\n        managed  %lu",
1581		   zone_page_state(zone, NR_FREE_PAGES),
1582		   min_wmark_pages(zone),
1583		   low_wmark_pages(zone),
1584		   high_wmark_pages(zone),
1585		   zone->spanned_pages,
1586		   zone->present_pages,
1587		   zone_managed_pages(zone));
1588
1589	seq_printf(m,
1590		   "\n        protection: (%ld",
1591		   zone->lowmem_reserve[0]);
1592	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
1593		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
1594	seq_putc(m, ')');
1595
1596	/* If unpopulated, no other information is useful */
1597	if (!populated_zone(zone)) {
1598		seq_putc(m, '\n');
1599		return;
1600	}
1601
1602	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1603		seq_printf(m, "\n      %-12s %lu", vmstat_text[i],
1604				zone_page_state(zone, i));
1605
1606#ifdef CONFIG_NUMA
1607	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
1608		seq_printf(m, "\n      %-12s %lu",
1609				vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
1610				zone_numa_state_snapshot(zone, i));
1611#endif
1612
1613	seq_printf(m, "\n  pagesets");
1614	for_each_online_cpu(i) {
1615		struct per_cpu_pageset *pageset;
1616
1617		pageset = per_cpu_ptr(zone->pageset, i);
1618		seq_printf(m,
1619			   "\n    cpu: %i"
1620			   "\n              count: %i"
1621			   "\n              high:  %i"
1622			   "\n              batch: %i",
1623			   i,
1624			   pageset->pcp.count,
1625			   pageset->pcp.high,
1626			   pageset->pcp.batch);
1627#ifdef CONFIG_SMP
1628		seq_printf(m, "\n  vm stats threshold: %d",
1629				pageset->stat_threshold);
1630#endif
1631	}
1632	seq_printf(m,
1633		   "\n  node_unreclaimable:  %u"
1634		   "\n  start_pfn:           %lu",
1635		   pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
1636		   zone->zone_start_pfn);
1637	seq_putc(m, '\n');
1638}
1639
1640/*
1641 * Output information about zones in @pgdat.  All zones are printed regardless
1642 * of whether they are populated or not: lowmem_reserve_ratio operates on the
1643 * set of all zones and userspace would not be aware of such zones if they are
1644 * suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio).
1645 */
1646static int zoneinfo_show(struct seq_file *m, void *arg)
1647{
1648	pg_data_t *pgdat = (pg_data_t *)arg;
1649	walk_zones_in_node(m, pgdat, false, false, zoneinfo_show_print);
1650	return 0;
1651}
1652
1653static const struct seq_operations zoneinfo_op = {
1654	.start	= frag_start, /* iterate over all zones. The same as in
1655			       * fragmentation. */
1656	.next	= frag_next,
1657	.stop	= frag_stop,
1658	.show	= zoneinfo_show,
1659};
1660
1661enum writeback_stat_item {
1662	NR_DIRTY_THRESHOLD,
1663	NR_DIRTY_BG_THRESHOLD,
1664	NR_VM_WRITEBACK_STAT_ITEMS,
1665};
1666
1667static void *vmstat_start(struct seq_file *m, loff_t *pos)
1668{
1669	unsigned long *v;
1670	int i, stat_items_size;
1671
1672	if (*pos >= ARRAY_SIZE(vmstat_text))
1673		return NULL;
1674	stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
1675			  NR_VM_NUMA_STAT_ITEMS * sizeof(unsigned long) +
1676			  NR_VM_NODE_STAT_ITEMS * sizeof(unsigned long) +
1677			  NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
1678
1679#ifdef CONFIG_VM_EVENT_COUNTERS
1680	stat_items_size += sizeof(struct vm_event_state);
1681#endif
1682
1683	BUILD_BUG_ON(stat_items_size !=
1684		     ARRAY_SIZE(vmstat_text) * sizeof(unsigned long));
1685	v = kmalloc(stat_items_size, GFP_KERNEL);
1686	m->private = v;
1687	if (!v)
1688		return ERR_PTR(-ENOMEM);
1689	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1690		v[i] = global_zone_page_state(i);
1691	v += NR_VM_ZONE_STAT_ITEMS;
1692
1693#ifdef CONFIG_NUMA
1694	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
1695		v[i] = global_numa_state(i);
1696	v += NR_VM_NUMA_STAT_ITEMS;
1697#endif
1698
1699	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
1700		v[i] = global_node_page_state(i);
1701	v += NR_VM_NODE_STAT_ITEMS;
1702
1703	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1704			    v + NR_DIRTY_THRESHOLD);
1705	v += NR_VM_WRITEBACK_STAT_ITEMS;
1706
1707#ifdef CONFIG_VM_EVENT_COUNTERS
1708	all_vm_events(v);
1709	v[PGPGIN] /= 2;		/* sectors -> kbytes */
1710	v[PGPGOUT] /= 2;
1711#endif
1712	return (unsigned long *)m->private + *pos;
1713}
1714
1715static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1716{
1717	(*pos)++;
1718	if (*pos >= ARRAY_SIZE(vmstat_text))
1719		return NULL;
1720	return (unsigned long *)m->private + *pos;
1721}
1722
1723static int vmstat_show(struct seq_file *m, void *arg)
1724{
1725	unsigned long *l = arg;
1726	unsigned long off = l - (unsigned long *)m->private;
1727
1728	seq_puts(m, vmstat_text[off]);
1729	seq_put_decimal_ull(m, " ", *l);
1730	seq_putc(m, '\n');
1731	return 0;
1732}
1733
1734static void vmstat_stop(struct seq_file *m, void *arg)
1735{
1736	kfree(m->private);
1737	m->private = NULL;
1738}
1739
1740static const struct seq_operations vmstat_op = {
1741	.start	= vmstat_start,
1742	.next	= vmstat_next,
1743	.stop	= vmstat_stop,
1744	.show	= vmstat_show,
1745};
1746#endif /* CONFIG_PROC_FS */
1747
1748#ifdef CONFIG_SMP
1749static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1750int sysctl_stat_interval __read_mostly = HZ;
1751
1752#ifdef CONFIG_PROC_FS
1753static void refresh_vm_stats(struct work_struct *work)
1754{
1755	refresh_cpu_vm_stats(true);
1756}
1757
1758int vmstat_refresh(struct ctl_table *table, int write,
1759		   void __user *buffer, size_t *lenp, loff_t *ppos)
1760{
1761	long val;
1762	int err;
1763	int i;
1764
1765	/*
1766	 * The regular update, every sysctl_stat_interval, may come later
1767	 * than expected, leaving a significant amount in the per_cpu buckets.
1768	 * This is particularly misleading when checking a quantity of HUGE
1769	 * pages, immediately after running a test.  /proc/sys/vm/stat_refresh,
1770	 * which can equally be echo'ed to or cat'ted from (by root),
1771	 * can be used to update the stats just before reading them.
1772	 *
1773	 * Since global_zone_page_state() etc. are careful to hide
1774	 * transiently negative values, report an error here if any of
1775	 * the stats is negative, so we know to go looking for imbalance.
1776	 */
1777	err = schedule_on_each_cpu(refresh_vm_stats);
1778	if (err)
1779		return err;
1780	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
1781		val = atomic_long_read(&vm_zone_stat[i]);
1782		if (val < 0) {
1783			pr_warn("%s: %s %ld\n",
1784				__func__, vmstat_text[i], val);
1785			err = -EINVAL;
1786		}
1787	}
1788#ifdef CONFIG_NUMA
1789	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) {
1790		val = atomic_long_read(&vm_numa_stat[i]);
1791		if (val < 0) {
1792			pr_warn("%s: %s %ld\n",
1793				__func__, vmstat_text[i + NR_VM_ZONE_STAT_ITEMS], val);
1794			err = -EINVAL;
1795		}
1796	}
1797#endif
1798	if (err)
1799		return err;
1800	if (write)
1801		*ppos += *lenp;
1802	else
1803		*lenp = 0;
1804	return 0;
1805}
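/*
 * Userspace usage sketch (root only), per the comment above: either
 * direction forces the flush performed by schedule_on_each_cpu():
 *
 *   echo 1 >/proc/sys/vm/stat_refresh
 *   cat /proc/sys/vm/stat_refresh
 */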
1806#endif /* CONFIG_PROC_FS */
1807
1808static void vmstat_update(struct work_struct *w)
1809{
1810	if (refresh_cpu_vm_stats(true)) {
1811		/*
1812		 * Counters were updated so we expect more updates
1813		 * to occur in the future. Keep on running the
1814		 * update worker thread.
1815		 */
1816		queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
1817				this_cpu_ptr(&vmstat_work),
1818				round_jiffies_relative(sysctl_stat_interval));
1819	}
1820}
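/*
 * Note: when refresh_cpu_vm_stats() folds no updates, the work is
 * deliberately not requeued; vmstat_shepherd (below) restarts it once
 * need_update() sees fresh diffs for this cpu.
 */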
1821
1827/*
1828 * Check if the diffs for a certain cpu indicate that
1829 * an update is needed.
1830 */
1831static bool need_update(int cpu)
1832{
1833	struct zone *zone;
1834
1835	for_each_populated_zone(zone) {
1836		struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);
1837
1838		BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1);
1839#ifdef CONFIG_NUMA
1840		BUILD_BUG_ON(sizeof(p->vm_numa_stat_diff[0]) != 2);
1841#endif
1842
1843		/*
1844		 * The fast way of checking if there are any vmstat diffs.
1845		 */
1846		if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS *
1847			       sizeof(p->vm_stat_diff[0])))
1848			return true;
1849#ifdef CONFIG_NUMA
1850		if (memchr_inv(p->vm_numa_stat_diff, 0, NR_VM_NUMA_STAT_ITEMS *
1851			       sizeof(p->vm_numa_stat_diff[0])))
1852			return true;
1853#endif
1854	}
1855	return false;
1856}
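/*
 * Illustrative sketch (not part of vmstat.c): memchr_inv(buf, c, n)
 * returns the first byte of buf that differs from c, or NULL when all
 * n bytes equal c, so a NULL result above means "no pending diffs".
 */
static inline bool example_all_bytes_zero(const void *buf, size_t n)
{
	return memchr_inv(buf, 0, n) == NULL;
}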
1857
1858/*
1859 * Switch off vmstat processing and then fold all the remaining differentials
1860 * until the diffs stay at zero. The function is used by NOHZ and can only be
1861 * invoked when tick processing is not active.
1862 */
1863void quiet_vmstat(void)
1864{
1865	if (system_state != SYSTEM_RUNNING)
1866		return;
1867
1868	if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
1869		return;
1870
1871	if (!need_update(smp_processor_id()))
1872		return;
1873
1874	/*
1875	 * Just refresh counters and do not care about the pending delayed
1876	 * vmstat_update. It doesn't fire that often to matter and canceling
1877	 * it would be too expensive from this path.
1878	 * vmstat_shepherd will take care about that for us.
1879	 */
1880	refresh_cpu_vm_stats(false);
1881}
1882
1883/*
1884 * Shepherd work item: periodically checks the per-cpu
1885 * differentials of processors whose vmstat workers have gone
1886 * idle because of inactivity, and requeues a worker on any
1887 * cpu that has pending updates.
1888 */
1889static void vmstat_shepherd(struct work_struct *w);
1890
1891static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
1892
1893static void vmstat_shepherd(struct work_struct *w)
1894{
1895	int cpu;
1896
1897	get_online_cpus();
1898	/* Check processors whose vmstat worker threads have been disabled */
1899	for_each_online_cpu(cpu) {
1900		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
1901
1902		if (!delayed_work_pending(dw) && need_update(cpu))
1903			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
1904	}
1905	put_online_cpus();
1906
1907	schedule_delayed_work(&shepherd,
1908		round_jiffies_relative(sysctl_stat_interval));
1909}
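/*
 * The shepherd requeues itself unconditionally, so parked cpus are
 * re-checked every sysctl_stat_interval (HZ by default, i.e. roughly
 * once a second) without keeping their per-cpu workers alive.
 */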
1910
1911static void __init start_shepherd_timer(void)
1912{
1913	int cpu;
1914
1915	for_each_possible_cpu(cpu)
1916		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
1917			vmstat_update);
1918
1919	schedule_delayed_work(&shepherd,
1920		round_jiffies_relative(sysctl_stat_interval));
1921}
1922
1923static void __init init_cpu_node_state(void)
1924{
1925	int node;
1926
1927	for_each_online_node(node) {
1928		if (cpumask_weight(cpumask_of_node(node)) > 0)
1929			node_set_state(node, N_CPU);
1930	}
1931}
1932
1933static int vmstat_cpu_online(unsigned int cpu)
1934{
1935	refresh_zone_stat_thresholds();
1936	node_set_state(cpu_to_node(cpu), N_CPU);
1937	return 0;
1938}
1939
1940static int vmstat_cpu_down_prep(unsigned int cpu)
1941{
1942	cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
1943	return 0;
1944}
1945
1946static int vmstat_cpu_dead(unsigned int cpu)
1947{
1948	const struct cpumask *node_cpus;
1949	int node;
1950
1951	node = cpu_to_node(cpu);
1952
1953	refresh_zone_stat_thresholds();
1954	node_cpus = cpumask_of_node(node);
1955	if (cpumask_weight(node_cpus) > 0)
1956		return 0;
1957
1958	node_clear_state(node, N_CPU);
1959	return 0;
1960}
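/*
 * Hotplug lifecycle sketch, as wired up in init_mm_internals() below:
 * a cpu coming online refreshes the stat thresholds and marks its node
 * N_CPU; on the way down its vmstat work is cancelled first, and once
 * the cpu is dead the node drops N_CPU if no online cpus remain on it.
 */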
1961
1962#endif
1963
1964struct workqueue_struct *mm_percpu_wq;
1965
1966void __init init_mm_internals(void)
1967{
1968	int ret __maybe_unused;
1969
1970	mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);
1971
1972#ifdef CONFIG_SMP
1973	ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
1974					NULL, vmstat_cpu_dead);
1975	if (ret < 0)
1976		pr_err("vmstat: failed to register 'dead' hotplug state\n");
1977
1978	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
1979					vmstat_cpu_online,
1980					vmstat_cpu_down_prep);
1981	if (ret < 0)
1982		pr_err("vmstat: failed to register 'online' hotplug state\n");
1983
1984	get_online_cpus();
1985	init_cpu_node_state();
1986	put_online_cpus();
1987
1988	start_shepherd_timer();
1989#endif
1990#ifdef CONFIG_PROC_FS
1991	proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
1992	proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
1993	proc_create_seq("vmstat", 0444, NULL, &vmstat_op);
1994	proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op);
1995#endif
1996}
1997
1998#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
1999
2000/*
2001 * Return an index indicating how much of the available free memory is
2002 * unusable for an allocation of the requested size.
2003 */
2004static int unusable_free_index(unsigned int order,
2005				struct contig_page_info *info)
2006{
2007	/* No free pages at all: report everything as unusable (index 1.000) */
2008	if (info->free_pages == 0)
2009		return 1000;
2010
2011	/*
2012	 * Index should be a value between 0 and 1. Return a value to 3
2013	 * decimal places.
2014	 *
2015	 * 0 => no fragmentation
2016	 * 1 => high fragmentation
2017	 */
2018	return div_u64((info->free_pages -
2019			(info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
2020}
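/*
 * Worked example with made-up numbers: free_pages = 1000 and
 * free_blocks_suitable = 200 at order 2 cover 200 << 2 = 800 pages,
 * giving (1000 - 800) * 1000 / 1000 = 200, which unusable_show_print()
 * renders as 0.200: 20% of free memory cannot satisfy an order-2
 * request.
 */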
2021
2022static void unusable_show_print(struct seq_file *m,
2023					pg_data_t *pgdat, struct zone *zone)
2024{
2025	unsigned int order;
2026	int index;
2027	struct contig_page_info info;
2028
2029	seq_printf(m, "Node %d, zone %8s ",
2030				pgdat->node_id,
2031				zone->name);
2032	for (order = 0; order < MAX_ORDER; ++order) {
2033		fill_contig_page_info(zone, order, &info);
2034		index = unusable_free_index(order, &info);
2035		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
2036	}
2037
2038	seq_putc(m, '\n');
2039}
2040
2041/*
2042 * Display unusable free space index
2043 *
2044 * The unusable free space index measures how much of the available free
2045 * memory cannot be used to satisfy an allocation of a given size and is a
2046 * value between 0 and 1. The higher the value, the more of the free memory
2047 * is unusable and, by implication, the worse the external fragmentation. This
2048 * can be expressed as a percentage by multiplying by 100.
2049 */
2050static int unusable_show(struct seq_file *m, void *arg)
2051{
2052	pg_data_t *pgdat = (pg_data_t *)arg;
2053
2054	/* check memoryless node */
2055	if (!node_state(pgdat->node_id, N_MEMORY))
2056		return 0;
2057
2058	walk_zones_in_node(m, pgdat, true, false, unusable_show_print);
2059
2060	return 0;
2061}
2062
2063static const struct seq_operations unusable_op = {
2064	.start	= frag_start,
2065	.next	= frag_next,
2066	.stop	= frag_stop,
2067	.show	= unusable_show,
2068};
2069
2070static int unusable_open(struct inode *inode, struct file *file)
2071{
2072	return seq_open(file, &unusable_op);
2073}
2074
2075static const struct file_operations unusable_file_ops = {
2076	.open		= unusable_open,
2077	.read		= seq_read,
2078	.llseek		= seq_lseek,
2079	.release	= seq_release,
2080};
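/*
 * With debugfs mounted in the usual place this file appears as
 * /sys/kernel/debug/extfrag/unusable_index; illustrative output, one
 * index per order (actual values depend on system state):
 *
 *   Node 0, zone   Normal 0.000 0.005 0.010 0.031 ...
 */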
2081
2082static void extfrag_show_print(struct seq_file *m,
2083					pg_data_t *pgdat, struct zone *zone)
2084{
2085	unsigned int order;
2086	int index;
2087
2088	/* Alloc on stack as interrupts are disabled for zone walk */
2089	struct contig_page_info info;
2090
2091	seq_printf(m, "Node %d, zone %8s ",
2092				pgdat->node_id,
2093				zone->name);
2094	for (order = 0; order < MAX_ORDER; ++order) {
2095		fill_contig_page_info(zone, order, &info);
2096		index = __fragmentation_index(order, &info);
2097		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
2098	}
2099
2100	seq_putc(m, '\n');
2101}
2102
2103/*
2104 * Display fragmentation index for orders that allocations would fail for
2105 */
2106static int extfrag_show(struct seq_file *m, void *arg)
2107{
2108	pg_data_t *pgdat = (pg_data_t *)arg;
2109
2110	walk_zones_in_node(m, pgdat, true, false, extfrag_show_print);
2111
2112	return 0;
2113}
2114
2115static const struct seq_operations extfrag_op = {
2116	.start	= frag_start,
2117	.next	= frag_next,
2118	.stop	= frag_stop,
2119	.show	= extfrag_show,
2120};
2121
2122static int extfrag_open(struct inode *inode, struct file *file)
2123{
2124	return seq_open(file, &extfrag_op);
2125}
2126
2127static const struct file_operations extfrag_file_ops = {
2128	.open		= extfrag_open,
2129	.read		= seq_read,
2130	.llseek		= seq_lseek,
2131	.release	= seq_release,
2132};
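/*
 * Exposed as /sys/kernel/debug/extfrag/extfrag_index. Reading the
 * values, per __fragmentation_index(): -1.000 means an allocation of
 * that order would currently succeed; otherwise values toward 0.000
 * suggest failure due to lack of memory and values toward 1.000
 * suggest external fragmentation.
 */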
2133
2134static int __init extfrag_debug_init(void)
2135{
2136	struct dentry *extfrag_debug_root;
2137
2138	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
2139
2140	debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL,
2141			    &unusable_file_ops);
2142
2143	debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL,
2144			    &extfrag_file_ops);
2145
2146	return 0;
2147}
2148
2149module_init(extfrag_debug_init);
2150#endif