v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/mm/vmstat.c
   4 *
   5 *  Manages VM statistics
   6 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   7 *
   8 *  zoned VM statistics
   9 *  Copyright (C) 2006 Silicon Graphics, Inc.,
  10 *		Christoph Lameter <christoph@lameter.com>
  11 *  Copyright (C) 2008-2014 Christoph Lameter
  12 */
  13#include <linux/fs.h>
  14#include <linux/mm.h>
  15#include <linux/err.h>
  16#include <linux/module.h>
  17#include <linux/slab.h>
  18#include <linux/cpu.h>
  19#include <linux/cpumask.h>
  20#include <linux/vmstat.h>
  21#include <linux/proc_fs.h>
  22#include <linux/seq_file.h>
  23#include <linux/debugfs.h>
  24#include <linux/sched.h>
  25#include <linux/math64.h>
  26#include <linux/writeback.h>
  27#include <linux/compaction.h>
  28#include <linux/mm_inline.h>
  29#include <linux/page_ext.h>
  30#include <linux/page_owner.h>
  31
  32#include "internal.h"
  33
  34#ifdef CONFIG_NUMA
  35int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;
  36
  37/* zero numa counters within a zone */
  38static void zero_zone_numa_counters(struct zone *zone)
  39{
  40	int item, cpu;
  41
  42	for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++) {
  43		atomic_long_set(&zone->vm_numa_event[item], 0);
  44		for_each_online_cpu(cpu) {
  45			per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item]
  46						= 0;
  47		}
  48	}
  49}
  50
  51/* zero numa counters of all the populated zones */
  52static void zero_zones_numa_counters(void)
  53{
  54	struct zone *zone;
  55
  56	for_each_populated_zone(zone)
  57		zero_zone_numa_counters(zone);
  58}
  59
  60/* zero global numa counters */
  61static void zero_global_numa_counters(void)
  62{
  63	int item;
  64
  65	for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
  66		atomic_long_set(&vm_numa_event[item], 0);
  67}
  68
  69static void invalid_numa_statistics(void)
  70{
  71	zero_zones_numa_counters();
  72	zero_global_numa_counters();
  73}
  74
  75static DEFINE_MUTEX(vm_numa_stat_lock);
  76
  77int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
  78		void *buffer, size_t *length, loff_t *ppos)
  79{
  80	int ret, oldval;
  81
  82	mutex_lock(&vm_numa_stat_lock);
  83	if (write)
  84		oldval = sysctl_vm_numa_stat;
  85	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
  86	if (ret || !write)
  87		goto out;
  88
  89	if (oldval == sysctl_vm_numa_stat)
  90		goto out;
  91	else if (sysctl_vm_numa_stat == ENABLE_NUMA_STAT) {
  92		static_branch_enable(&vm_numa_stat_key);
  93		pr_info("enable numa statistics\n");
  94	} else {
  95		static_branch_disable(&vm_numa_stat_key);
  96		invalid_numa_statistics();
  97		pr_info("disable numa statistics, and clear numa counters\n");
  98	}
  99
 100out:
 101	mutex_unlock(&vm_numa_stat_lock);
 102	return ret;
 103}
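
/*
 * Example (illustrative): writing the sysctl reaches this handler, e.g.
 * "echo 0 > /proc/sys/vm/numa_stat" disables NUMA statistics collection
 * and clears the existing counters, while "echo 1 > /proc/sys/vm/numa_stat"
 * re-enables collection.
 */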
 104#endif
 105
 106#ifdef CONFIG_VM_EVENT_COUNTERS
 107DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
 108EXPORT_PER_CPU_SYMBOL(vm_event_states);
 109
 110static void sum_vm_events(unsigned long *ret)
 111{
 112	int cpu;
 113	int i;
 114
 115	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
 116
 117	for_each_online_cpu(cpu) {
 118		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
 119
 120		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
 121			ret[i] += this->event[i];
 122	}
 123}
 124
 125/*
 126 * Accumulate the vm event counters across all CPUs.
 127 * The result is unavoidably approximate - it can change
 128 * during and after execution of this function.
 129*/
 130void all_vm_events(unsigned long *ret)
 131{
 132	get_online_cpus();
 133	sum_vm_events(ret);
 134	put_online_cpus();
 135}
 136EXPORT_SYMBOL_GPL(all_vm_events);
 137
 138/*
 139 * Fold the foreign cpu events into our own.
 140 *
 141 * This is adding to the events on one processor
 142 * but keeps the global counts constant.
 143 */
 144void vm_events_fold_cpu(int cpu)
 145{
 146	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
 147	int i;
 148
 149	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
 150		count_vm_events(i, fold_state->event[i]);
 151		fold_state->event[i] = 0;
 152	}
 153}
 154
 155#endif /* CONFIG_VM_EVENT_COUNTERS */
 156
 157/*
 158 * Manage combined zone based / global counters
 159 *
 160 * vm_stat contains the global counters
 161 */
 162atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
 163atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
 164atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS] __cacheline_aligned_in_smp;
 165EXPORT_SYMBOL(vm_zone_stat);
 166EXPORT_SYMBOL(vm_node_stat);
 167
 168#ifdef CONFIG_SMP
 169
 170int calculate_pressure_threshold(struct zone *zone)
 171{
 172	int threshold;
 173	int watermark_distance;
 174
 175	/*
 176	 * As vmstats are not up to date, there is drift between the estimated
 177	 * and real values. For high thresholds and a high number of CPUs, it
 178	 * is possible for the min watermark to be breached while the estimated
 179	 * value looks fine. The pressure threshold is a reduced value such
 180	 * that even the maximum amount of drift will not accidentally breach
 181	 * the min watermark
 182	 */
 183	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
 184	threshold = max(1, (int)(watermark_distance / num_online_cpus()));
 185
 186	/*
 187	 * Maximum threshold is 125
 188	 */
 189	threshold = min(125, threshold);
 190
 191	return threshold;
 192}
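
/*
 * Example (illustrative): with low_wmark - min_wmark == 1024 pages and 64
 * online CPUs, the pressure threshold is 1024 / 64 = 16, so even if every
 * CPU carries its full 16-page differential the combined drift
 * (64 * 16 = 1024 pages) cannot silently breach the min watermark while the
 * estimated free count still looks fine. With only 4 CPUs the raw value
 * would be 256, which the cap reduces to 125.
 */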
 193
 194int calculate_normal_threshold(struct zone *zone)
 195{
 196	int threshold;
 197	int mem;	/* memory in 128 MB units */
 198
 199	/*
 200	 * The threshold scales with the number of processors and the amount
 201	 * of memory per zone. More memory means that we can defer updates for
 202	 * longer, more processors could lead to more contention.
 203 	 * fls() is used to have a cheap way of logarithmic scaling.
 204	 *
 205	 * Some sample thresholds:
 206	 *
 207	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
 208	 * ------------------------------------------------------------------
 209	 * 8		1		1	0.9-1 GB	4
 210	 * 16		2		2	0.9-1 GB	4
 211	 * 20 		2		2	1-2 GB		5
 212	 * 24		2		2	2-4 GB		6
 213	 * 28		2		2	4-8 GB		7
 214	 * 32		2		2	8-16 GB		8
 215	 * 4		2		2	<128M		1
 216	 * 30		4		3	2-4 GB		5
 217	 * 48		4		3	8-16 GB		8
 218	 * 32		8		4	1-2 GB		4
 219	 * 32		8		4	0.9-1GB		4
 220	 * 10		16		5	<128M		1
 221	 * 40		16		5	900M		4
 222	 * 70		64		7	2-4 GB		5
 223	 * 84		64		7	4-8 GB		6
 224	 * 108		512		9	4-8 GB		6
 225	 * 125		1024		10	8-16 GB		8
 226	 * 125		1024		10	16-32 GB	9
 227	 */
 228
 229	mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);
 230
 231	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
 232
 233	/*
 234	 * Maximum threshold is 125
 235	 */
 236	threshold = min(125, threshold);
 237
 238	return threshold;
 239}
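
/*
 * Example (illustrative): a zone of roughly 8 GB corresponds to mem = 64
 * units of 128 MB, so fls(mem) = 7. On a 4-CPU machine fls(4) = 3, giving
 * threshold = 2 * 3 * (1 + 7) = 48, matching the "48 / 4 CPUs / 8-16 GB"
 * row in the table above. On very large machines the result is clamped
 * to 125.
 */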
 240
 241/*
 242 * Refresh the thresholds for each zone.
 243 */
 244void refresh_zone_stat_thresholds(void)
 245{
 246	struct pglist_data *pgdat;
 247	struct zone *zone;
 248	int cpu;
 249	int threshold;
 250
 251	/* Zero current pgdat thresholds */
 252	for_each_online_pgdat(pgdat) {
 253		for_each_online_cpu(cpu) {
 254			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
 255		}
 256	}
 257
 258	for_each_populated_zone(zone) {
 259		struct pglist_data *pgdat = zone->zone_pgdat;
 260		unsigned long max_drift, tolerate_drift;
 261
 262		threshold = calculate_normal_threshold(zone);
 263
 264		for_each_online_cpu(cpu) {
 265			int pgdat_threshold;
 266
 267			per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
 268							= threshold;
 269
 270			/* Base nodestat threshold on the largest populated zone. */
 271			pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
 272			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
 273				= max(threshold, pgdat_threshold);
 274		}
 275
 276		/*
 277		 * Only set percpu_drift_mark if there is a danger that
 278		 * NR_FREE_PAGES reports the low watermark is ok when in fact
 279		 * the min watermark could be breached by an allocation
 280		 */
 281		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
 282		max_drift = num_online_cpus() * threshold;
 283		if (max_drift > tolerate_drift)
 284			zone->percpu_drift_mark = high_wmark_pages(zone) +
 285					max_drift;
 286	}
 287}
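
/*
 * Example (illustrative): with 64 online CPUs and a per-cpu threshold of
 * 125, up to 64 * 125 = 8000 pages of updates may be sitting in per-cpu
 * diffs. If the low-to-min watermark gap is smaller than that, the zone
 * gets percpu_drift_mark = high_wmark + 8000, allowing watermark checks
 * near that point to fall back to a precise, fully folded reading of
 * NR_FREE_PAGES.
 */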
 288
 289void set_pgdat_percpu_threshold(pg_data_t *pgdat,
 290				int (*calculate_pressure)(struct zone *))
 291{
 292	struct zone *zone;
 293	int cpu;
 294	int threshold;
 295	int i;
 296
 297	for (i = 0; i < pgdat->nr_zones; i++) {
 298		zone = &pgdat->node_zones[i];
 299		if (!zone->percpu_drift_mark)
 300			continue;
 301
 302		threshold = (*calculate_pressure)(zone);
 303		for_each_online_cpu(cpu)
 304			per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
 305							= threshold;
 306	}
 307}
 308
 309/*
 310 * For use when we know that interrupts are disabled,
 311 * or when we know that preemption is disabled and that
 312 * particular counter cannot be updated from interrupt context.
 313 */
 314void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 315			   long delta)
 316{
 317	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
 318	s8 __percpu *p = pcp->vm_stat_diff + item;
 319	long x;
 320	long t;
 321
 322	x = delta + __this_cpu_read(*p);
 323
 324	t = __this_cpu_read(pcp->stat_threshold);
 325
 326	if (unlikely(abs(x) > t)) {
 327		zone_page_state_add(x, zone, item);
 328		x = 0;
 329	}
 330	__this_cpu_write(*p, x);
 331}
 332EXPORT_SYMBOL(__mod_zone_page_state);
 333
 334void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
 335				long delta)
 336{
 337	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
 338	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 339	long x;
 340	long t;
 341
 342	if (vmstat_item_in_bytes(item)) {
 343		/*
 344		 * Only cgroups use subpage accounting right now; at
 345		 * the global level, these items still change in
 346		 * multiples of whole pages. Store them as pages
 347		 * internally to keep the per-cpu counters compact.
 348		 */
 349		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
 350		delta >>= PAGE_SHIFT;
 351	}
 352
 353	x = delta + __this_cpu_read(*p);
 354
 355	t = __this_cpu_read(pcp->stat_threshold);
 356
 357	if (unlikely(abs(x) > t)) {
 358		node_page_state_add(x, pgdat, item);
 359		x = 0;
 360	}
 361	__this_cpu_write(*p, x);
 362}
 363EXPORT_SYMBOL(__mod_node_page_state);
 364
 365/*
 366 * Optimized increment and decrement functions.
 367 *
 368 * These are only for a single page and therefore can take a struct page *
 369 * argument instead of struct zone *. This allows the inclusion of the code
 370 * generated for page_zone(page) into the optimized functions.
 371 *
 372 * No overflow check is necessary and therefore the differential can be
 373 * incremented or decremented in place which may allow the compilers to
 374 * generate better code.
 375 * The increment or decrement is known and therefore one boundary check can
 376 * be omitted.
 377 *
 378 * NOTE: These functions are very performance sensitive. Change only
 379 * with care.
 380 *
 381 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 382 * However, the code must first determine the differential location in a zone
 383 * based on the processor number and then inc/dec the counter. There is no
 384 * guarantee without disabling preemption that the processor will not change
 385 * in between and therefore the atomicity vs. interrupt cannot be exploited
 386 * in a useful way here.
 387 */
 388void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 389{
 390	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
 391	s8 __percpu *p = pcp->vm_stat_diff + item;
 392	s8 v, t;
 393
 394	v = __this_cpu_inc_return(*p);
 395	t = __this_cpu_read(pcp->stat_threshold);
 396	if (unlikely(v > t)) {
 397		s8 overstep = t >> 1;
 398
 399		zone_page_state_add(v + overstep, zone, item);
 400		__this_cpu_write(*p, -overstep);
 401	}
 402}
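
/*
 * Example (illustrative): with stat_threshold = 32, the 33rd consecutive
 * increment yields v = 33 > t. overstep = t >> 1 = 16, so 33 + 16 = 49 is
 * folded into the zone counter and the per-cpu diff is reset to -16
 * (global + diff still equals the 33 real events). The next 48 increments
 * then complete without touching the shared counter cacheline.
 */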
 403
 404void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 405{
 406	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
 407	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 408	s8 v, t;
 409
 410	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
 411
 412	v = __this_cpu_inc_return(*p);
 413	t = __this_cpu_read(pcp->stat_threshold);
 414	if (unlikely(v > t)) {
 415		s8 overstep = t >> 1;
 416
 417		node_page_state_add(v + overstep, pgdat, item);
 418		__this_cpu_write(*p, -overstep);
 419	}
 420}
 421
 422void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
 423{
 424	__inc_zone_state(page_zone(page), item);
 425}
 426EXPORT_SYMBOL(__inc_zone_page_state);
 427
 428void __inc_node_page_state(struct page *page, enum node_stat_item item)
 429{
 430	__inc_node_state(page_pgdat(page), item);
 431}
 432EXPORT_SYMBOL(__inc_node_page_state);
 433
 434void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 435{
 436	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
 437	s8 __percpu *p = pcp->vm_stat_diff + item;
 438	s8 v, t;
 439
 440	v = __this_cpu_dec_return(*p);
 441	t = __this_cpu_read(pcp->stat_threshold);
 442	if (unlikely(v < - t)) {
 443		s8 overstep = t >> 1;
 444
 445		zone_page_state_add(v - overstep, zone, item);
 446		__this_cpu_write(*p, overstep);
 447	}
 448}
 449
 450void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 451{
 452	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
 453	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 454	s8 v, t;
 455
 456	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
 457
 458	v = __this_cpu_dec_return(*p);
 459	t = __this_cpu_read(pcp->stat_threshold);
 460	if (unlikely(v < - t)) {
 461		s8 overstep = t >> 1;
 462
 463		node_page_state_add(v - overstep, pgdat, item);
 464		__this_cpu_write(*p, overstep);
 465	}
 466}
 467
 468void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
 469{
 470	__dec_zone_state(page_zone(page), item);
 471}
 472EXPORT_SYMBOL(__dec_zone_page_state);
 473
 474void __dec_node_page_state(struct page *page, enum node_stat_item item)
 475{
 476	__dec_node_state(page_pgdat(page), item);
 477}
 478EXPORT_SYMBOL(__dec_node_page_state);
 479
 480#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
 481/*
 482 * If we have cmpxchg_local support then we do not need to incur the overhead
 483 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
 484 *
 485 * mod_state() modifies the zone counter state through atomic per cpu
 486 * operations.
 487 *
 488 * Overstep mode specifies how overstep should be handled:
 489 *     0       No overstepping
 490 *     1       Overstepping half of threshold
 491 *     -1      Overstepping minus half of threshold
 492*/
 493static inline void mod_zone_state(struct zone *zone,
 494       enum zone_stat_item item, long delta, int overstep_mode)
 495{
 496	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
 497	s8 __percpu *p = pcp->vm_stat_diff + item;
 498	long o, n, t, z;
 499
 500	do {
 501		z = 0;  /* overflow to zone counters */
 502
 503		/*
 504		 * The fetching of the stat_threshold is racy. We may apply
 505		 * a counter threshold to the wrong cpu if we get
 506		 * rescheduled while executing here. However, the next
 507		 * counter update will apply the threshold again and
 508		 * therefore bring the counter under the threshold again.
 509		 *
 510		 * Most of the time the thresholds are the same anyway
 511		 * for all cpus in a zone.
 512		 */
 513		t = this_cpu_read(pcp->stat_threshold);
 514
 515		o = this_cpu_read(*p);
 516		n = delta + o;
 517
 518		if (abs(n) > t) {
 519			int os = overstep_mode * (t >> 1);
 520
 521			/* Overflow must be added to zone counters */
 522			z = n + os;
 523			n = -os;
 524		}
 525	} while (this_cpu_cmpxchg(*p, o, n) != o);
 526
 527	if (z)
 528		zone_page_state_add(z, zone, item);
 529}
 530
 531void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 532			 long delta)
 533{
 534	mod_zone_state(zone, item, delta, 0);
 535}
 536EXPORT_SYMBOL(mod_zone_page_state);
 537
 538void inc_zone_page_state(struct page *page, enum zone_stat_item item)
 539{
 540	mod_zone_state(page_zone(page), item, 1, 1);
 541}
 542EXPORT_SYMBOL(inc_zone_page_state);
 543
 544void dec_zone_page_state(struct page *page, enum zone_stat_item item)
 545{
 546	mod_zone_state(page_zone(page), item, -1, -1);
 547}
 548EXPORT_SYMBOL(dec_zone_page_state);
 549
 550static inline void mod_node_state(struct pglist_data *pgdat,
 551       enum node_stat_item item, int delta, int overstep_mode)
 552{
 553	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
 554	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 555	long o, n, t, z;
 556
 557	if (vmstat_item_in_bytes(item)) {
 558		/*
 559		 * Only cgroups use subpage accounting right now; at
 560		 * the global level, these items still change in
 561		 * multiples of whole pages. Store them as pages
 562		 * internally to keep the per-cpu counters compact.
 563		 */
 564		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
 565		delta >>= PAGE_SHIFT;
 566	}
 567
 568	do {
 569		z = 0;  /* overflow to node counters */
 570
 571		/*
 572		 * The fetching of the stat_threshold is racy. We may apply
 573		 * a counter threshold to the wrong cpu if we get
 574		 * rescheduled while executing here. However, the next
 575		 * counter update will apply the threshold again and
 576		 * therefore bring the counter under the threshold again.
 577		 *
 578		 * Most of the time the thresholds are the same anyway
 579		 * for all cpus in a node.
 580		 */
 581		t = this_cpu_read(pcp->stat_threshold);
 582
 583		o = this_cpu_read(*p);
 584		n = delta + o;
 585
 586		if (abs(n) > t) {
 587			int os = overstep_mode * (t >> 1);
 588
 589			/* Overflow must be added to node counters */
 590			z = n + os;
 591			n = -os;
 592		}
 593	} while (this_cpu_cmpxchg(*p, o, n) != o);
 594
 595	if (z)
 596		node_page_state_add(z, pgdat, item);
 597}
 598
 599void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
 600					long delta)
 601{
 602	mod_node_state(pgdat, item, delta, 0);
 603}
 604EXPORT_SYMBOL(mod_node_page_state);
 605
 606void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 607{
 608	mod_node_state(pgdat, item, 1, 1);
 609}
 610
 611void inc_node_page_state(struct page *page, enum node_stat_item item)
 612{
 613	mod_node_state(page_pgdat(page), item, 1, 1);
 614}
 615EXPORT_SYMBOL(inc_node_page_state);
 616
 617void dec_node_page_state(struct page *page, enum node_stat_item item)
 618{
 619	mod_node_state(page_pgdat(page), item, -1, -1);
 620}
 621EXPORT_SYMBOL(dec_node_page_state);
 622#else
 623/*
 624 * Use interrupt disable to serialize counter updates
 625 */
 626void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 627			 long delta)
 628{
 629	unsigned long flags;
 630
 631	local_irq_save(flags);
 632	__mod_zone_page_state(zone, item, delta);
 633	local_irq_restore(flags);
 634}
 635EXPORT_SYMBOL(mod_zone_page_state);
 636
 637void inc_zone_page_state(struct page *page, enum zone_stat_item item)
 638{
 639	unsigned long flags;
 640	struct zone *zone;
 641
 642	zone = page_zone(page);
 643	local_irq_save(flags);
 644	__inc_zone_state(zone, item);
 645	local_irq_restore(flags);
 646}
 647EXPORT_SYMBOL(inc_zone_page_state);
 648
 649void dec_zone_page_state(struct page *page, enum zone_stat_item item)
 650{
 651	unsigned long flags;
 652
 653	local_irq_save(flags);
 654	__dec_zone_page_state(page, item);
 655	local_irq_restore(flags);
 656}
 657EXPORT_SYMBOL(dec_zone_page_state);
 658
 659void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 660{
 661	unsigned long flags;
 662
 663	local_irq_save(flags);
 664	__inc_node_state(pgdat, item);
 665	local_irq_restore(flags);
 666}
 667EXPORT_SYMBOL(inc_node_state);
 668
 669void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
 670					long delta)
 671{
 672	unsigned long flags;
 673
 674	local_irq_save(flags);
 675	__mod_node_page_state(pgdat, item, delta);
 676	local_irq_restore(flags);
 677}
 678EXPORT_SYMBOL(mod_node_page_state);
 679
 680void inc_node_page_state(struct page *page, enum node_stat_item item)
 681{
 682	unsigned long flags;
 683	struct pglist_data *pgdat;
 684
 685	pgdat = page_pgdat(page);
 686	local_irq_save(flags);
 687	__inc_node_state(pgdat, item);
 688	local_irq_restore(flags);
 689}
 690EXPORT_SYMBOL(inc_node_page_state);
 691
 692void dec_node_page_state(struct page *page, enum node_stat_item item)
 693{
 694	unsigned long flags;
 695
 696	local_irq_save(flags);
 697	__dec_node_page_state(page, item);
 698	local_irq_restore(flags);
 699}
 700EXPORT_SYMBOL(dec_node_page_state);
 701#endif
 702
 703/*
 704 * Fold a differential into the global counters.
 705 * Returns the number of counters updated.
 706 */
 707static int fold_diff(int *zone_diff, int *node_diff)
 708{
 709	int i;
 710	int changes = 0;
 711
 712	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 713		if (zone_diff[i]) {
 714			atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
 715			changes++;
 716	}
 717
 718	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
 719		if (node_diff[i]) {
 720			atomic_long_add(node_diff[i], &vm_node_stat[i]);
 721			changes++;
 722	}
 723	return changes;
 724}
 725
 726#ifdef CONFIG_NUMA
 727static void fold_vm_zone_numa_events(struct zone *zone)
 728{
 729	unsigned long zone_numa_events[NR_VM_NUMA_EVENT_ITEMS] = { 0, };
 730	int cpu;
 731	enum numa_stat_item item;
 732
 733	for_each_online_cpu(cpu) {
 734		struct per_cpu_zonestat *pzstats;
 735
 736		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
 737		for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
 738			zone_numa_events[item] += xchg(&pzstats->vm_numa_event[item], 0);
 739	}
 740
 741	for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
 742		zone_numa_event_add(zone_numa_events[item], zone, item);
 743}
 744
 745void fold_vm_numa_events(void)
 746{
 747	struct zone *zone;
 748
 749	for_each_populated_zone(zone)
 750		fold_vm_zone_numa_events(zone);
 751}
 752#endif
 753
 754/*
 755 * Update the zone counters for the current cpu.
 756 *
 757 * Note that refresh_cpu_vm_stats strives to only access
 758 * node local memory. The per cpu pagesets on remote zones are placed
 759 * in the memory local to the processor using that pageset. So the
 760 * loop over all zones will access a series of cachelines local to
 761 * the processor.
 762 *
 763 * The call to zone_page_state_add updates the cachelines with the
 764 * statistics in the remote zone struct as well as the global cachelines
 765 * with the global counters. These could cause remote node cache line
 766 * bouncing and will have to be only done when necessary.
 767 *
 768 * The function returns the number of global counters updated.
 769 */
 770static int refresh_cpu_vm_stats(bool do_pagesets)
 771{
 772	struct pglist_data *pgdat;
 773	struct zone *zone;
 774	int i;
 775	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 776	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
 777	int changes = 0;
 778
 779	for_each_populated_zone(zone) {
 780		struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
 781#ifdef CONFIG_NUMA
 782		struct per_cpu_pages __percpu *pcp = zone->per_cpu_pageset;
 783#endif
 784
 785		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
 786			int v;
 787
 788			v = this_cpu_xchg(pzstats->vm_stat_diff[i], 0);
 789			if (v) {
 790
 791				atomic_long_add(v, &zone->vm_stat[i]);
 792				global_zone_diff[i] += v;
 793#ifdef CONFIG_NUMA
 794				/* 3 seconds idle till flush */
 795				__this_cpu_write(pcp->expire, 3);
 796#endif
 797			}
 798		}
 799#ifdef CONFIG_NUMA
 800
 801		if (do_pagesets) {
 802			cond_resched();
 803			/*
 804			 * Deal with draining the remote pageset of this
 805			 * processor
 806			 *
 807			 * Check if there are pages remaining in this pageset
 808			 * if not then there is nothing to expire.
 809			 */
 810			if (!__this_cpu_read(pcp->expire) ||
 811			       !__this_cpu_read(pcp->count))
 812				continue;
 813
 814			/*
 815			 * We never drain zones local to this processor.
 816			 */
 817			if (zone_to_nid(zone) == numa_node_id()) {
 818				__this_cpu_write(pcp->expire, 0);
 819				continue;
 820			}
 821
 822			if (__this_cpu_dec_return(pcp->expire))
 823				continue;
 824
 825			if (__this_cpu_read(pcp->count)) {
 826				drain_zone_pages(zone, this_cpu_ptr(pcp));
 827				changes++;
 828			}
 829		}
 830#endif
 831	}
 832
 833	for_each_online_pgdat(pgdat) {
 834		struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;
 835
 836		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
 837			int v;
 838
 839			v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
 840			if (v) {
 841				atomic_long_add(v, &pgdat->vm_stat[i]);
 842				global_node_diff[i] += v;
 843			}
 844		}
 845	}
 846
 847	changes += fold_diff(global_zone_diff, global_node_diff);
 848	return changes;
 849}
 850
 851/*
 852 * Fold the data for an offline cpu into the global array.
 853 * There cannot be any access by the offline cpu and therefore
 854 * synchronization is simplified.
 855 */
 856void cpu_vm_stats_fold(int cpu)
 857{
 858	struct pglist_data *pgdat;
 859	struct zone *zone;
 860	int i;
 861	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 862	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
 863
 864	for_each_populated_zone(zone) {
 865		struct per_cpu_zonestat *pzstats;
 866
 867		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
 868
 869		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
 870			if (pzstats->vm_stat_diff[i]) {
 871				int v;
 872
 873				v = pzstats->vm_stat_diff[i];
 874				pzstats->vm_stat_diff[i] = 0;
 875				atomic_long_add(v, &zone->vm_stat[i]);
 876				global_zone_diff[i] += v;
 877			}
 878		}
 879#ifdef CONFIG_NUMA
 880		for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) {
 881			if (pzstats->vm_numa_event[i]) {
 882				unsigned long v;
 883
 884				v = pzstats->vm_numa_event[i];
 885				pzstats->vm_numa_event[i] = 0;
 886				zone_numa_event_add(v, zone, i);
 887			}
 888		}
 889#endif
 890	}
 891
 892	for_each_online_pgdat(pgdat) {
 893		struct per_cpu_nodestat *p;
 894
 895		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
 896
 897		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
 898			if (p->vm_node_stat_diff[i]) {
 899				int v;
 900
 901				v = p->vm_node_stat_diff[i];
 902				p->vm_node_stat_diff[i] = 0;
 903				atomic_long_add(v, &pgdat->vm_stat[i]);
 904				global_node_diff[i] += v;
 905			}
 906	}
 907
 908	fold_diff(global_zone_diff, global_node_diff);
 909}
 910
 911/*
 912 * This is only called if !populated_zone(zone), which implies no other users of
 913 * pzstats->vm_stat_diff[] exist.
 914 */
 915void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *pzstats)
 916{
 917	unsigned long v;
 918	int i;
 919
 920	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
 921		if (pzstats->vm_stat_diff[i]) {
 922			v = pzstats->vm_stat_diff[i];
 923			pzstats->vm_stat_diff[i] = 0;
 924			zone_page_state_add(v, zone, i);
 925		}
 926	}
 927
 928#ifdef CONFIG_NUMA
 929	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) {
 930		if (pzstats->vm_numa_event[i]) {
 931			v = pzstats->vm_numa_event[i];
 932			pzstats->vm_numa_event[i] = 0;
 933			zone_numa_event_add(v, zone, i);
 934		}
 935	}
 936#endif
 937}
 938#endif
 939
 940#ifdef CONFIG_NUMA
 941/*
 942 * Determine the per node value of a stat item. This function
 943 * is called frequently in a NUMA machine, so try to be as
 944 * frugal as possible.
 945 */
 946unsigned long sum_zone_node_page_state(int node,
 947				 enum zone_stat_item item)
 948{
 949	struct zone *zones = NODE_DATA(node)->node_zones;
 950	int i;
 951	unsigned long count = 0;
 952
 953	for (i = 0; i < MAX_NR_ZONES; i++)
 954		count += zone_page_state(zones + i, item);
 955
 956	return count;
 957}
 958
 959/* Determine the per node value of a numa stat item. */
 960unsigned long sum_zone_numa_event_state(int node,
 961				 enum numa_stat_item item)
 962{
 963	struct zone *zones = NODE_DATA(node)->node_zones;
 964	unsigned long count = 0;
 965	int i;
 966
 967	for (i = 0; i < MAX_NR_ZONES; i++)
 968		count += zone_numa_event_state(zones + i, item);
 969
 970	return count;
 971}
 972
 973/*
 974 * Determine the per node value of a stat item.
 975 */
 976unsigned long node_page_state_pages(struct pglist_data *pgdat,
 977				    enum node_stat_item item)
 978{
 979	long x = atomic_long_read(&pgdat->vm_stat[item]);
 980#ifdef CONFIG_SMP
 981	if (x < 0)
 982		x = 0;
 983#endif
 984	return x;
 985}
 986
 987unsigned long node_page_state(struct pglist_data *pgdat,
 988			      enum node_stat_item item)
 989{
 990	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
 991
 992	return node_page_state_pages(pgdat, item);
 993}
 994#endif
 995
 996#ifdef CONFIG_COMPACTION
 997
 998struct contig_page_info {
 999	unsigned long free_pages;
1000	unsigned long free_blocks_total;
1001	unsigned long free_blocks_suitable;
1002};
1003
1004/*
1005 * Calculate the number of free pages in a zone, how many contiguous
1006 * pages are free and how many are large enough to satisfy an allocation of
1007 * the target size. Note that this function makes no attempt to estimate
1008 * how many suitable free blocks there *might* be if MOVABLE pages were
1009 * migrated. Calculating that is possible, but expensive and can be
1010 * figured out from userspace
1011 */
1012static void fill_contig_page_info(struct zone *zone,
1013				unsigned int suitable_order,
1014				struct contig_page_info *info)
1015{
1016	unsigned int order;
1017
1018	info->free_pages = 0;
1019	info->free_blocks_total = 0;
1020	info->free_blocks_suitable = 0;
1021
1022	for (order = 0; order < MAX_ORDER; order++) {
1023		unsigned long blocks;
1024
1025		/* Count number of free blocks */
1026		blocks = zone->free_area[order].nr_free;
1027		info->free_blocks_total += blocks;
1028
1029		/* Count free base pages */
1030		info->free_pages += blocks << order;
1031
1032		/* Count the suitable free blocks */
1033		if (order >= suitable_order)
1034			info->free_blocks_suitable += blocks <<
1035						(order - suitable_order);
1036	}
1037}
1038
1039/*
1040 * A fragmentation index only makes sense if an allocation of a requested
1041 * size would fail. If that is true, the fragmentation index indicates
1042 * whether external fragmentation or a lack of memory was the problem.
1043 * The value can be used to determine if page reclaim or compaction
1044 * should be used
1045 */
1046static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
1047{
1048	unsigned long requested = 1UL << order;
1049
1050	if (WARN_ON_ONCE(order >= MAX_ORDER))
1051		return 0;
1052
1053	if (!info->free_blocks_total)
1054		return 0;
1055
1056	/* Fragmentation index only makes sense when a request would fail */
1057	if (info->free_blocks_suitable)
1058		return -1000;
1059
1060	/*
1061	 * Index is between 0 and 1 so return within 3 decimal places
1062	 *
1063	 * 0 => allocation would fail due to lack of memory
1064	 * 1 => allocation would fail due to fragmentation
1065	 */
1066	return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL, requested)), info->free_blocks_total);
1067}
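
/*
 * Example (illustrative): for an order-4 request (16 pages) against a zone
 * with 1000 free pages spread over 1000 order-0 blocks, the index is
 * 1000 - (1000 + 1000 * 1000 / 16) / 1000 = 1000 - 63 = 937: plenty of
 * memory, but too fragmented, so compaction is the appropriate response.
 * With only a handful of free pages the subtracted term grows and the
 * index drops toward 0, pointing at reclaim instead.
 */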
1068
1069/*
1070 * Calculates external fragmentation within a zone wrt the given order.
1071 * It is defined as the percentage of pages found in blocks of size
1072 * less than 1 << order. It returns values in range [0, 100].
1073 */
1074unsigned int extfrag_for_order(struct zone *zone, unsigned int order)
1075{
1076	struct contig_page_info info;
1077
1078	fill_contig_page_info(zone, order, &info);
1079	if (info.free_pages == 0)
1080		return 0;
1081
1082	return div_u64((info.free_pages -
1083			(info.free_blocks_suitable << order)) * 100,
1084			info.free_pages);
1085}
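
/*
 * Example (illustrative): for order 3 (8-page blocks), if a zone has 1000
 * free pages of which the order >= 3 blocks account for 100 order-3 sized
 * chunks (100 << 3 = 800 pages), the result is
 * (1000 - 800) * 100 / 1000 = 20, i.e. 20% of the free memory sits in
 * blocks too small to serve an order-3 request.
 */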
1086
1087/* Same as __fragmentation_index but allocs contig_page_info on stack */
1088int fragmentation_index(struct zone *zone, unsigned int order)
1089{
1090	struct contig_page_info info;
1091
1092	fill_contig_page_info(zone, order, &info);
1093	return __fragmentation_index(order, &info);
1094}
1095#endif
1096
1097#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || \
1098    defined(CONFIG_NUMA) || defined(CONFIG_MEMCG)
1099#ifdef CONFIG_ZONE_DMA
1100#define TEXT_FOR_DMA(xx) xx "_dma",
1101#else
1102#define TEXT_FOR_DMA(xx)
1103#endif
1104
1105#ifdef CONFIG_ZONE_DMA32
1106#define TEXT_FOR_DMA32(xx) xx "_dma32",
1107#else
1108#define TEXT_FOR_DMA32(xx)
1109#endif
1110
1111#ifdef CONFIG_HIGHMEM
1112#define TEXT_FOR_HIGHMEM(xx) xx "_high",
1113#else
1114#define TEXT_FOR_HIGHMEM(xx)
1115#endif
1116
1117#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
1118					TEXT_FOR_HIGHMEM(xx) xx "_movable",
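
/*
 * Example (illustrative, assuming ZONE_DMA, ZONE_DMA32 and HIGHMEM are all
 * configured): TEXTS_FOR_ZONES("pgalloc") expands to
 * "pgalloc_dma", "pgalloc_dma32", "pgalloc_normal", "pgalloc_high",
 * "pgalloc_movable",
 */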
1119
1120const char * const vmstat_text[] = {
1121	/* enum zone_stat_item counters */
1122	"nr_free_pages",
1123	"nr_zone_inactive_anon",
1124	"nr_zone_active_anon",
1125	"nr_zone_inactive_file",
1126	"nr_zone_active_file",
1127	"nr_zone_unevictable",
1128	"nr_zone_write_pending",
1129	"nr_mlock",
1130	"nr_bounce",
1131#if IS_ENABLED(CONFIG_ZSMALLOC)
1132	"nr_zspages",
1133#endif
1134	"nr_free_cma",
1135
1136	/* enum numa_stat_item counters */
1137#ifdef CONFIG_NUMA
1138	"numa_hit",
1139	"numa_miss",
1140	"numa_foreign",
1141	"numa_interleave",
1142	"numa_local",
1143	"numa_other",
1144#endif
1145
1146	/* enum node_stat_item counters */
1147	"nr_inactive_anon",
1148	"nr_active_anon",
1149	"nr_inactive_file",
1150	"nr_active_file",
1151	"nr_unevictable",
1152	"nr_slab_reclaimable",
1153	"nr_slab_unreclaimable",
1154	"nr_isolated_anon",
1155	"nr_isolated_file",
1156	"workingset_nodes",
1157	"workingset_refault_anon",
1158	"workingset_refault_file",
1159	"workingset_activate_anon",
1160	"workingset_activate_file",
1161	"workingset_restore_anon",
1162	"workingset_restore_file",
1163	"workingset_nodereclaim",
1164	"nr_anon_pages",
1165	"nr_mapped",
1166	"nr_file_pages",
1167	"nr_dirty",
1168	"nr_writeback",
1169	"nr_writeback_temp",
1170	"nr_shmem",
1171	"nr_shmem_hugepages",
1172	"nr_shmem_pmdmapped",
1173	"nr_file_hugepages",
1174	"nr_file_pmdmapped",
1175	"nr_anon_transparent_hugepages",
1176	"nr_vmscan_write",
1177	"nr_vmscan_immediate_reclaim",
1178	"nr_dirtied",
1179	"nr_written",
1180	"nr_kernel_misc_reclaimable",
1181	"nr_foll_pin_acquired",
1182	"nr_foll_pin_released",
1183	"nr_kernel_stack",
1184#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
1185	"nr_shadow_call_stack",
1186#endif
1187	"nr_page_table_pages",
1188#ifdef CONFIG_SWAP
1189	"nr_swapcached",
1190#endif
1191
1192	/* enum writeback_stat_item counters */
1193	"nr_dirty_threshold",
1194	"nr_dirty_background_threshold",
1195
1196#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
1197	/* enum vm_event_item counters */
1198	"pgpgin",
1199	"pgpgout",
1200	"pswpin",
1201	"pswpout",
1202
1203	TEXTS_FOR_ZONES("pgalloc")
1204	TEXTS_FOR_ZONES("allocstall")
1205	TEXTS_FOR_ZONES("pgskip")
1206
1207	"pgfree",
1208	"pgactivate",
1209	"pgdeactivate",
1210	"pglazyfree",
1211
1212	"pgfault",
1213	"pgmajfault",
1214	"pglazyfreed",
1215
1216	"pgrefill",
1217	"pgreuse",
1218	"pgsteal_kswapd",
1219	"pgsteal_direct",
1220	"pgscan_kswapd",
1221	"pgscan_direct",
1222	"pgscan_direct_throttle",
1223	"pgscan_anon",
1224	"pgscan_file",
1225	"pgsteal_anon",
1226	"pgsteal_file",
1227
1228#ifdef CONFIG_NUMA
1229	"zone_reclaim_failed",
1230#endif
1231	"pginodesteal",
1232	"slabs_scanned",
1233	"kswapd_inodesteal",
1234	"kswapd_low_wmark_hit_quickly",
1235	"kswapd_high_wmark_hit_quickly",
1236	"pageoutrun",
1237
1238	"pgrotated",
1239
1240	"drop_pagecache",
1241	"drop_slab",
1242	"oom_kill",
1243
1244#ifdef CONFIG_NUMA_BALANCING
1245	"numa_pte_updates",
1246	"numa_huge_pte_updates",
1247	"numa_hint_faults",
1248	"numa_hint_faults_local",
1249	"numa_pages_migrated",
1250#endif
1251#ifdef CONFIG_MIGRATION
1252	"pgmigrate_success",
1253	"pgmigrate_fail",
1254	"thp_migration_success",
1255	"thp_migration_fail",
1256	"thp_migration_split",
1257#endif
1258#ifdef CONFIG_COMPACTION
1259	"compact_migrate_scanned",
1260	"compact_free_scanned",
1261	"compact_isolated",
1262	"compact_stall",
1263	"compact_fail",
1264	"compact_success",
1265	"compact_daemon_wake",
1266	"compact_daemon_migrate_scanned",
1267	"compact_daemon_free_scanned",
1268#endif
1269
1270#ifdef CONFIG_HUGETLB_PAGE
1271	"htlb_buddy_alloc_success",
1272	"htlb_buddy_alloc_fail",
1273#endif
1274#ifdef CONFIG_CMA
1275	"cma_alloc_success",
1276	"cma_alloc_fail",
1277#endif
1278	"unevictable_pgs_culled",
1279	"unevictable_pgs_scanned",
1280	"unevictable_pgs_rescued",
1281	"unevictable_pgs_mlocked",
1282	"unevictable_pgs_munlocked",
1283	"unevictable_pgs_cleared",
1284	"unevictable_pgs_stranded",
1285
1286#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1287	"thp_fault_alloc",
1288	"thp_fault_fallback",
1289	"thp_fault_fallback_charge",
1290	"thp_collapse_alloc",
1291	"thp_collapse_alloc_failed",
1292	"thp_file_alloc",
1293	"thp_file_fallback",
1294	"thp_file_fallback_charge",
1295	"thp_file_mapped",
1296	"thp_split_page",
1297	"thp_split_page_failed",
1298	"thp_deferred_split_page",
1299	"thp_split_pmd",
1300#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1301	"thp_split_pud",
1302#endif
1303	"thp_zero_page_alloc",
1304	"thp_zero_page_alloc_failed",
1305	"thp_swpout",
1306	"thp_swpout_fallback",
1307#endif
1308#ifdef CONFIG_MEMORY_BALLOON
1309	"balloon_inflate",
1310	"balloon_deflate",
1311#ifdef CONFIG_BALLOON_COMPACTION
1312	"balloon_migrate",
1313#endif
1314#endif /* CONFIG_MEMORY_BALLOON */
1315#ifdef CONFIG_DEBUG_TLBFLUSH
1316	"nr_tlb_remote_flush",
1317	"nr_tlb_remote_flush_received",
1318	"nr_tlb_local_flush_all",
1319	"nr_tlb_local_flush_one",
1320#endif /* CONFIG_DEBUG_TLBFLUSH */
1321
1322#ifdef CONFIG_DEBUG_VM_VMACACHE
1323	"vmacache_find_calls",
1324	"vmacache_find_hits",
1325#endif
1326#ifdef CONFIG_SWAP
1327	"swap_ra",
1328	"swap_ra_hit",
1329#endif
1330#ifdef CONFIG_X86
1331	"direct_map_level2_splits",
1332	"direct_map_level3_splits",
1333#endif
1334#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
1335};
1336#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */
1337
1338#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
1339     defined(CONFIG_PROC_FS)
1340static void *frag_start(struct seq_file *m, loff_t *pos)
1341{
1342	pg_data_t *pgdat;
1343	loff_t node = *pos;
1344
1345	for (pgdat = first_online_pgdat();
1346	     pgdat && node;
1347	     pgdat = next_online_pgdat(pgdat))
1348		--node;
1349
1350	return pgdat;
1351}
1352
1353static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
1354{
1355	pg_data_t *pgdat = (pg_data_t *)arg;
1356
1357	(*pos)++;
1358	return next_online_pgdat(pgdat);
1359}
1360
1361static void frag_stop(struct seq_file *m, void *arg)
1362{
1363}
1364
1365/*
1366 * Walk zones in a node and print using a callback.
1367 * If @assert_populated is true, only use callback for zones that are populated.
1368 */
1369static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
1370		bool assert_populated, bool nolock,
1371		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
1372{
1373	struct zone *zone;
1374	struct zone *node_zones = pgdat->node_zones;
1375	unsigned long flags;
1376
1377	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
1378		if (assert_populated && !populated_zone(zone))
1379			continue;
1380
1381		if (!nolock)
1382			spin_lock_irqsave(&zone->lock, flags);
1383		print(m, pgdat, zone);
1384		if (!nolock)
1385			spin_unlock_irqrestore(&zone->lock, flags);
1386	}
1387}
1388#endif
1389
1390#ifdef CONFIG_PROC_FS
1391static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
1392						struct zone *zone)
1393{
1394	int order;
1395
1396	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1397	for (order = 0; order < MAX_ORDER; ++order)
1398		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
1399	seq_putc(m, '\n');
1400}
1401
1402/*
1403 * This walks the free areas for each zone.
1404 */
1405static int frag_show(struct seq_file *m, void *arg)
1406{
1407	pg_data_t *pgdat = (pg_data_t *)arg;
1408	walk_zones_in_node(m, pgdat, true, false, frag_show_print);
1409	return 0;
1410}
1411
1412static void pagetypeinfo_showfree_print(struct seq_file *m,
1413					pg_data_t *pgdat, struct zone *zone)
1414{
1415	int order, mtype;
1416
1417	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
1418		seq_printf(m, "Node %4d, zone %8s, type %12s ",
1419					pgdat->node_id,
1420					zone->name,
1421					migratetype_names[mtype]);
1422		for (order = 0; order < MAX_ORDER; ++order) {
1423			unsigned long freecount = 0;
1424			struct free_area *area;
1425			struct list_head *curr;
1426			bool overflow = false;
1427
1428			area = &(zone->free_area[order]);
1429
1430			list_for_each(curr, &area->free_list[mtype]) {
1431				/*
1432				 * Cap the free_list iteration because it might
1433				 * be really large and we are under a spinlock
1434				 * so a long time spent here could trigger a
1435				 * hard lockup detector. Anyway this is a
1436				 * debugging tool so knowing there is a handful
1437				 * of pages of this order should be more than
1438				 * sufficient.
1439				 */
1440				if (++freecount >= 100000) {
1441					overflow = true;
1442					break;
1443				}
1444			}
1445			seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
1446			spin_unlock_irq(&zone->lock);
1447			cond_resched();
1448			spin_lock_irq(&zone->lock);
1449		}
1450		seq_putc(m, '\n');
1451	}
1452}
1453
1454/* Print out the free pages at each order for each migratetype */
1455static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
1456{
1457	int order;
1458	pg_data_t *pgdat = (pg_data_t *)arg;
1459
1460	/* Print header */
1461	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
1462	for (order = 0; order < MAX_ORDER; ++order)
1463		seq_printf(m, "%6d ", order);
1464	seq_putc(m, '\n');
1465
1466	walk_zones_in_node(m, pgdat, true, false, pagetypeinfo_showfree_print);
1467
1468	return 0;
1469}
1470
1471static void pagetypeinfo_showblockcount_print(struct seq_file *m,
1472					pg_data_t *pgdat, struct zone *zone)
1473{
1474	int mtype;
1475	unsigned long pfn;
1476	unsigned long start_pfn = zone->zone_start_pfn;
1477	unsigned long end_pfn = zone_end_pfn(zone);
1478	unsigned long count[MIGRATE_TYPES] = { 0, };
1479
1480	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
1481		struct page *page;
1482
1483		page = pfn_to_online_page(pfn);
1484		if (!page)
1485			continue;
1486
1487		if (page_zone(page) != zone)
1488			continue;
1489
1490		mtype = get_pageblock_migratetype(page);
1491
1492		if (mtype < MIGRATE_TYPES)
1493			count[mtype]++;
1494	}
1495
1496	/* Print counts */
1497	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1498	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1499		seq_printf(m, "%12lu ", count[mtype]);
1500	seq_putc(m, '\n');
1501}
1502
1503/* Print out the number of pageblocks for each migratetype */
1504static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
1505{
1506	int mtype;
1507	pg_data_t *pgdat = (pg_data_t *)arg;
1508
1509	seq_printf(m, "\n%-23s", "Number of blocks type ");
1510	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1511		seq_printf(m, "%12s ", migratetype_names[mtype]);
1512	seq_putc(m, '\n');
1513	walk_zones_in_node(m, pgdat, true, false,
1514		pagetypeinfo_showblockcount_print);
1515
1516	return 0;
1517}
1518
1519/*
1520 * Print out the number of pageblocks for each migratetype that contain pages
1521 * of other types. This gives an indication of how well fallbacks are being
1522 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
1523 * to determine what is going on
1524 */
1525static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
1526{
1527#ifdef CONFIG_PAGE_OWNER
1528	int mtype;
1529
1530	if (!static_branch_unlikely(&page_owner_inited))
1531		return;
1532
1533	drain_all_pages(NULL);
1534
1535	seq_printf(m, "\n%-23s", "Number of mixed blocks ");
1536	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1537		seq_printf(m, "%12s ", migratetype_names[mtype]);
1538	seq_putc(m, '\n');
1539
1540	walk_zones_in_node(m, pgdat, true, true,
1541		pagetypeinfo_showmixedcount_print);
1542#endif /* CONFIG_PAGE_OWNER */
1543}
1544
1545/*
1546 * This prints out statistics in relation to grouping pages by mobility.
1547 * It is expensive to collect so do not constantly read the file.
1548 */
1549static int pagetypeinfo_show(struct seq_file *m, void *arg)
1550{
1551	pg_data_t *pgdat = (pg_data_t *)arg;
1552
1553	/* check memoryless node */
1554	if (!node_state(pgdat->node_id, N_MEMORY))
1555		return 0;
1556
1557	seq_printf(m, "Page block order: %d\n", pageblock_order);
1558	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
1559	seq_putc(m, '\n');
1560	pagetypeinfo_showfree(m, pgdat);
1561	pagetypeinfo_showblockcount(m, pgdat);
1562	pagetypeinfo_showmixedcount(m, pgdat);
1563
1564	return 0;
1565}
1566
1567static const struct seq_operations fragmentation_op = {
1568	.start	= frag_start,
1569	.next	= frag_next,
1570	.stop	= frag_stop,
1571	.show	= frag_show,
1572};
1573
1574static const struct seq_operations pagetypeinfo_op = {
1575	.start	= frag_start,
1576	.next	= frag_next,
1577	.stop	= frag_stop,
1578	.show	= pagetypeinfo_show,
1579};
1580
1581static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
1582{
1583	int zid;
1584
1585	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1586		struct zone *compare = &pgdat->node_zones[zid];
1587
1588		if (populated_zone(compare))
1589			return zone == compare;
1590	}
1591
1592	return false;
1593}
1594
1595static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1596							struct zone *zone)
1597{
1598	int i;
1599	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1600	if (is_zone_first_populated(pgdat, zone)) {
1601		seq_printf(m, "\n  per-node stats");
1602		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1603			unsigned long pages = node_page_state_pages(pgdat, i);
1604
1605			if (vmstat_item_print_in_thp(i))
1606				pages /= HPAGE_PMD_NR;
1607			seq_printf(m, "\n      %-12s %lu", node_stat_name(i),
1608				   pages);
1609		}
1610	}
1611	seq_printf(m,
1612		   "\n  pages free     %lu"
1613		   "\n        min      %lu"
1614		   "\n        low      %lu"
1615		   "\n        high     %lu"
1616		   "\n        spanned  %lu"
1617		   "\n        present  %lu"
1618		   "\n        managed  %lu"
1619		   "\n        cma      %lu",
1620		   zone_page_state(zone, NR_FREE_PAGES),
1621		   min_wmark_pages(zone),
1622		   low_wmark_pages(zone),
1623		   high_wmark_pages(zone),
1624		   zone->spanned_pages,
1625		   zone->present_pages,
1626		   zone_managed_pages(zone),
1627		   zone_cma_pages(zone));
1628
1629	seq_printf(m,
1630		   "\n        protection: (%ld",
1631		   zone->lowmem_reserve[0]);
1632	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
1633		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
1634	seq_putc(m, ')');
1635
1636	/* If unpopulated, no other information is useful */
1637	if (!populated_zone(zone)) {
1638		seq_putc(m, '\n');
1639		return;
1640	}
1641
1642	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1643		seq_printf(m, "\n      %-12s %lu", zone_stat_name(i),
1644			   zone_page_state(zone, i));
1645
1646#ifdef CONFIG_NUMA
1647	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
1648		seq_printf(m, "\n      %-12s %lu", numa_stat_name(i),
1649			   zone_numa_event_state(zone, i));
1650#endif
1651
1652	seq_printf(m, "\n  pagesets");
1653	for_each_online_cpu(i) {
1654		struct per_cpu_pages *pcp;
1655		struct per_cpu_zonestat __maybe_unused *pzstats;
1656
1657		pcp = per_cpu_ptr(zone->per_cpu_pageset, i);
1658		seq_printf(m,
1659			   "\n    cpu: %i"
1660			   "\n              count: %i"
1661			   "\n              high:  %i"
1662			   "\n              batch: %i",
1663			   i,
1664			   pcp->count,
1665			   pcp->high,
1666			   pcp->batch);
1667#ifdef CONFIG_SMP
1668		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, i);
1669		seq_printf(m, "\n  vm stats threshold: %d",
1670				pzstats->stat_threshold);
1671#endif
1672	}
1673	seq_printf(m,
1674		   "\n  node_unreclaimable:  %u"
1675		   "\n  start_pfn:           %lu",
1676		   pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
1677		   zone->zone_start_pfn);
1678	seq_putc(m, '\n');
1679}
1680
1681/*
1682 * Output information about zones in @pgdat.  All zones are printed regardless
1683 * of whether they are populated or not: lowmem_reserve_ratio operates on the
1684 * set of all zones and userspace would not be aware of such zones if they are
1685 * suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio).
1686 */
1687static int zoneinfo_show(struct seq_file *m, void *arg)
1688{
1689	pg_data_t *pgdat = (pg_data_t *)arg;
1690	walk_zones_in_node(m, pgdat, false, false, zoneinfo_show_print);
1691	return 0;
1692}
1693
1694static const struct seq_operations zoneinfo_op = {
1695	.start	= frag_start, /* iterate over all zones. The same as in
1696			       * fragmentation. */
1697	.next	= frag_next,
1698	.stop	= frag_stop,
1699	.show	= zoneinfo_show,
1700};
1701
1702#define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \
1703			 NR_VM_NUMA_EVENT_ITEMS + \
1704			 NR_VM_NODE_STAT_ITEMS + \
1705			 NR_VM_WRITEBACK_STAT_ITEMS + \
1706			 (IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \
1707			  NR_VM_EVENT_ITEMS : 0))
1708
1709static void *vmstat_start(struct seq_file *m, loff_t *pos)
1710{
1711	unsigned long *v;
1712	int i;
1713
1714	if (*pos >= NR_VMSTAT_ITEMS)
1715		return NULL;
1716
1717	BUILD_BUG_ON(ARRAY_SIZE(vmstat_text) < NR_VMSTAT_ITEMS);
1718	fold_vm_numa_events();
1719	v = kmalloc_array(NR_VMSTAT_ITEMS, sizeof(unsigned long), GFP_KERNEL);
1720	m->private = v;
1721	if (!v)
1722		return ERR_PTR(-ENOMEM);
1723	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1724		v[i] = global_zone_page_state(i);
1725	v += NR_VM_ZONE_STAT_ITEMS;
1726
1727#ifdef CONFIG_NUMA
1728	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
1729		v[i] = global_numa_event_state(i);
1730	v += NR_VM_NUMA_EVENT_ITEMS;
1731#endif
1732
1733	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1734		v[i] = global_node_page_state_pages(i);
1735		if (vmstat_item_print_in_thp(i))
1736			v[i] /= HPAGE_PMD_NR;
1737	}
1738	v += NR_VM_NODE_STAT_ITEMS;
1739
1740	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1741			    v + NR_DIRTY_THRESHOLD);
1742	v += NR_VM_WRITEBACK_STAT_ITEMS;
1743
1744#ifdef CONFIG_VM_EVENT_COUNTERS
1745	all_vm_events(v);
1746	v[PGPGIN] /= 2;		/* sectors -> kbytes */
1747	v[PGPGOUT] /= 2;
1748#endif
1749	return (unsigned long *)m->private + *pos;
1750}
1751
1752static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1753{
1754	(*pos)++;
1755	if (*pos >= NR_VMSTAT_ITEMS)
1756		return NULL;
1757	return (unsigned long *)m->private + *pos;
1758}
1759
1760static int vmstat_show(struct seq_file *m, void *arg)
1761{
1762	unsigned long *l = arg;
1763	unsigned long off = l - (unsigned long *)m->private;
1764
1765	seq_puts(m, vmstat_text[off]);
1766	seq_put_decimal_ull(m, " ", *l);
1767	seq_putc(m, '\n');
1768
1769	if (off == NR_VMSTAT_ITEMS - 1) {
1770		/*
1771		 * We've come to the end - add any deprecated counters to avoid
1772		 * breaking userspace which might depend on them being present.
1773		 */
1774		seq_puts(m, "nr_unstable 0\n");
1775	}
1776	return 0;
1777}
1778
1779static void vmstat_stop(struct seq_file *m, void *arg)
1780{
1781	kfree(m->private);
1782	m->private = NULL;
1783}
1784
1785static const struct seq_operations vmstat_op = {
1786	.start	= vmstat_start,
1787	.next	= vmstat_next,
1788	.stop	= vmstat_stop,
1789	.show	= vmstat_show,
1790};
1791#endif /* CONFIG_PROC_FS */
1792
1793#ifdef CONFIG_SMP
1794static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1795int sysctl_stat_interval __read_mostly = HZ;
1796
1797#ifdef CONFIG_PROC_FS
1798static void refresh_vm_stats(struct work_struct *work)
1799{
1800	refresh_cpu_vm_stats(true);
1801}
1802
1803int vmstat_refresh(struct ctl_table *table, int write,
1804		   void *buffer, size_t *lenp, loff_t *ppos)
1805{
1806	long val;
1807	int err;
1808	int i;
1809
1810	/*
1811	 * The regular update, every sysctl_stat_interval, may come later
1812	 * than expected: leaving a significant amount in per_cpu buckets.
1813	 * This is particularly misleading when checking a quantity of HUGE
1814	 * pages, immediately after running a test.  /proc/sys/vm/stat_refresh,
1815	 * which can equally be echo'ed to or cat'ted from (by root),
1816	 * can be used to update the stats just before reading them.
1817	 *
1818	 * Oh, and since global_zone_page_state() etc. are so careful to hide
1819	 * transiently negative values, report an error here if any of
1820	 * the stats is negative, so we know to go looking for imbalance.
1821	 */
1822	err = schedule_on_each_cpu(refresh_vm_stats);
1823	if (err)
1824		return err;
1825	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
1826		/*
1827		 * Skip checking stats known to go negative occasionally.
1828		 */
1829		switch (i) {
1830		case NR_ZONE_WRITE_PENDING:
1831		case NR_FREE_CMA_PAGES:
1832			continue;
1833		}
1834		val = atomic_long_read(&vm_zone_stat[i]);
1835		if (val < 0) {
1836			pr_warn("%s: %s %ld\n",
1837				__func__, zone_stat_name(i), val);
1838		}
1839	}
1840	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1841		/*
1842		 * Skip checking stats known to go negative occasionally.
1843		 */
1844		switch (i) {
1845		case NR_WRITEBACK:
1846			continue;
1847		}
1848		val = atomic_long_read(&vm_node_stat[i]);
1849		if (val < 0) {
1850			pr_warn("%s: %s %ld\n",
1851				__func__, node_stat_name(i), val);
1852		}
1853	}
1854	if (write)
1855		*ppos += *lenp;
1856	else
1857		*lenp = 0;
1858	return 0;
1859}
1860#endif /* CONFIG_PROC_FS */
1861
1862static void vmstat_update(struct work_struct *w)
1863{
1864	if (refresh_cpu_vm_stats(true)) {
1865		/*
1866		 * Counters were updated so we expect more updates
1867		 * to occur in the future. Keep on running the
1868		 * update worker thread.
1869		 */
1870		queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
1871				this_cpu_ptr(&vmstat_work),
1872				round_jiffies_relative(sysctl_stat_interval));
1873	}
1874}
1875
1881/*
1882 * Check if the diffs for a certain cpu indicate that
1883 * an update is needed.
1884 */
1885static bool need_update(int cpu)
1886{
1887	pg_data_t *last_pgdat = NULL;
1888	struct zone *zone;
1889
1890	for_each_populated_zone(zone) {
1891		struct per_cpu_zonestat *pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
1892		struct per_cpu_nodestat *n;
1893
1894		/*
1895		 * The fast way of checking if there are any vmstat diffs.
1896		 */
1897		if (memchr_inv(pzstats->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS *
1898			       sizeof(pzstats->vm_stat_diff[0])))
1899			return true;
1900
1901		if (last_pgdat == zone->zone_pgdat)
1902			continue;
1903		last_pgdat = zone->zone_pgdat;
1904		n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu);
1905		if (memchr_inv(n->vm_node_stat_diff, 0, NR_VM_NODE_STAT_ITEMS *
1906			       sizeof(n->vm_node_stat_diff[0])))
1907		    return true;
1908	}
1909	return false;
1910}
1911
1912/*
1913 * Switch off vmstat processing and then fold all the remaining differentials
1914 * until the diffs stay at zero. The function is used by NOHZ and can only be
1915 * invoked when tick processing is not active.
1916 */
1917void quiet_vmstat(void)
1918{
1919	if (system_state != SYSTEM_RUNNING)
1920		return;
1921
1922	if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
1923		return;
1924
1925	if (!need_update(smp_processor_id()))
1926		return;
1927
1928	/*
1929	 * Just refresh counters and do not care about the pending delayed
1930	 * vmstat_update. It doesn't fire that often to matter and canceling
1931	 * it would be too expensive from this path.
1932	 * vmstat_shepherd will take care about that for us.
1933	 */
1934	refresh_cpu_vm_stats(false);
1935}
1936
1937/*
1938 * Shepherd worker thread that checks the
1939 * differentials of processors that have their worker
1940 * threads for vm statistics updates disabled because of
1941 * inactivity.
1942 */
1943static void vmstat_shepherd(struct work_struct *w);
1944
1945static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
1946
1947static void vmstat_shepherd(struct work_struct *w)
1948{
1949	int cpu;
1950
1951	get_online_cpus();
1952	/* Check processors whose vmstat worker threads have been disabled */
1953	for_each_online_cpu(cpu) {
1954		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
1955
1956		if (!delayed_work_pending(dw) && need_update(cpu))
1957			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
1958
1959		cond_resched();
1960	}
1961	put_online_cpus();
1962
1963	schedule_delayed_work(&shepherd,
1964		round_jiffies_relative(sysctl_stat_interval));
1965}
1966
1967static void __init start_shepherd_timer(void)
1968{
1969	int cpu;
1970
1971	for_each_possible_cpu(cpu)
1972		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
1973			vmstat_update);
1974
1975	schedule_delayed_work(&shepherd,
1976		round_jiffies_relative(sysctl_stat_interval));
1977}
1978
1979static void __init init_cpu_node_state(void)
1980{
1981	int node;
1982
1983	for_each_online_node(node) {
1984		if (cpumask_weight(cpumask_of_node(node)) > 0)
1985			node_set_state(node, N_CPU);
1986	}
1987}
1988
1989static int vmstat_cpu_online(unsigned int cpu)
1990{
1991	refresh_zone_stat_thresholds();
1992	node_set_state(cpu_to_node(cpu), N_CPU);
1993	return 0;
1994}
1995
1996static int vmstat_cpu_down_prep(unsigned int cpu)
1997{
1998	cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
1999	return 0;
2000}
2001
2002static int vmstat_cpu_dead(unsigned int cpu)
2003{
2004	const struct cpumask *node_cpus;
2005	int node;
2006
2007	node = cpu_to_node(cpu);
2008
2009	refresh_zone_stat_thresholds();
2010	node_cpus = cpumask_of_node(node);
2011	if (cpumask_weight(node_cpus) > 0)
2012		return 0;
2013
2014	node_clear_state(node, N_CPU);
2015	return 0;
2016}
2017
2018#endif
2019
2020struct workqueue_struct *mm_percpu_wq;
2021
2022void __init init_mm_internals(void)
2023{
2024	int ret __maybe_unused;
2025
2026	mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);
2027
2028#ifdef CONFIG_SMP
2029	ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
2030					NULL, vmstat_cpu_dead);
2031	if (ret < 0)
2032		pr_err("vmstat: failed to register 'dead' hotplug state\n");
2033
2034	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
2035					vmstat_cpu_online,
2036					vmstat_cpu_down_prep);
2037	if (ret < 0)
2038		pr_err("vmstat: failed to register 'online' hotplug state\n");
2039
2040	get_online_cpus();
2041	init_cpu_node_state();
2042	put_online_cpus();
2043
2044	start_shepherd_timer();
2045#endif
2046#ifdef CONFIG_PROC_FS
2047	proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
2048	proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
2049	proc_create_seq("vmstat", 0444, NULL, &vmstat_op);
2050	proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op);
2051#endif
2052}
2053
2054#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
2055
2056/*
2057 * Return an index indicating how much of the available free memory is
2058 * unusable for an allocation of the requested size.
2059 */
2060static int unusable_free_index(unsigned int order,
2061				struct contig_page_info *info)
2062{
2063	/* No free memory is interpreted as all free memory is unusable */
2064	if (info->free_pages == 0)
2065		return 1000;
2066
2067	/*
2068	 * Index should be a value between 0 and 1. Return a value to 3
2069	 * decimal places.
2070	 *
2071	 * 0 => no fragmentation
2072	 * 1 => high fragmentation
2073	 */
2074	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
2075
2076}
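/*
 * Editor's note (worked example, not part of the kernel source): for an
 * order-2 request with info->free_pages = 1000 and
 * info->free_blocks_suitable = 100, the index is
 * (1000 - (100 << 2)) * 1000 / 1000 = 600, which unusable_show_print()
 * renders as 0.600, i.e. 60% of the free memory is unusable for that
 * allocation size.
 */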
2077
2078static void unusable_show_print(struct seq_file *m,
2079					pg_data_t *pgdat, struct zone *zone)
2080{
2081	unsigned int order;
2082	int index;
2083	struct contig_page_info info;
2084
2085	seq_printf(m, "Node %d, zone %8s ",
2086				pgdat->node_id,
2087				zone->name);
2088	for (order = 0; order < MAX_ORDER; ++order) {
2089		fill_contig_page_info(zone, order, &info);
2090		index = unusable_free_index(order, &info);
2091		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
2092	}
2093
2094	seq_putc(m, '\n');
2095}
2096
2097/*
2098 * Display unusable free space index
2099 *
2100 * The unusable free space index measures how much of the available free
2101 * memory cannot be used to satisfy an allocation of a given size and is a
2102 * value between 0 and 1. The higher the value, the more of free memory is
2103 * unusable and by implication, the worse the external fragmentation is. This
2104 * can be expressed as a percentage by multiplying by 100.
2105 */
2106static int unusable_show(struct seq_file *m, void *arg)
2107{
2108	pg_data_t *pgdat = (pg_data_t *)arg;
2109
2110	/* check memoryless node */
2111	if (!node_state(pgdat->node_id, N_MEMORY))
2112		return 0;
2113
2114	walk_zones_in_node(m, pgdat, true, false, unusable_show_print);
2115
2116	return 0;
2117}
2118
2119static const struct seq_operations unusable_sops = {
2120	.start	= frag_start,
2121	.next	= frag_next,
2122	.stop	= frag_stop,
2123	.show	= unusable_show,
2124};
2125
2126DEFINE_SEQ_ATTRIBUTE(unusable);
2127
2128static void extfrag_show_print(struct seq_file *m,
2129					pg_data_t *pgdat, struct zone *zone)
2130{
2131	unsigned int order;
2132	int index;
2133
2134	/* Alloc on stack as interrupts are disabled for zone walk */
2135	struct contig_page_info info;
2136
2137	seq_printf(m, "Node %d, zone %8s ",
2138				pgdat->node_id,
2139				zone->name);
2140	for (order = 0; order < MAX_ORDER; ++order) {
2141		fill_contig_page_info(zone, order, &info);
2142		index = __fragmentation_index(order, &info);
2143		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
2144	}
2145
2146	seq_putc(m, '\n');
2147}
2148
2149/*
2150 * Display fragmentation index for orders that allocations would fail for
2151 */
2152static int extfrag_show(struct seq_file *m, void *arg)
2153{
2154	pg_data_t *pgdat = (pg_data_t *)arg;
2155
2156	walk_zones_in_node(m, pgdat, true, false, extfrag_show_print);
2157
2158	return 0;
2159}
2160
2161static const struct seq_operations extfrag_sops = {
2162	.start	= frag_start,
2163	.next	= frag_next,
2164	.stop	= frag_stop,
2165	.show	= extfrag_show,
2166};
2167
2168DEFINE_SEQ_ATTRIBUTE(extfrag);
2169
2170static int __init extfrag_debug_init(void)
2171{
2172	struct dentry *extfrag_debug_root;
2173
2174	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
2175
2176	debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL,
2177			    &unusable_fops);
2178
2179	debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL,
2180			    &extfrag_fops);
2181
2182	return 0;
2183}
2184
2185module_init(extfrag_debug_init);
2186#endif
v4.10.11
 
   1/*
   2 *  linux/mm/vmstat.c
   3 *
   4 *  Manages VM statistics
   5 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   6 *
   7 *  zoned VM statistics
   8 *  Copyright (C) 2006 Silicon Graphics, Inc.,
   9 *		Christoph Lameter <christoph@lameter.com>
  10 *  Copyright (C) 2008-2014 Christoph Lameter
  11 */
  12#include <linux/fs.h>
  13#include <linux/mm.h>
  14#include <linux/err.h>
  15#include <linux/module.h>
  16#include <linux/slab.h>
  17#include <linux/cpu.h>
  18#include <linux/cpumask.h>
  19#include <linux/vmstat.h>
  20#include <linux/proc_fs.h>
  21#include <linux/seq_file.h>
  22#include <linux/debugfs.h>
  23#include <linux/sched.h>
  24#include <linux/math64.h>
  25#include <linux/writeback.h>
  26#include <linux/compaction.h>
  27#include <linux/mm_inline.h>
  28#include <linux/page_ext.h>
  29#include <linux/page_owner.h>
  30
  31#include "internal.h"
  32
  33#ifdef CONFIG_VM_EVENT_COUNTERS
  34DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
  35EXPORT_PER_CPU_SYMBOL(vm_event_states);
  36
  37static void sum_vm_events(unsigned long *ret)
  38{
  39	int cpu;
  40	int i;
  41
  42	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
  43
  44	for_each_online_cpu(cpu) {
  45		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
  46
  47		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
  48			ret[i] += this->event[i];
  49	}
  50}
  51
  52/*
  53 * Accumulate the vm event counters across all CPUs.
  54 * The result is unavoidably approximate - it can change
  55 * during and after execution of this function.
  56 */
  57void all_vm_events(unsigned long *ret)
  58{
  59	get_online_cpus();
  60	sum_vm_events(ret);
  61	put_online_cpus();
  62}
  63EXPORT_SYMBOL_GPL(all_vm_events);
  64
  65/*
  66 * Fold the foreign cpu events into our own.
  67 *
  68 * This is adding to the events on one processor
  69 * but keeps the global counts constant.
  70 */
  71void vm_events_fold_cpu(int cpu)
  72{
  73	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
  74	int i;
  75
  76	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
  77		count_vm_events(i, fold_state->event[i]);
  78		fold_state->event[i] = 0;
  79	}
  80}
  81
  82#endif /* CONFIG_VM_EVENT_COUNTERS */
  83
  84/*
  85 * Manage combined zone based / global counters
  86 *
  87 * vm_stat contains the global counters
  88 */
  89atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
  90atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
  91EXPORT_SYMBOL(vm_zone_stat);
  92EXPORT_SYMBOL(vm_node_stat);
  93
  94#ifdef CONFIG_SMP
  95
  96int calculate_pressure_threshold(struct zone *zone)
  97{
  98	int threshold;
  99	int watermark_distance;
 100
 101	/*
 102	 * As vmstats are not up to date, there is drift between the estimated
 103	 * and real values. For high thresholds and a high number of CPUs, it
 104	 * is possible for the min watermark to be breached while the estimated
 105	 * value looks fine. The pressure threshold is a reduced value such
 106	 * that even the maximum amount of drift will not accidentally breach
 107	 * the min watermark
 108	 */
 109	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
 110	threshold = max(1, (int)(watermark_distance / num_online_cpus()));
 111
 112	/*
 113	 * Maximum threshold is 125
 114	 */
 115	threshold = min(125, threshold);
 116
 117	return threshold;
 118}
 119
 120int calculate_normal_threshold(struct zone *zone)
 121{
 122	int threshold;
 123	int mem;	/* memory in 128 MB units */
 124
 125	/*
 126	 * The threshold scales with the number of processors and the amount
 127	 * of memory per zone. More memory means that we can defer updates for
 128	 * longer, more processors could lead to more contention.
 129 	 * fls() is used to have a cheap way of logarithmic scaling.
 130	 *
 131	 * Some sample thresholds:
 132	 *
 133	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
 134	 * ------------------------------------------------------------------
 135	 * 8		1		1	0.9-1 GB	4
 136	 * 16		2		2	0.9-1 GB	4
 137	 * 20 		2		2	1-2 GB		5
 138	 * 24		2		2	2-4 GB		6
 139	 * 28		2		2	4-8 GB		7
 140	 * 32		2		2	8-16 GB		8
 141	 * 4		2		2	<128M		1
 142	 * 30		4		3	2-4 GB		5
 143	 * 48		4		3	8-16 GB		8
 144	 * 32		8		4	1-2 GB		4
 145	 * 32		8		4	0.9-1GB		4
 146	 * 10		16		5	<128M		1
 147	 * 40		16		5	900M		4
 148	 * 70		64		7	2-4 GB		5
 149	 * 84		64		7	4-8 GB		6
 150	 * 108		512		9	4-8 GB		6
 151	 * 125		1024		10	8-16 GB		8
 152	 * 125		1024		10	16-32 GB	9
 153	 */
 154
 155	mem = zone->managed_pages >> (27 - PAGE_SHIFT);
 156
 157	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
 158
 159	/*
 160	 * Maximum threshold is 125
 161	 */
 162	threshold = min(125, threshold);
 163
 164	return threshold;
 165}
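/*
 * Editor's note (worked example, not part of the kernel source): with two
 * online CPUs (fls(2) = 2) and a ~1 GB zone (mem = 8 in 128 MB units, so
 * 1 + fls(8) = 5), the result is 2 * 2 * 5 = 20, matching the
 * "2 processors / 1-2 GB" row in the table above; the min(125, threshold)
 * cap only matters on much larger machines.
 */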
 166
 167/*
 168 * Refresh the thresholds for each zone.
 169 */
 170void refresh_zone_stat_thresholds(void)
 171{
 172	struct pglist_data *pgdat;
 173	struct zone *zone;
 174	int cpu;
 175	int threshold;
 176
 177	/* Zero current pgdat thresholds */
 178	for_each_online_pgdat(pgdat) {
 179		for_each_online_cpu(cpu) {
 180			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
 181		}
 182	}
 183
 184	for_each_populated_zone(zone) {
 185		struct pglist_data *pgdat = zone->zone_pgdat;
 186		unsigned long max_drift, tolerate_drift;
 187
 188		threshold = calculate_normal_threshold(zone);
 189
 190		for_each_online_cpu(cpu) {
 191			int pgdat_threshold;
 192
 193			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
 194							= threshold;
 195
 196			/* Base nodestat threshold on the largest populated zone. */
 197			pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
 198			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
 199				= max(threshold, pgdat_threshold);
 200		}
 201
 202		/*
 203		 * Only set percpu_drift_mark if there is a danger that
 204		 * NR_FREE_PAGES reports the low watermark is ok when in fact
 205		 * the min watermark could be breached by an allocation
 206		 */
 207		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
 208		max_drift = num_online_cpus() * threshold;
 209		if (max_drift > tolerate_drift)
 210			zone->percpu_drift_mark = high_wmark_pages(zone) +
 211					max_drift;
 212	}
 213}
 214
 215void set_pgdat_percpu_threshold(pg_data_t *pgdat,
 216				int (*calculate_pressure)(struct zone *))
 217{
 218	struct zone *zone;
 219	int cpu;
 220	int threshold;
 221	int i;
 222
 223	for (i = 0; i < pgdat->nr_zones; i++) {
 224		zone = &pgdat->node_zones[i];
 225		if (!zone->percpu_drift_mark)
 226			continue;
 227
 228		threshold = (*calculate_pressure)(zone);
 229		for_each_online_cpu(cpu)
 230			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
 231							= threshold;
 232	}
 233}
 234
 235/*
 236 * For use when we know that interrupts are disabled,
 237 * or when we know that preemption is disabled and that
 238 * particular counter cannot be updated from interrupt context.
 239 */
 240void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 241			   long delta)
 242{
 243	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 244	s8 __percpu *p = pcp->vm_stat_diff + item;
 245	long x;
 246	long t;
 247
 248	x = delta + __this_cpu_read(*p);
 249
 250	t = __this_cpu_read(pcp->stat_threshold);
 251
 252	if (unlikely(x > t || x < -t)) {
 253		zone_page_state_add(x, zone, item);
 254		x = 0;
 255	}
 256	__this_cpu_write(*p, x);
 257}
 258EXPORT_SYMBOL(__mod_zone_page_state);
 259
 260void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
 261				long delta)
 262{
 263	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
 264	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 265	long x;
 266	long t;
 267
 268	x = delta + __this_cpu_read(*p);
 269
 270	t = __this_cpu_read(pcp->stat_threshold);
 271
 272	if (unlikely(x > t || x < -t)) {
 273		node_page_state_add(x, pgdat, item);
 274		x = 0;
 275	}
 276	__this_cpu_write(*p, x);
 277}
 278EXPORT_SYMBOL(__mod_node_page_state);
 279
 280/*
 281 * Optimized increment and decrement functions.
 282 *
 283 * These are only for a single page and therefore can take a struct page *
 284 * argument instead of struct zone *. This allows the inclusion of the code
 285 * generated for page_zone(page) into the optimized functions.
 286 *
 287 * No overflow check is necessary and therefore the differential can be
 288 * incremented or decremented in place which may allow the compilers to
 289 * generate better code.
 290 * The increment or decrement is known and therefore one boundary check can
 291 * be omitted.
 292 *
 293 * NOTE: These functions are very performance sensitive. Change only
 294 * with care.
 295 *
 296 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 297 * However, the code must first determine the differential location in a zone
 298 * based on the processor number and then inc/dec the counter. There is no
 299 * guarantee without disabling preemption that the processor will not change
 300 * in between and therefore the atomicity vs. interrupt cannot be exploited
 301 * in a useful way here.
 302 */
 303void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 304{
 305	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 306	s8 __percpu *p = pcp->vm_stat_diff + item;
 307	s8 v, t;
 308
 309	v = __this_cpu_inc_return(*p);
 310	t = __this_cpu_read(pcp->stat_threshold);
 311	if (unlikely(v > t)) {
 312		s8 overstep = t >> 1;
 313
 314		zone_page_state_add(v + overstep, zone, item);
 315		__this_cpu_write(*p, -overstep);
 316	}
 317}
 318
 319void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 320{
 321	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
 322	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 323	s8 v, t;
 324
 325	v = __this_cpu_inc_return(*p);
 326	t = __this_cpu_read(pcp->stat_threshold);
 327	if (unlikely(v > t)) {
 328		s8 overstep = t >> 1;
 329
 330		node_page_state_add(v + overstep, pgdat, item);
 331		__this_cpu_write(*p, -overstep);
 332	}
 333}
 334
 335void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
 336{
 337	__inc_zone_state(page_zone(page), item);
 338}
 339EXPORT_SYMBOL(__inc_zone_page_state);
 340
 341void __inc_node_page_state(struct page *page, enum node_stat_item item)
 342{
 343	__inc_node_state(page_pgdat(page), item);
 344}
 345EXPORT_SYMBOL(__inc_node_page_state);
 346
 347void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 348{
 349	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 350	s8 __percpu *p = pcp->vm_stat_diff + item;
 351	s8 v, t;
 352
 353	v = __this_cpu_dec_return(*p);
 354	t = __this_cpu_read(pcp->stat_threshold);
 355	if (unlikely(v < - t)) {
 356		s8 overstep = t >> 1;
 357
 358		zone_page_state_add(v - overstep, zone, item);
 359		__this_cpu_write(*p, overstep);
 360	}
 361}
 362
 363void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 364{
 365	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
 366	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 367	s8 v, t;
 368
 369	v = __this_cpu_dec_return(*p);
 370	t = __this_cpu_read(pcp->stat_threshold);
 371	if (unlikely(v < - t)) {
 372		s8 overstep = t >> 1;
 373
 374		node_page_state_add(v - overstep, pgdat, item);
 375		__this_cpu_write(*p, overstep);
 376	}
 377}
 378
 379void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
 380{
 381	__dec_zone_state(page_zone(page), item);
 382}
 383EXPORT_SYMBOL(__dec_zone_page_state);
 384
 385void __dec_node_page_state(struct page *page, enum node_stat_item item)
 386{
 387	__dec_node_state(page_pgdat(page), item);
 388}
 389EXPORT_SYMBOL(__dec_node_page_state);
 390
 391#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
 392/*
 393 * If we have cmpxchg_local support then we do not need to incur the overhead
 394 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
 395 *
 396 * mod_state() modifies the zone counter state through atomic per cpu
 397 * operations.
 398 *
 399 * Overstep mode specifies how overstep should be handled:
 400 *     0       No overstepping
 401 *     1       Overstepping half of threshold
 402 *     -1      Overstepping minus half of threshold
 403 */
 404static inline void mod_zone_state(struct zone *zone,
 405       enum zone_stat_item item, long delta, int overstep_mode)
 406{
 407	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 408	s8 __percpu *p = pcp->vm_stat_diff + item;
 409	long o, n, t, z;
 410
 411	do {
 412		z = 0;  /* overflow to zone counters */
 413
 414		/*
 415		 * The fetching of the stat_threshold is racy. We may apply
 416		 * a counter threshold to the wrong cpu if we get
 417		 * rescheduled while executing here. However, the next
 418		 * counter update will apply the threshold again and
 419		 * therefore bring the counter under the threshold again.
 420		 *
 421		 * Most of the time the thresholds are the same anyways
 422		 * for all cpus in a zone.
 423		 */
 424		t = this_cpu_read(pcp->stat_threshold);
 425
 426		o = this_cpu_read(*p);
 427		n = delta + o;
 428
 429		if (n > t || n < -t) {
 430			int os = overstep_mode * (t >> 1);
 431
 432			/* Overflow must be added to zone counters */
 433			z = n + os;
 434			n = -os;
 435		}
 436	} while (this_cpu_cmpxchg(*p, o, n) != o);
 437
 438	if (z)
 439		zone_page_state_add(z, zone, item);
 440}
 441
 442void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 443			 long delta)
 444{
 445	mod_zone_state(zone, item, delta, 0);
 446}
 447EXPORT_SYMBOL(mod_zone_page_state);
 448
 449void inc_zone_page_state(struct page *page, enum zone_stat_item item)
 450{
 451	mod_zone_state(page_zone(page), item, 1, 1);
 452}
 453EXPORT_SYMBOL(inc_zone_page_state);
 454
 455void dec_zone_page_state(struct page *page, enum zone_stat_item item)
 456{
 457	mod_zone_state(page_zone(page), item, -1, -1);
 458}
 459EXPORT_SYMBOL(dec_zone_page_state);
 460
 461static inline void mod_node_state(struct pglist_data *pgdat,
 462       enum node_stat_item item, int delta, int overstep_mode)
 463{
 464	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
 465	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 466	long o, n, t, z;
 467
 468	do {
 469		z = 0;  /* overflow to node counters */
 470
 471		/*
 472		 * The fetching of the stat_threshold is racy. We may apply
 473		 * a counter threshold to the wrong cpu if we get
 474		 * rescheduled while executing here. However, the next
 475		 * counter update will apply the threshold again and
 476		 * therefore bring the counter under the threshold again.
 477		 *
 478		 * Most of the time the thresholds are the same anyways
 479		 * for all cpus in a node.
 480		 */
 481		t = this_cpu_read(pcp->stat_threshold);
 482
 483		o = this_cpu_read(*p);
 484		n = delta + o;
 485
 486		if (n > t || n < -t) {
 487			int os = overstep_mode * (t >> 1);
 488
 489			/* Overflow must be added to node counters */
 490			z = n + os;
 491			n = -os;
 492		}
 493	} while (this_cpu_cmpxchg(*p, o, n) != o);
 494
 495	if (z)
 496		node_page_state_add(z, pgdat, item);
 497}
 498
 499void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
 500					long delta)
 501{
 502	mod_node_state(pgdat, item, delta, 0);
 503}
 504EXPORT_SYMBOL(mod_node_page_state);
 505
 506void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 507{
 508	mod_node_state(pgdat, item, 1, 1);
 509}
 510
 511void inc_node_page_state(struct page *page, enum node_stat_item item)
 512{
 513	mod_node_state(page_pgdat(page), item, 1, 1);
 514}
 515EXPORT_SYMBOL(inc_node_page_state);
 516
 517void dec_node_page_state(struct page *page, enum node_stat_item item)
 518{
 519	mod_node_state(page_pgdat(page), item, -1, -1);
 520}
 521EXPORT_SYMBOL(dec_node_page_state);
 522#else
 523/*
 524 * Use interrupt disable to serialize counter updates
 525 */
 526void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 527			 long delta)
 528{
 529	unsigned long flags;
 530
 531	local_irq_save(flags);
 532	__mod_zone_page_state(zone, item, delta);
 533	local_irq_restore(flags);
 534}
 535EXPORT_SYMBOL(mod_zone_page_state);
 536
 537void inc_zone_page_state(struct page *page, enum zone_stat_item item)
 538{
 539	unsigned long flags;
 540	struct zone *zone;
 541
 542	zone = page_zone(page);
 543	local_irq_save(flags);
 544	__inc_zone_state(zone, item);
 545	local_irq_restore(flags);
 546}
 547EXPORT_SYMBOL(inc_zone_page_state);
 548
 549void dec_zone_page_state(struct page *page, enum zone_stat_item item)
 550{
 551	unsigned long flags;
 552
 553	local_irq_save(flags);
 554	__dec_zone_page_state(page, item);
 555	local_irq_restore(flags);
 556}
 557EXPORT_SYMBOL(dec_zone_page_state);
 558
 559void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 560{
 561	unsigned long flags;
 562
 563	local_irq_save(flags);
 564	__inc_node_state(pgdat, item);
 565	local_irq_restore(flags);
 566}
 567EXPORT_SYMBOL(inc_node_state);
 568
 569void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
 570					long delta)
 571{
 572	unsigned long flags;
 573
 574	local_irq_save(flags);
 575	__mod_node_page_state(pgdat, item, delta);
 576	local_irq_restore(flags);
 577}
 578EXPORT_SYMBOL(mod_node_page_state);
 579
 580void inc_node_page_state(struct page *page, enum node_stat_item item)
 581{
 582	unsigned long flags;
 583	struct pglist_data *pgdat;
 584
 585	pgdat = page_pgdat(page);
 586	local_irq_save(flags);
 587	__inc_node_state(pgdat, item);
 588	local_irq_restore(flags);
 589}
 590EXPORT_SYMBOL(inc_node_page_state);
 591
 592void dec_node_page_state(struct page *page, enum node_stat_item item)
 593{
 594	unsigned long flags;
 595
 596	local_irq_save(flags);
 597	__dec_node_page_state(page, item);
 598	local_irq_restore(flags);
 599}
 600EXPORT_SYMBOL(dec_node_page_state);
 601#endif
 602
 603/*
 604 * Fold a differential into the global counters.
 605 * Returns the number of counters updated.
 606 */
 607static int fold_diff(int *zone_diff, int *node_diff)
 608{
 609	int i;
 610	int changes = 0;
 611
 612	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 613		if (zone_diff[i]) {
 614			atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
 615			changes++;
 616	}
 617
 618	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
 619		if (node_diff[i]) {
 620			atomic_long_add(node_diff[i], &vm_node_stat[i]);
 621			changes++;
 622	}
 623	return changes;
 624}
 625
 626/*
 627 * Update the zone counters for the current cpu.
 628 *
 629 * Note that refresh_cpu_vm_stats strives to only access
 630 * node local memory. The per cpu pagesets on remote zones are placed
 631 * in the memory local to the processor using that pageset. So the
 632 * loop over all zones will access a series of cachelines local to
 633 * the processor.
 634 *
 635 * The call to zone_page_state_add updates the cachelines with the
 636 * statistics in the remote zone struct as well as the global cachelines
 637 * with the global counters. These could cause remote node cache line
 638 * bouncing and will have to be only done when necessary.
 639 *
 640 * The function returns the number of global counters updated.
 641 */
 642static int refresh_cpu_vm_stats(bool do_pagesets)
 643{
 644	struct pglist_data *pgdat;
 645	struct zone *zone;
 646	int i;
 647	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 648	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
 649	int changes = 0;
 650
 651	for_each_populated_zone(zone) {
 652		struct per_cpu_pageset __percpu *p = zone->pageset;
 653
 654		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
 655			int v;
 656
 657			v = this_cpu_xchg(p->vm_stat_diff[i], 0);
 658			if (v) {
 659
 660				atomic_long_add(v, &zone->vm_stat[i]);
 661				global_zone_diff[i] += v;
 662#ifdef CONFIG_NUMA
 663				/* 3 seconds idle till flush */
 664				__this_cpu_write(p->expire, 3);
 665#endif
 666			}
 667		}
 668#ifdef CONFIG_NUMA
 669		if (do_pagesets) {
 670			cond_resched();
 671			/*
 672			 * Deal with draining the remote pageset of this
 673			 * processor
 674			 *
 675			 * Check if there are pages remaining in this pageset
 676			 * if not then there is nothing to expire.
 677			 */
 678			if (!__this_cpu_read(p->expire) ||
 679			       !__this_cpu_read(p->pcp.count))
 680				continue;
 681
 682			/*
 683			 * We never drain zones local to this processor.
 684			 */
 685			if (zone_to_nid(zone) == numa_node_id()) {
 686				__this_cpu_write(p->expire, 0);
 687				continue;
 688			}
 689
 690			if (__this_cpu_dec_return(p->expire))
 691				continue;
 692
 693			if (__this_cpu_read(p->pcp.count)) {
 694				drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
 695				changes++;
 696			}
 697		}
 698#endif
 699	}
 700
 701	for_each_online_pgdat(pgdat) {
 702		struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;
 703
 704		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
 705			int v;
 706
 707			v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
 708			if (v) {
 709				atomic_long_add(v, &pgdat->vm_stat[i]);
 710				global_node_diff[i] += v;
 711			}
 712		}
 713	}
 714
 715	changes += fold_diff(global_zone_diff, global_node_diff);
 716	return changes;
 717}
 718
 719/*
 720 * Fold the data for an offline cpu into the global array.
 721 * There cannot be any access by the offline cpu and therefore
 722 * synchronization is simplified.
 723 */
 724void cpu_vm_stats_fold(int cpu)
 725{
 726	struct pglist_data *pgdat;
 727	struct zone *zone;
 728	int i;
 729	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 730	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
 731
 732	for_each_populated_zone(zone) {
 733		struct per_cpu_pageset *p;
 734
 735		p = per_cpu_ptr(zone->pageset, cpu);
 736
 737		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 738			if (p->vm_stat_diff[i]) {
 739				int v;
 740
 741				v = p->vm_stat_diff[i];
 742				p->vm_stat_diff[i] = 0;
 743				atomic_long_add(v, &zone->vm_stat[i]);
 744				global_zone_diff[i] += v;
 745			}
 746	}
 747
 748	for_each_online_pgdat(pgdat) {
 749		struct per_cpu_nodestat *p;
 750
 751		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
 752
 753		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
 754			if (p->vm_node_stat_diff[i]) {
 755				int v;
 756
 757				v = p->vm_node_stat_diff[i];
 758				p->vm_node_stat_diff[i] = 0;
 759				atomic_long_add(v, &pgdat->vm_stat[i]);
 760				global_node_diff[i] += v;
 761			}
 762	}
 763
 764	fold_diff(global_zone_diff, global_node_diff);
 765}
 766
 767/*
 768 * this is only called if !populated_zone(zone), which implies no other users of
 769 * pset->vm_stat_diff[] exist.
 770 */
 771void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
 772{
 773	int i;
 774
 775	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 776		if (pset->vm_stat_diff[i]) {
 777			int v = pset->vm_stat_diff[i];
 778			pset->vm_stat_diff[i] = 0;
 779			atomic_long_add(v, &zone->vm_stat[i]);
 780			atomic_long_add(v, &vm_zone_stat[i]);
 781		}
 782}
 783#endif
 784
 785#ifdef CONFIG_NUMA
 786/*
 787 * Determine the per node value of a stat item. This function
 788 * is called frequently in a NUMA machine, so try to be as
 789 * frugal as possible.
 790 */
 791unsigned long sum_zone_node_page_state(int node,
 792				 enum zone_stat_item item)
 793{
 794	struct zone *zones = NODE_DATA(node)->node_zones;
 795	int i;
 796	unsigned long count = 0;
 797
 798	for (i = 0; i < MAX_NR_ZONES; i++)
 799		count += zone_page_state(zones + i, item);
 800
 801	return count;
 802}
 803
 804/*
 805 * Determine the per node value of a stat item.
 806 */
 807unsigned long node_page_state(struct pglist_data *pgdat,
 808				enum node_stat_item item)
 809{
 810	long x = atomic_long_read(&pgdat->vm_stat[item]);
 811#ifdef CONFIG_SMP
 812	if (x < 0)
 813		x = 0;
 814#endif
 815	return x;
 816}
 817#endif
 818
 819#ifdef CONFIG_COMPACTION
 820
 821struct contig_page_info {
 822	unsigned long free_pages;
 823	unsigned long free_blocks_total;
 824	unsigned long free_blocks_suitable;
 825};
 826
 827/*
 828 * Calculate the number of free pages in a zone, how many contiguous
 829 * pages are free and how many are large enough to satisfy an allocation of
 830 * the target size. Note that this function makes no attempt to estimate
 831 * how many suitable free blocks there *might* be if MOVABLE pages were
 832 * migrated. Calculating that is possible, but expensive and can be
 833 * figured out from userspace
 834 */
 835static void fill_contig_page_info(struct zone *zone,
 836				unsigned int suitable_order,
 837				struct contig_page_info *info)
 838{
 839	unsigned int order;
 840
 841	info->free_pages = 0;
 842	info->free_blocks_total = 0;
 843	info->free_blocks_suitable = 0;
 844
 845	for (order = 0; order < MAX_ORDER; order++) {
 846		unsigned long blocks;
 847
 848		/* Count number of free blocks */
 849		blocks = zone->free_area[order].nr_free;
 850		info->free_blocks_total += blocks;
 851
 852		/* Count free base pages */
 853		info->free_pages += blocks << order;
 854
 855		/* Count the suitable free blocks */
 856		if (order >= suitable_order)
 857			info->free_blocks_suitable += blocks <<
 858						(order - suitable_order);
 859	}
 860}
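/*
 * Editor's note (worked example, not part of the kernel source): for a zone
 * with nr_free = {10, 5, 2} at orders 0..2 and suitable_order = 1, this
 * fills info with free_blocks_total = 17, free_pages = 10 + 5*2 + 2*4 = 28
 * and free_blocks_suitable = 5 + (2 << 1) = 9 order-1 sized chunks.
 */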
 861
 862/*
 863 * A fragmentation index only makes sense if an allocation of a requested
 864 * size would fail. If that is true, the fragmentation index indicates
 865 * whether external fragmentation or a lack of memory was the problem.
 866 * The value can be used to determine if page reclaim or compaction
 867 * should be used
 868 */
 869static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
 870{
 871	unsigned long requested = 1UL << order;
 872
 873	if (!info->free_blocks_total)
 874		return 0;
 875
 876	/* Fragmentation index only makes sense when a request would fail */
 877	if (info->free_blocks_suitable)
 878		return -1000;
 879
 880	/*
 881	 * Index is between 0 and 1 so return within 3 decimal places
 882	 *
 883	 * 0 => allocation would fail due to lack of memory
 884	 * 1 => allocation would fail due to fragmentation
 885	 */
 886	return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL, requested)), info->free_blocks_total);
 887}
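/*
 * Editor's note (worked example, not part of the kernel source): for an
 * order-3 request (8 pages) with free_pages = 100, free_blocks_total = 50
 * and no suitable block, the index is
 * 1000 - (1000 + 100 * 1000 / 8) / 50 = 730, i.e. 0.730: the failure is
 * mostly due to external fragmentation rather than a lack of memory.
 */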
 888
 889/* Same as __fragmentation_index but allocs contig_page_info on stack */
 890int fragmentation_index(struct zone *zone, unsigned int order)
 891{
 892	struct contig_page_info info;
 893
 894	fill_contig_page_info(zone, order, &info);
 895	return __fragmentation_index(order, &info);
 896}
 897#endif
 898
 899#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
 900#ifdef CONFIG_ZONE_DMA
 901#define TEXT_FOR_DMA(xx) xx "_dma",
 902#else
 903#define TEXT_FOR_DMA(xx)
 904#endif
 905
 906#ifdef CONFIG_ZONE_DMA32
 907#define TEXT_FOR_DMA32(xx) xx "_dma32",
 908#else
 909#define TEXT_FOR_DMA32(xx)
 910#endif
 911
 912#ifdef CONFIG_HIGHMEM
 913#define TEXT_FOR_HIGHMEM(xx) xx "_high",
 914#else
 915#define TEXT_FOR_HIGHMEM(xx)
 916#endif
 917
 918#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
 919					TEXT_FOR_HIGHMEM(xx) xx "_movable",
 920
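/*
 * Editor's note (illustrative, not part of the kernel source): on a config
 * with CONFIG_ZONE_DMA and CONFIG_ZONE_DMA32 but without CONFIG_HIGHMEM,
 * TEXTS_FOR_ZONES("pgalloc") expands to
 * "pgalloc_dma", "pgalloc_dma32", "pgalloc_normal", "pgalloc_movable",
 * i.e. one counter name per configured zone type.
 */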
 921const char * const vmstat_text[] = {
 922	/* enum zone_stat_item counters */
 923	"nr_free_pages",
 924	"nr_zone_inactive_anon",
 925	"nr_zone_active_anon",
 926	"nr_zone_inactive_file",
 927	"nr_zone_active_file",
 928	"nr_zone_unevictable",
 929	"nr_zone_write_pending",
 930	"nr_mlock",
 931	"nr_slab_reclaimable",
 932	"nr_slab_unreclaimable",
 933	"nr_page_table_pages",
 934	"nr_kernel_stack",
 935	"nr_bounce",
 936#if IS_ENABLED(CONFIG_ZSMALLOC)
 937	"nr_zspages",
 938#endif
 939#ifdef CONFIG_NUMA
 940	"numa_hit",
 941	"numa_miss",
 942	"numa_foreign",
 943	"numa_interleave",
 944	"numa_local",
 945	"numa_other",
 946#endif
 947	"nr_free_cma",
 948
 949	/* Node-based counters */
 950	"nr_inactive_anon",
 951	"nr_active_anon",
 952	"nr_inactive_file",
 953	"nr_active_file",
 954	"nr_unevictable",
 955	"nr_isolated_anon",
 956	"nr_isolated_file",
 957	"nr_pages_scanned",
 958	"workingset_refault",
 959	"workingset_activate",
 960	"workingset_nodereclaim",
 961	"nr_anon_pages",
 962	"nr_mapped",
 963	"nr_file_pages",
 964	"nr_dirty",
 965	"nr_writeback",
 966	"nr_writeback_temp",
 967	"nr_shmem",
 968	"nr_shmem_hugepages",
 969	"nr_shmem_pmdmapped",
 970	"nr_anon_transparent_hugepages",
 971	"nr_unstable",
 972	"nr_vmscan_write",
 973	"nr_vmscan_immediate_reclaim",
 974	"nr_dirtied",
 975	"nr_written",
 976
 977	/* enum writeback_stat_item counters */
 978	"nr_dirty_threshold",
 979	"nr_dirty_background_threshold",
 980
 981#ifdef CONFIG_VM_EVENT_COUNTERS
 982	/* enum vm_event_item counters */
 983	"pgpgin",
 984	"pgpgout",
 985	"pswpin",
 986	"pswpout",
 987
 988	TEXTS_FOR_ZONES("pgalloc")
 989	TEXTS_FOR_ZONES("allocstall")
 990	TEXTS_FOR_ZONES("pgskip")
 991
 992	"pgfree",
 993	"pgactivate",
 994	"pgdeactivate",
 995
 996	"pgfault",
 997	"pgmajfault",
 998	"pglazyfreed",
 999
1000	"pgrefill",
1001	"pgsteal_kswapd",
1002	"pgsteal_direct",
1003	"pgscan_kswapd",
1004	"pgscan_direct",
1005	"pgscan_direct_throttle",
1006
1007#ifdef CONFIG_NUMA
1008	"zone_reclaim_failed",
1009#endif
1010	"pginodesteal",
1011	"slabs_scanned",
1012	"kswapd_inodesteal",
1013	"kswapd_low_wmark_hit_quickly",
1014	"kswapd_high_wmark_hit_quickly",
1015	"pageoutrun",
1016
1017	"pgrotated",
1018
1019	"drop_pagecache",
1020	"drop_slab",
1021
1022#ifdef CONFIG_NUMA_BALANCING
1023	"numa_pte_updates",
1024	"numa_huge_pte_updates",
1025	"numa_hint_faults",
1026	"numa_hint_faults_local",
1027	"numa_pages_migrated",
1028#endif
1029#ifdef CONFIG_MIGRATION
1030	"pgmigrate_success",
1031	"pgmigrate_fail",
1032#endif
1033#ifdef CONFIG_COMPACTION
1034	"compact_migrate_scanned",
1035	"compact_free_scanned",
1036	"compact_isolated",
1037	"compact_stall",
1038	"compact_fail",
1039	"compact_success",
1040	"compact_daemon_wake",
1041#endif
1042
1043#ifdef CONFIG_HUGETLB_PAGE
1044	"htlb_buddy_alloc_success",
1045	"htlb_buddy_alloc_fail",
1046#endif
1047	"unevictable_pgs_culled",
1048	"unevictable_pgs_scanned",
1049	"unevictable_pgs_rescued",
1050	"unevictable_pgs_mlocked",
1051	"unevictable_pgs_munlocked",
1052	"unevictable_pgs_cleared",
1053	"unevictable_pgs_stranded",
1054
1055#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1056	"thp_fault_alloc",
1057	"thp_fault_fallback",
1058	"thp_collapse_alloc",
1059	"thp_collapse_alloc_failed",
1060	"thp_file_alloc",
1061	"thp_file_mapped",
1062	"thp_split_page",
1063	"thp_split_page_failed",
1064	"thp_deferred_split_page",
1065	"thp_split_pmd",
1066	"thp_zero_page_alloc",
1067	"thp_zero_page_alloc_failed",
1068#endif
1069#ifdef CONFIG_MEMORY_BALLOON
1070	"balloon_inflate",
1071	"balloon_deflate",
1072#ifdef CONFIG_BALLOON_COMPACTION
1073	"balloon_migrate",
1074#endif
1075#endif /* CONFIG_MEMORY_BALLOON */
1076#ifdef CONFIG_DEBUG_TLBFLUSH
1077#ifdef CONFIG_SMP
1078	"nr_tlb_remote_flush",
1079	"nr_tlb_remote_flush_received",
1080#endif /* CONFIG_SMP */
1081	"nr_tlb_local_flush_all",
1082	"nr_tlb_local_flush_one",
1083#endif /* CONFIG_DEBUG_TLBFLUSH */
1084
1085#ifdef CONFIG_DEBUG_VM_VMACACHE
1086	"vmacache_find_calls",
1087	"vmacache_find_hits",
1088	"vmacache_full_flushes",
1089#endif
1090#endif /* CONFIG_VM_EVENT_COUNTERS */
1091};
1092#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
1093
1094
1095#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
1096     defined(CONFIG_PROC_FS)
1097static void *frag_start(struct seq_file *m, loff_t *pos)
1098{
1099	pg_data_t *pgdat;
1100	loff_t node = *pos;
1101
1102	for (pgdat = first_online_pgdat();
1103	     pgdat && node;
1104	     pgdat = next_online_pgdat(pgdat))
1105		--node;
1106
1107	return pgdat;
1108}
1109
1110static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
1111{
1112	pg_data_t *pgdat = (pg_data_t *)arg;
1113
1114	(*pos)++;
1115	return next_online_pgdat(pgdat);
1116}
1117
1118static void frag_stop(struct seq_file *m, void *arg)
1119{
1120}
1121
1122/* Walk all the zones in a node and print using a callback */
1123static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
1124		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
1125{
1126	struct zone *zone;
1127	struct zone *node_zones = pgdat->node_zones;
1128	unsigned long flags;
1129
1130	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
1131		if (!populated_zone(zone))
1132			continue;
1133
1134		spin_lock_irqsave(&zone->lock, flags);
1135		print(m, pgdat, zone);
1136		spin_unlock_irqrestore(&zone->lock, flags);
1137	}
1138}
1139#endif
1140
1141#ifdef CONFIG_PROC_FS
1142static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
1143						struct zone *zone)
1144{
1145	int order;
1146
1147	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1148	for (order = 0; order < MAX_ORDER; ++order)
1149		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
1150	seq_putc(m, '\n');
1151}
1152
1153/*
1154 * This walks the free areas for each zone.
1155 */
1156static int frag_show(struct seq_file *m, void *arg)
1157{
1158	pg_data_t *pgdat = (pg_data_t *)arg;
1159	walk_zones_in_node(m, pgdat, frag_show_print);
1160	return 0;
1161}
1162
1163static void pagetypeinfo_showfree_print(struct seq_file *m,
1164					pg_data_t *pgdat, struct zone *zone)
1165{
1166	int order, mtype;
1167
1168	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
1169		seq_printf(m, "Node %4d, zone %8s, type %12s ",
1170					pgdat->node_id,
1171					zone->name,
1172					migratetype_names[mtype]);
1173		for (order = 0; order < MAX_ORDER; ++order) {
1174			unsigned long freecount = 0;
1175			struct free_area *area;
1176			struct list_head *curr;
1177
1178			area = &(zone->free_area[order]);
1179
1180			list_for_each(curr, &area->free_list[mtype])
1181				freecount++;
1182			seq_printf(m, "%6lu ", freecount);
1183		}
1184		seq_putc(m, '\n');
1185	}
1186}
1187
1188/* Print out the free pages at each order for each migratetype */
1189static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
1190{
1191	int order;
1192	pg_data_t *pgdat = (pg_data_t *)arg;
1193
1194	/* Print header */
1195	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
1196	for (order = 0; order < MAX_ORDER; ++order)
1197		seq_printf(m, "%6d ", order);
1198	seq_putc(m, '\n');
1199
1200	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);
1201
1202	return 0;
1203}
1204
1205static void pagetypeinfo_showblockcount_print(struct seq_file *m,
1206					pg_data_t *pgdat, struct zone *zone)
1207{
1208	int mtype;
1209	unsigned long pfn;
1210	unsigned long start_pfn = zone->zone_start_pfn;
1211	unsigned long end_pfn = zone_end_pfn(zone);
1212	unsigned long count[MIGRATE_TYPES] = { 0, };
1213
1214	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
1215		struct page *page;
1216
1217		if (!pfn_valid(pfn))
1218			continue;
1219
1220		page = pfn_to_page(pfn);
1221
1222		/* Watch for unexpected holes punched in the memmap */
1223		if (!memmap_valid_within(pfn, page, zone))
1224			continue;
1225
1226		if (page_zone(page) != zone)
1227			continue;
1228
1229		mtype = get_pageblock_migratetype(page);
1230
1231		if (mtype < MIGRATE_TYPES)
1232			count[mtype]++;
1233	}
1234
1235	/* Print counts */
1236	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1237	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1238		seq_printf(m, "%12lu ", count[mtype]);
1239	seq_putc(m, '\n');
1240}
1241
1242/* Print out the number of pageblocks of each migratetype */
1243static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
1244{
1245	int mtype;
1246	pg_data_t *pgdat = (pg_data_t *)arg;
1247
1248	seq_printf(m, "\n%-23s", "Number of blocks type ");
1249	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1250		seq_printf(m, "%12s ", migratetype_names[mtype]);
1251	seq_putc(m, '\n');
1252	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);
1253
1254	return 0;
1255}
1256
1257/*
1258 * Print out the number of pageblocks for each migratetype that contain pages
1259 * of other types. This gives an indication of how well fallbacks are being
1260 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
1261 * to determine what is going on
1262 */
1263static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
1264{
1265#ifdef CONFIG_PAGE_OWNER
1266	int mtype;
1267
1268	if (!static_branch_unlikely(&page_owner_inited))
1269		return;
1270
1271	drain_all_pages(NULL);
1272
1273	seq_printf(m, "\n%-23s", "Number of mixed blocks ");
1274	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1275		seq_printf(m, "%12s ", migratetype_names[mtype]);
1276	seq_putc(m, '\n');
1277
1278	walk_zones_in_node(m, pgdat, pagetypeinfo_showmixedcount_print);
1279#endif /* CONFIG_PAGE_OWNER */
1280}
1281
1282/*
1283 * This prints out statistics in relation to grouping pages by mobility.
1284 * It is expensive to collect so do not constantly read the file.
1285 */
1286static int pagetypeinfo_show(struct seq_file *m, void *arg)
1287{
1288	pg_data_t *pgdat = (pg_data_t *)arg;
1289
1290	/* check memoryless node */
1291	if (!node_state(pgdat->node_id, N_MEMORY))
1292		return 0;
1293
1294	seq_printf(m, "Page block order: %d\n", pageblock_order);
1295	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
1296	seq_putc(m, '\n');
1297	pagetypeinfo_showfree(m, pgdat);
1298	pagetypeinfo_showblockcount(m, pgdat);
1299	pagetypeinfo_showmixedcount(m, pgdat);
1300
1301	return 0;
1302}
1303
1304static const struct seq_operations fragmentation_op = {
1305	.start	= frag_start,
1306	.next	= frag_next,
1307	.stop	= frag_stop,
1308	.show	= frag_show,
1309};
1310
1311static int fragmentation_open(struct inode *inode, struct file *file)
1312{
1313	return seq_open(file, &fragmentation_op);
1314}
1315
1316static const struct file_operations fragmentation_file_operations = {
1317	.open		= fragmentation_open,
1318	.read		= seq_read,
1319	.llseek		= seq_lseek,
1320	.release	= seq_release,
1321};
1322
1323static const struct seq_operations pagetypeinfo_op = {
1324	.start	= frag_start,
1325	.next	= frag_next,
1326	.stop	= frag_stop,
1327	.show	= pagetypeinfo_show,
1328};
1329
1330static int pagetypeinfo_open(struct inode *inode, struct file *file)
1331{
1332	return seq_open(file, &pagetypeinfo_op);
1333}
1334
1335static const struct file_operations pagetypeinfo_file_ops = {
1336	.open		= pagetypeinfo_open,
1337	.read		= seq_read,
1338	.llseek		= seq_lseek,
1339	.release	= seq_release,
1340};
1341
1342static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
1343{
1344	int zid;
1345
1346	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1347		struct zone *compare = &pgdat->node_zones[zid];
1348
1349		if (populated_zone(compare))
1350			return zone == compare;
1351	}
1352
1353	/* The zone must be somewhere! */
1354	WARN_ON_ONCE(1);
1355	return false;
1356}
1357
1358static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1359							struct zone *zone)
1360{
1361	int i;
1362	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1363	if (is_zone_first_populated(pgdat, zone)) {
1364		seq_printf(m, "\n  per-node stats");
1365		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1366			seq_printf(m, "\n      %-12s %lu",
1367				vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
1368				node_page_state(pgdat, i));
1369		}
1370	}
1371	seq_printf(m,
1372		   "\n  pages free     %lu"
1373		   "\n        min      %lu"
1374		   "\n        low      %lu"
1375		   "\n        high     %lu"
1376		   "\n   node_scanned  %lu"
1377		   "\n        spanned  %lu"
1378		   "\n        present  %lu"
1379		   "\n        managed  %lu",
1380		   zone_page_state(zone, NR_FREE_PAGES),
1381		   min_wmark_pages(zone),
1382		   low_wmark_pages(zone),
1383		   high_wmark_pages(zone),
1384		   node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED),
1385		   zone->spanned_pages,
1386		   zone->present_pages,
1387		   zone->managed_pages);
1388
1389	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1390		seq_printf(m, "\n      %-12s %lu", vmstat_text[i],
1391				zone_page_state(zone, i));
1392
1393	seq_printf(m,
1394		   "\n        protection: (%ld",
1395		   zone->lowmem_reserve[0]);
1396	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
1397		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
1398	seq_printf(m,
1399		   ")"
1400		   "\n  pagesets");
1401	for_each_online_cpu(i) {
1402		struct per_cpu_pageset *pageset;
1403
1404		pageset = per_cpu_ptr(zone->pageset, i);
1405		seq_printf(m,
1406			   "\n    cpu: %i"
1407			   "\n              count: %i"
1408			   "\n              high:  %i"
1409			   "\n              batch: %i",
1410			   i,
1411			   pageset->pcp.count,
1412			   pageset->pcp.high,
1413			   pageset->pcp.batch);
1414#ifdef CONFIG_SMP
1415		seq_printf(m, "\n  vm stats threshold: %d",
1416				pageset->stat_threshold);
1417#endif
1418	}
1419	seq_printf(m,
1420		   "\n  node_unreclaimable:  %u"
1421		   "\n  start_pfn:           %lu"
1422		   "\n  node_inactive_ratio: %u",
1423		   !pgdat_reclaimable(zone->zone_pgdat),
1424		   zone->zone_start_pfn,
1425		   zone->zone_pgdat->inactive_ratio);
1426	seq_putc(m, '\n');
1427}
1428
1429/*
1430 * Output information about zones in @pgdat.
1431 */
1432static int zoneinfo_show(struct seq_file *m, void *arg)
1433{
1434	pg_data_t *pgdat = (pg_data_t *)arg;
1435	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
1436	return 0;
1437}
1438
1439static const struct seq_operations zoneinfo_op = {
1440	.start	= frag_start, /* iterate over all zones. The same as in
1441			       * fragmentation. */
1442	.next	= frag_next,
1443	.stop	= frag_stop,
1444	.show	= zoneinfo_show,
1445};
1446
1447static int zoneinfo_open(struct inode *inode, struct file *file)
1448{
1449	return seq_open(file, &zoneinfo_op);
1450}
1451
1452static const struct file_operations proc_zoneinfo_file_operations = {
1453	.open		= zoneinfo_open,
1454	.read		= seq_read,
1455	.llseek		= seq_lseek,
1456	.release	= seq_release,
1457};
1458
1459enum writeback_stat_item {
1460	NR_DIRTY_THRESHOLD,
1461	NR_DIRTY_BG_THRESHOLD,
1462	NR_VM_WRITEBACK_STAT_ITEMS,
1463};
1464
1465static void *vmstat_start(struct seq_file *m, loff_t *pos)
1466{
1467	unsigned long *v;
1468	int i, stat_items_size;
1469
1470	if (*pos >= ARRAY_SIZE(vmstat_text))
1471		return NULL;
1472	stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
1473			  NR_VM_NODE_STAT_ITEMS * sizeof(unsigned long) +
1474			  NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
1475
1476#ifdef CONFIG_VM_EVENT_COUNTERS
1477	stat_items_size += sizeof(struct vm_event_state);
1478#endif
1479
1480	v = kmalloc(stat_items_size, GFP_KERNEL);
1481	m->private = v;
1482	if (!v)
1483		return ERR_PTR(-ENOMEM);
1484	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1485		v[i] = global_page_state(i);
1486	v += NR_VM_ZONE_STAT_ITEMS;
1487
1488	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
1489		v[i] = global_node_page_state(i);
1490	v += NR_VM_NODE_STAT_ITEMS;
1491
1492	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1493			    v + NR_DIRTY_THRESHOLD);
1494	v += NR_VM_WRITEBACK_STAT_ITEMS;
1495
1496#ifdef CONFIG_VM_EVENT_COUNTERS
1497	all_vm_events(v);
1498	v[PGPGIN] /= 2;		/* sectors -> kbytes */
1499	v[PGPGOUT] /= 2;
1500#endif
1501	return (unsigned long *)m->private + *pos;
1502}
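/*
 * Editor's note (illustrative, not part of the kernel source): the buffer
 * allocated above is a flat snapshot laid out in the same order as
 * vmstat_text[]:
 *
 *	[ zone stats | node stats | writeback thresholds | vm event counters ]
 *
 * vmstat_show() indexes this buffer and vmstat_text[] with the same offset,
 * so the two must stay in sync.
 */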
1503
1504static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1505{
1506	(*pos)++;
1507	if (*pos >= ARRAY_SIZE(vmstat_text))
1508		return NULL;
1509	return (unsigned long *)m->private + *pos;
1510}
1511
1512static int vmstat_show(struct seq_file *m, void *arg)
1513{
1514	unsigned long *l = arg;
1515	unsigned long off = l - (unsigned long *)m->private;
1516
1517	seq_puts(m, vmstat_text[off]);
1518	seq_put_decimal_ull(m, " ", *l);
1519	seq_putc(m, '\n');
1520	return 0;
1521}
1522
1523static void vmstat_stop(struct seq_file *m, void *arg)
1524{
1525	kfree(m->private);
1526	m->private = NULL;
1527}
1528
1529static const struct seq_operations vmstat_op = {
1530	.start	= vmstat_start,
1531	.next	= vmstat_next,
1532	.stop	= vmstat_stop,
1533	.show	= vmstat_show,
1534};
1535
1536static int vmstat_open(struct inode *inode, struct file *file)
1537{
1538	return seq_open(file, &vmstat_op);
1539}
1540
1541static const struct file_operations proc_vmstat_file_operations = {
1542	.open		= vmstat_open,
1543	.read		= seq_read,
1544	.llseek		= seq_lseek,
1545	.release	= seq_release,
1546};
1547#endif /* CONFIG_PROC_FS */
1548
1549#ifdef CONFIG_SMP
1550static struct workqueue_struct *vmstat_wq;
1551static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1552int sysctl_stat_interval __read_mostly = HZ;
1553
1554#ifdef CONFIG_PROC_FS
1555static void refresh_vm_stats(struct work_struct *work)
1556{
1557	refresh_cpu_vm_stats(true);
1558}
1559
1560int vmstat_refresh(struct ctl_table *table, int write,
1561		   void __user *buffer, size_t *lenp, loff_t *ppos)
1562{
1563	long val;
1564	int err;
1565	int i;
1566
1567	/*
1568	 * The regular update, every sysctl_stat_interval, may come later
1569	 * than expected: leaving a significant amount in per_cpu buckets.
1570	 * This is particularly misleading when checking a quantity of HUGE
1571	 * pages, immediately after running a test.  /proc/sys/vm/stat_refresh,
1572	 * which can equally be echo'ed to or cat'ted from (by root),
1573	 * can be used to update the stats just before reading them.
1574	 *
1575	 * Oh, and since global_page_state() etc. are so careful to hide
1576	 * transiently negative values, report an error here if any of
1577	 * the stats is negative, so we know to go looking for imbalance.
1578	 */
1579	err = schedule_on_each_cpu(refresh_vm_stats);
1580	if (err)
1581		return err;
1582	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
1583		val = atomic_long_read(&vm_zone_stat[i]);
1584		if (val < 0) {
1585			switch (i) {
1586			case NR_PAGES_SCANNED:
1587				/*
1588				 * This is often seen to go negative in
1589				 * recent kernels, but not to go permanently
1590				 * negative.  Whilst it would be nicer not to
1591				 * have exceptions, rooting them out would be
1592				 * another task, of rather low priority.
1593				 */
1594				break;
1595			default:
1596				pr_warn("%s: %s %ld\n",
1597					__func__, vmstat_text[i], val);
1598				err = -EINVAL;
1599				break;
1600			}
1601		}
1602	}
1603	if (err)
1604		return err;
1605	if (write)
1606		*ppos += *lenp;
1607	else
1608		*lenp = 0;
1609	return 0;
1610}
1611#endif /* CONFIG_PROC_FS */
1612
1613static void vmstat_update(struct work_struct *w)
1614{
1615	if (refresh_cpu_vm_stats(true)) {
1616		/*
1617		 * Counters were updated so we expect more updates
1618		 * to occur in the future. Keep on running the
1619		 * update worker thread.
1620		 */
1621		queue_delayed_work_on(smp_processor_id(), vmstat_wq,
1622				this_cpu_ptr(&vmstat_work),
1623				round_jiffies_relative(sysctl_stat_interval));
1624	}
1625}
1626
1632/*
1633 * Check if the diffs for a certain cpu indicate that
1634 * an update is needed.
1635 */
1636static bool need_update(int cpu)
1637{
1638	struct zone *zone;
1639
1640	for_each_populated_zone(zone) {
1641		struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);
1642
1643		BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1);
1644		/*
1645		 * The fast way of checking if there are any vmstat diffs.
1646		 * This works because the diffs are byte sized items.
1647		 */
1648		if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS))
1649			return true;
1650
1651	}
1652	return false;
1653}
1654
1655/*
1656 * Switch off vmstat processing and then fold all the remaining differentials
1657 * until the diffs stay at zero. The function is used by NOHZ and can only be
1658 * invoked when tick processing is not active.
1659 */
1660void quiet_vmstat(void)
1661{
1662	if (system_state != SYSTEM_RUNNING)
1663		return;
1664
1665	if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
1666		return;
1667
1668	if (!need_update(smp_processor_id()))
1669		return;
1670
1671	/*
1672	 * Just refresh counters and do not care about the pending delayed
1673	 * vmstat_update. It doesn't fire that often to matter and canceling
1674	 * it would be too expensive from this path.
1675	 * vmstat_shepherd will take care of that for us.
1676	 */
1677	refresh_cpu_vm_stats(false);
1678}
1679
1680/*
1681 * Shepherd worker thread that checks the
1682 * differentials of processors that have their worker
1683 * threads for vm statistics updates disabled because of
1684 * inactivity.
1685 */
1686static void vmstat_shepherd(struct work_struct *w);
1687
1688static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
1689
1690static void vmstat_shepherd(struct work_struct *w)
1691{
1692	int cpu;
1693
1694	get_online_cpus();
1695	/* Check processors whose vmstat worker threads have been disabled */
1696	for_each_online_cpu(cpu) {
1697		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
1698
1699		if (!delayed_work_pending(dw) && need_update(cpu))
1700			queue_delayed_work_on(cpu, vmstat_wq, dw, 0);
1701	}
1702	put_online_cpus();
1703
1704	schedule_delayed_work(&shepherd,
1705		round_jiffies_relative(sysctl_stat_interval));
1706}
1707
1708static void __init start_shepherd_timer(void)
1709{
1710	int cpu;
1711
1712	for_each_possible_cpu(cpu)
1713		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
1714			vmstat_update);
1715
1716	vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1717	schedule_delayed_work(&shepherd,
1718		round_jiffies_relative(sysctl_stat_interval));
1719}
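/*
 * Usage note (illustrative): both the shepherd and the per-cpu vmstat
 * workers are re-armed at sysctl_stat_interval, which defaults to HZ
 * (one second) and can be tuned at run time via the vm.stat_interval
 * sysctl, e.g.:
 *
 *   # sysctl vm.stat_interval
 *   vm.stat_interval = 1
 *   # sysctl -w vm.stat_interval=10
 */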
1720
1721static void __init init_cpu_node_state(void)
1722{
1723	int node;
1724
1725	for_each_online_node(node) {
1726		if (cpumask_weight(cpumask_of_node(node)) > 0)
1727			node_set_state(node, N_CPU);
1728	}
1729}
1730
1731static int vmstat_cpu_online(unsigned int cpu)
1732{
1733	refresh_zone_stat_thresholds();
1734	node_set_state(cpu_to_node(cpu), N_CPU);
1735	return 0;
1736}
1737
1738static int vmstat_cpu_down_prep(unsigned int cpu)
1739{
1740	cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
1741	return 0;
1742}
1743
1744static int vmstat_cpu_dead(unsigned int cpu)
1745{
1746	const struct cpumask *node_cpus;
1747	int node;
1748
1749	node = cpu_to_node(cpu);
1750
1751	refresh_zone_stat_thresholds();
1752	node_cpus = cpumask_of_node(node);
1753	if (cpumask_weight(node_cpus) > 0)
1754		return 0;
1755
1756	node_clear_state(node, N_CPU);
1757	return 0;
1758}
1759
1760#endif
1761
1762static int __init setup_vmstat(void)
1763{
1764#ifdef CONFIG_SMP
1765	int ret;
1766
1767	ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
1768					NULL, vmstat_cpu_dead);
1769	if (ret < 0)
1770		pr_err("vmstat: failed to register 'dead' hotplug state\n");
1771
1772	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
1773					vmstat_cpu_online,
1774					vmstat_cpu_down_prep);
1775	if (ret < 0)
1776		pr_err("vmstat: failed to register 'online' hotplug state\n");
1777
1778	get_online_cpus();
1779	init_cpu_node_state();
1780	put_online_cpus();
1781
1782	start_shepherd_timer();
1783#endif
1784#ifdef CONFIG_PROC_FS
1785	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
1786	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
1787	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
1788	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
1789#endif
1790	return 0;
1791}
1792module_init(setup_vmstat)
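/*
 * Usage note (illustrative, sample values made up): the procfs entries
 * registered above are read-only text interfaces; /proc/vmstat in
 * particular prints one "name value" pair per line:
 *
 *   $ cat /proc/vmstat
 *   nr_free_pages 262144
 *   nr_inactive_anon 1024
 *   ...
 *   pgfault 123456789
 */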
1793
1794#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
1795
1796/*
1797 * Return an index indicating how much of the available free memory is
1798 * unusable for an allocation of the requested size.
1799 */
1800static int unusable_free_index(unsigned int order,
1801				struct contig_page_info *info)
1802{
1803	/* No free memory is interpreted as all free memory is unusable */
1804	if (info->free_pages == 0)
1805		return 1000;
1806
1807	/*
1808	 * Index should be a value between 0 and 1. Return a value to 3
1809	 * decimal places.
1810	 *
1811	 * 0 => no fragmentation
1812	 * 1 => high fragmentation
1813	 */
1814	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
1815
1816}
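/*
 * Worked example (made-up numbers): with free_pages = 1000,
 * free_blocks_suitable = 200 and order = 2,
 *
 *   (1000 - (200 << 2)) * 1000 / 1000 = 200
 *
 * so unusable_show_print() below emits "0.200", i.e. roughly 20% of the
 * free memory cannot satisfy an order-2 allocation.
 */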
1817
1818static void unusable_show_print(struct seq_file *m,
1819					pg_data_t *pgdat, struct zone *zone)
1820{
1821	unsigned int order;
1822	int index;
1823	struct contig_page_info info;
1824
1825	seq_printf(m, "Node %d, zone %8s ",
1826				pgdat->node_id,
1827				zone->name);
1828	for (order = 0; order < MAX_ORDER; ++order) {
1829		fill_contig_page_info(zone, order, &info);
1830		index = unusable_free_index(order, &info);
1831		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1832	}
1833
1834	seq_putc(m, '\n');
1835}
1836
1837/*
1838 * Display unusable free space index
1839 *
1840 * The unusable free space index measures how much of the available free
1841 * memory cannot be used to satisfy an allocation of a given size and is a
1842 * value between 0 and 1. The higher the value, the more of free memory is
1843 * unusable and by implication, the worse the external fragmentation is. This
1844 * can be expressed as a percentage by multiplying by 100.
1845 */
1846static int unusable_show(struct seq_file *m, void *arg)
1847{
1848	pg_data_t *pgdat = (pg_data_t *)arg;
1849
1850	/* check memoryless node */
1851	if (!node_state(pgdat->node_id, N_MEMORY))
1852		return 0;
1853
1854	walk_zones_in_node(m, pgdat, unusable_show_print);
1855
1856	return 0;
1857}
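/*
 * Usage note (illustrative, values made up): with debugfs mounted at
 * /sys/kernel/debug, the file created below can be read directly, one
 * column per order from 0 to MAX_ORDER-1:
 *
 *   # cat /sys/kernel/debug/extfrag/unusable_index
 *   Node 0, zone      DMA 0.000 0.000 0.000 0.001 0.005 ...
 *   Node 0, zone   Normal 0.000 0.000 0.011 0.044 0.092 ...
 */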
1858
1859static const struct seq_operations unusable_op = {
1860	.start	= frag_start,
1861	.next	= frag_next,
1862	.stop	= frag_stop,
1863	.show	= unusable_show,
1864};
1865
1866static int unusable_open(struct inode *inode, struct file *file)
1867{
1868	return seq_open(file, &unusable_op);
1869}
1870
1871static const struct file_operations unusable_file_ops = {
1872	.open		= unusable_open,
1873	.read		= seq_read,
1874	.llseek		= seq_lseek,
1875	.release	= seq_release,
1876};
1877
1878static void extfrag_show_print(struct seq_file *m,
1879					pg_data_t *pgdat, struct zone *zone)
1880{
1881	unsigned int order;
1882	int index;
1883
1884	/* Alloc on stack as interrupts are disabled for zone walk */
1885	struct contig_page_info info;
1886
1887	seq_printf(m, "Node %d, zone %8s ",
1888				pgdat->node_id,
1889				zone->name);
1890	for (order = 0; order < MAX_ORDER; ++order) {
1891		fill_contig_page_info(zone, order, &info);
1892		index = __fragmentation_index(order, &info);
1893		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1894	}
1895
1896	seq_putc(m, '\n');
1897}
1898
1899/*
1900 * Display fragmentation index for orders that allocations would fail for
1901 */
1902static int extfrag_show(struct seq_file *m, void *arg)
1903{
1904	pg_data_t *pgdat = (pg_data_t *)arg;
1905
1906	walk_zones_in_node(m, pgdat, extfrag_show_print);
1907
1908	return 0;
1909}
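/*
 * Usage note (illustrative): extfrag_index follows the same layout and is
 * exposed as /sys/kernel/debug/extfrag/extfrag_index. Values tending
 * towards 1.000 suggest an allocation of that order would fail because of
 * external fragmentation rather than a lack of free memory; negative
 * values mean the allocation would currently succeed.
 */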
1910
1911static const struct seq_operations extfrag_op = {
1912	.start	= frag_start,
1913	.next	= frag_next,
1914	.stop	= frag_stop,
1915	.show	= extfrag_show,
1916};
1917
1918static int extfrag_open(struct inode *inode, struct file *file)
1919{
1920	return seq_open(file, &extfrag_op);
1921}
1922
1923static const struct file_operations extfrag_file_ops = {
1924	.open		= extfrag_open,
1925	.read		= seq_read,
1926	.llseek		= seq_lseek,
1927	.release	= seq_release,
1928};
1929
1930static int __init extfrag_debug_init(void)
1931{
1932	struct dentry *extfrag_debug_root;
1933
1934	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
1935	if (!extfrag_debug_root)
1936		return -ENOMEM;
1937
1938	if (!debugfs_create_file("unusable_index", 0444,
1939			extfrag_debug_root, NULL, &unusable_file_ops))
1940		goto fail;
1941
1942	if (!debugfs_create_file("extfrag_index", 0444,
1943			extfrag_debug_root, NULL, &extfrag_file_ops))
1944		goto fail;
1945
1946	return 0;
1947fail:
1948	debugfs_remove_recursive(extfrag_debug_root);
1949	return -ENOMEM;
1950}
1951
1952module_init(extfrag_debug_init);
1953#endif