v4.6
   1/*
   2 *  linux/mm/vmstat.c
   3 *
   4 *  Manages VM statistics
   5 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   6 *
   7 *  zoned VM statistics
   8 *  Copyright (C) 2006 Silicon Graphics, Inc.,
   9 *		Christoph Lameter <christoph@lameter.com>
  10 *  Copyright (C) 2008-2014 Christoph Lameter
  11 */
  12#include <linux/fs.h>
  13#include <linux/mm.h>
  14#include <linux/err.h>
  15#include <linux/module.h>
  16#include <linux/slab.h>
  17#include <linux/cpu.h>
  18#include <linux/cpumask.h>
  19#include <linux/vmstat.h>
  20#include <linux/proc_fs.h>
  21#include <linux/seq_file.h>
  22#include <linux/debugfs.h>
  23#include <linux/sched.h>
  24#include <linux/math64.h>
  25#include <linux/writeback.h>
  26#include <linux/compaction.h>
  27#include <linux/mm_inline.h>
  28#include <linux/page_ext.h>
  29#include <linux/page_owner.h>
  30
  31#include "internal.h"
  32
  33#ifdef CONFIG_VM_EVENT_COUNTERS
  34DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
  35EXPORT_PER_CPU_SYMBOL(vm_event_states);
  36
  37static void sum_vm_events(unsigned long *ret)
  38{
  39	int cpu;
  40	int i;
  41
  42	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
  43
  44	for_each_online_cpu(cpu) {
  45		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
  46
  47		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
  48			ret[i] += this->event[i];
  49	}
  50}
  51
  52/*
  53 * Accumulate the vm event counters across all CPUs.
  54 * The result is unavoidably approximate - it can change
  55 * during and after execution of this function.
  56*/
  57void all_vm_events(unsigned long *ret)
  58{
  59	get_online_cpus();
  60	sum_vm_events(ret);
  61	put_online_cpus();
  62}
  63EXPORT_SYMBOL_GPL(all_vm_events);
  64
  65/*
  66 * Fold the foreign cpu events into our own.
  67 *
  68 * This is adding to the events on one processor
  69 * but keeps the global counts constant.
  70 */
  71void vm_events_fold_cpu(int cpu)
  72{
  73	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
  74	int i;
  75
  76	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
  77		count_vm_events(i, fold_state->event[i]);
  78		fold_state->event[i] = 0;
  79	}
  80}
  81
  82#endif /* CONFIG_VM_EVENT_COUNTERS */
  83
  84/*
  85 * Manage combined zone based / global counters
  86 *
  87 * vm_stat contains the global counters
  88 */
  89atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
  90EXPORT_SYMBOL(vm_stat);
  91
  92#ifdef CONFIG_SMP
  93
  94int calculate_pressure_threshold(struct zone *zone)
  95{
  96	int threshold;
  97	int watermark_distance;
  98
  99	/*
 100	 * As vmstats are not up to date, there is drift between the estimated
 101	 * and real values. For high thresholds and a high number of CPUs, it
 102	 * is possible for the min watermark to be breached while the estimated
 103	 * value looks fine. The pressure threshold is a reduced value such
 104	 * that even the maximum amount of drift will not accidentally breach
 105	 * the min watermark
 106	 */
 107	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
 108	threshold = max(1, (int)(watermark_distance / num_online_cpus()));
 109
 110	/*
 111	 * Maximum threshold is 125
 112	 */
 113	threshold = min(125, threshold);
 114
 115	return threshold;
 116}
 117
 118int calculate_normal_threshold(struct zone *zone)
 119{
 120	int threshold;
 121	int mem;	/* memory in 128 MB units */
 122
 123	/*
 124	 * The threshold scales with the number of processors and the amount
 125	 * of memory per zone. More memory means that we can defer updates for
 126	 * longer, more processors could lead to more contention.
 127 	 * fls() is used to have a cheap way of logarithmic scaling.
 128	 *
 129	 * Some sample thresholds:
 130	 *
 131	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
 132	 * ------------------------------------------------------------------
 133	 * 8		1		1	0.9-1 GB	4
 134	 * 16		2		2	0.9-1 GB	4
 135	 * 20 		2		2	1-2 GB		5
 136	 * 24		2		2	2-4 GB		6
 137	 * 28		2		2	4-8 GB		7
 138	 * 32		2		2	8-16 GB		8
 139	 * 4		2		2	<128M		1
 140	 * 30		4		3	2-4 GB		5
 141	 * 48		4		3	8-16 GB		8
 142	 * 32		8		4	1-2 GB		4
 143	 * 32		8		4	0.9-1GB		4
 144	 * 10		16		5	<128M		1
 145	 * 40		16		5	900M		4
 146	 * 70		64		7	2-4 GB		5
 147	 * 84		64		7	4-8 GB		6
 148	 * 108		512		9	4-8 GB		6
 149	 * 125		1024		10	8-16 GB		8
 150	 * 125		1024		10	16-32 GB	9
 151	 */
 152
 153	mem = zone->managed_pages >> (27 - PAGE_SHIFT);
 154
 155	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
 156
 157	/*
 158	 * Maximum threshold is 125
 159	 */
 160	threshold = min(125, threshold);
 161
 162	return threshold;
 163}
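For a concrete feel for the formula above, the following standalone sketch (illustrative userspace C, not part of vmstat.c; fls_sketch() is a stand-in for the kernel's fls()) reproduces the "1-2 GB, 2 processors" row of the table:

#include <stdio.h>

/* Stand-in for the kernel's fls(): position of the highest set bit, 0 for 0. */
static int fls_sketch(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	int cpus = 2;
	unsigned long zone_pages = (1536UL << 20) >> 12;	/* ~1.5 GB of 4 KiB pages */
	int mem = zone_pages >> (27 - 12);			/* zone size in 128 MB units: 12 */
	int threshold = 2 * fls_sketch(cpus) * (1 + fls_sketch(mem));

	if (threshold > 125)					/* same cap as the kernel code */
		threshold = 125;
	printf("mem=%d units, threshold=%d\n", mem, threshold);	/* prints threshold=20 */
	return 0;
}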
 164
 165/*
 166 * Refresh the thresholds for each zone.
 167 */
 168void refresh_zone_stat_thresholds(void)
 169{
 170	struct zone *zone;
 171	int cpu;
 172	int threshold;
 173
 174	for_each_populated_zone(zone) {
 175		unsigned long max_drift, tolerate_drift;
 176
 177		threshold = calculate_normal_threshold(zone);
 178
 179		for_each_online_cpu(cpu)
 180			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
 181							= threshold;
 182
 183		/*
 184		 * Only set percpu_drift_mark if there is a danger that
 185		 * NR_FREE_PAGES reports the low watermark is ok when in fact
 186		 * the min watermark could be breached by an allocation
 187		 */
 188		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
 189		max_drift = num_online_cpus() * threshold;
 190		if (max_drift > tolerate_drift)
 191			zone->percpu_drift_mark = high_wmark_pages(zone) +
 192					max_drift;
 193	}
 194}
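To make the drift check above concrete (made-up numbers): with 4 online CPUs and a per-CPU threshold of 36, the global NR_FREE_PAGES value can lag reality by up to 4 * 36 = 144 pages. If the gap between the low and min watermarks is only 100 pages, that drift could hide a min-watermark breach, so percpu_drift_mark is set and watermark checks near it can fall back to an exact snapshot of the counter.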
 195
 196void set_pgdat_percpu_threshold(pg_data_t *pgdat,
 197				int (*calculate_pressure)(struct zone *))
 198{
 199	struct zone *zone;
 200	int cpu;
 201	int threshold;
 202	int i;
 203
 204	for (i = 0; i < pgdat->nr_zones; i++) {
 205		zone = &pgdat->node_zones[i];
 206		if (!zone->percpu_drift_mark)
 207			continue;
 208
 209		threshold = (*calculate_pressure)(zone);
 210		for_each_online_cpu(cpu)
 211			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
 212							= threshold;
 213	}
 214}
 215
 216/*
 217 * For use when we know that interrupts are disabled,
 218 * or when we know that preemption is disabled and that
 219 * particular counter cannot be updated from interrupt context.
 220 */
 221void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 222			   long delta)
 223{
 224	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 225	s8 __percpu *p = pcp->vm_stat_diff + item;
 226	long x;
 227	long t;
 228
 229	x = delta + __this_cpu_read(*p);
 230
 231	t = __this_cpu_read(pcp->stat_threshold);
 232
 233	if (unlikely(x > t || x < -t)) {
 234		zone_page_state_add(x, zone, item);
 235		x = 0;
 236	}
 237	__this_cpu_write(*p, x);
 238}
 239EXPORT_SYMBOL(__mod_zone_page_state);
 240
 241/*
 242 * Optimized increment and decrement functions.
 243 *
 244 * These are only for a single page and therefore can take a struct page *
 245 * argument instead of struct zone *. This allows the inclusion of the code
 246 * generated for page_zone(page) into the optimized functions.
 247 *
 248 * No overflow check is necessary and therefore the differential can be
 249 * incremented or decremented in place which may allow the compilers to
 250 * generate better code.
 251 * The increment or decrement is known and therefore one boundary check can
 252 * be omitted.
 253 *
 254 * NOTE: These functions are very performance sensitive. Change only
 255 * with care.
 256 *
 257 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 258 * However, the code must first determine the differential location in a zone
 259 * based on the processor number and then inc/dec the counter. There is no
 260 * guarantee without disabling preemption that the processor will not change
 261 * in between and therefore the atomicity vs. interrupt cannot be exploited
 262 * in a useful way here.
 263 */
 264void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 265{
 266	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 267	s8 __percpu *p = pcp->vm_stat_diff + item;
 268	s8 v, t;
 269
 270	v = __this_cpu_inc_return(*p);
 271	t = __this_cpu_read(pcp->stat_threshold);
 272	if (unlikely(v > t)) {
 273		s8 overstep = t >> 1;
 274
 275		zone_page_state_add(v + overstep, zone, item);
 276		__this_cpu_write(*p, -overstep);
 277	}
 278}
 279
 280void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
 281{
 282	__inc_zone_state(page_zone(page), item);
 283}
 284EXPORT_SYMBOL(__inc_zone_page_state);
 285
 286void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 287{
 288	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 289	s8 __percpu *p = pcp->vm_stat_diff + item;
 290	s8 v, t;
 291
 292	v = __this_cpu_dec_return(*p);
 293	t = __this_cpu_read(pcp->stat_threshold);
 294	if (unlikely(v < - t)) {
 295		s8 overstep = t >> 1;
 296
 297		zone_page_state_add(v - overstep, zone, item);
 298		__this_cpu_write(*p, overstep);
 299	}
 300}
 301
 302void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
 303{
 304	__dec_zone_state(page_zone(page), item);
 305}
 306EXPORT_SYMBOL(__dec_zone_page_state);
 307
 308#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
 309/*
 310 * If we have cmpxchg_local support then we do not need to incur the overhead
 311 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
 312 *
 313 * mod_state() modifies the zone counter state through atomic per cpu
 314 * operations.
 315 *
  316 * Overstep mode specifies how overstep should be handled:
 317 *     0       No overstepping
 318 *     1       Overstepping half of threshold
 319 *     -1      Overstepping minus half of threshold
 320*/
 321static inline void mod_state(struct zone *zone, enum zone_stat_item item,
 322			     long delta, int overstep_mode)
 323{
 324	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 325	s8 __percpu *p = pcp->vm_stat_diff + item;
 326	long o, n, t, z;
 327
 328	do {
 329		z = 0;  /* overflow to zone counters */
 330
 331		/*
 332		 * The fetching of the stat_threshold is racy. We may apply
  333		 * a counter threshold to the wrong cpu if we get
 334		 * rescheduled while executing here. However, the next
 335		 * counter update will apply the threshold again and
 336		 * therefore bring the counter under the threshold again.
 337		 *
  338		 * Most of the time the thresholds are the same anyway
 339		 * for all cpus in a zone.
 340		 */
 341		t = this_cpu_read(pcp->stat_threshold);
 342
 343		o = this_cpu_read(*p);
 344		n = delta + o;
 345
 346		if (n > t || n < -t) {
  347			int os = overstep_mode * (t >> 1);
 348
 349			/* Overflow must be added to zone counters */
 350			z = n + os;
 351			n = -os;
 352		}
 353	} while (this_cpu_cmpxchg(*p, o, n) != o);
 354
 355	if (z)
 356		zone_page_state_add(z, zone, item);
 357}
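The compare-and-swap loop above is easier to see in a minimal userspace analogue (illustrative only; it uses C11 atomics instead of this_cpu_cmpxchg() and made-up names, and models a single counter rather than a per-CPU array): retry until the differential update lands atomically, then fold any overflow into the global counter outside the loop.

#include <stdatomic.h>
#include <stdio.h>

static atomic_long global_stat;			/* stands in for vm_stat[item] */
static _Atomic signed char cpu_diff;		/* stands in for pcp->vm_stat_diff[item] */

static void mod_state_sketch(long delta, signed char t, int overstep_mode)
{
	signed char o, n;
	long z;

	do {
		z = 0;				/* amount to fold into the global counter */
		o = atomic_load(&cpu_diff);
		n = o + delta;
		if (n > t || n < -t) {
			signed char os = overstep_mode * (t >> 1);

			z = n + os;		/* overflow pushed out to the global counter */
			n = -os;		/* leave headroom in the update direction */
		}
	} while (!atomic_compare_exchange_weak(&cpu_diff, &o, n));

	if (z)
		atomic_fetch_add(&global_stat, z);
}

int main(void)
{
	for (int i = 0; i < 100; i++)
		mod_state_sketch(1, 8, 1);	/* 100 increments, threshold 8, overstep +t/2 */
	printf("global=%ld, diff=%d\n", atomic_load(&global_stat),
	       (int)atomic_load(&cpu_diff));
	return 0;
}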
 358
 359void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 360			 long delta)
 361{
 362	mod_state(zone, item, delta, 0);
 363}
 364EXPORT_SYMBOL(mod_zone_page_state);
 365
 366void inc_zone_state(struct zone *zone, enum zone_stat_item item)
 367{
 368	mod_state(zone, item, 1, 1);
 369}
 370
 371void inc_zone_page_state(struct page *page, enum zone_stat_item item)
 372{
 373	mod_state(page_zone(page), item, 1, 1);
 374}
 375EXPORT_SYMBOL(inc_zone_page_state);
 376
 377void dec_zone_page_state(struct page *page, enum zone_stat_item item)
 378{
 379	mod_state(page_zone(page), item, -1, -1);
 380}
 381EXPORT_SYMBOL(dec_zone_page_state);
 382#else
 383/*
 384 * Use interrupt disable to serialize counter updates
 385 */
 386void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 387			 long delta)
 388{
 389	unsigned long flags;
 390
 391	local_irq_save(flags);
 392	__mod_zone_page_state(zone, item, delta);
 393	local_irq_restore(flags);
 394}
 395EXPORT_SYMBOL(mod_zone_page_state);
 396
 397void inc_zone_state(struct zone *zone, enum zone_stat_item item)
 398{
 399	unsigned long flags;
 400
 401	local_irq_save(flags);
 402	__inc_zone_state(zone, item);
 403	local_irq_restore(flags);
 404}
 405
 406void inc_zone_page_state(struct page *page, enum zone_stat_item item)
 407{
 408	unsigned long flags;
 409	struct zone *zone;
 410
 411	zone = page_zone(page);
 412	local_irq_save(flags);
 413	__inc_zone_state(zone, item);
 414	local_irq_restore(flags);
 415}
 416EXPORT_SYMBOL(inc_zone_page_state);
 417
 418void dec_zone_page_state(struct page *page, enum zone_stat_item item)
 419{
 420	unsigned long flags;
 421
 422	local_irq_save(flags);
 423	__dec_zone_page_state(page, item);
 424	local_irq_restore(flags);
 425}
 426EXPORT_SYMBOL(dec_zone_page_state);
 427#endif
 428
 429
 430/*
 431 * Fold a differential into the global counters.
 432 * Returns the number of counters updated.
 433 */
 434static int fold_diff(int *diff)
 435{
 436	int i;
 437	int changes = 0;
 438
 439	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 440		if (diff[i]) {
 441			atomic_long_add(diff[i], &vm_stat[i]);
 442			changes++;
 443	}
 444	return changes;
 445}
 446
 447/*
 448 * Update the zone counters for the current cpu.
 449 *
 450 * Note that refresh_cpu_vm_stats strives to only access
 451 * node local memory. The per cpu pagesets on remote zones are placed
 452 * in the memory local to the processor using that pageset. So the
 453 * loop over all zones will access a series of cachelines local to
 454 * the processor.
 455 *
 456 * The call to zone_page_state_add updates the cachelines with the
 457 * statistics in the remote zone struct as well as the global cachelines
 458 * with the global counters. These could cause remote node cache line
 459 * bouncing and will have to be only done when necessary.
 460 *
 461 * The function returns the number of global counters updated.
 462 */
 463static int refresh_cpu_vm_stats(bool do_pagesets)
 464{
 465	struct zone *zone;
 466	int i;
 467	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 468	int changes = 0;
 469
 470	for_each_populated_zone(zone) {
 471		struct per_cpu_pageset __percpu *p = zone->pageset;
 472
 473		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
 474			int v;
 475
 476			v = this_cpu_xchg(p->vm_stat_diff[i], 0);
 477			if (v) {
 478
 479				atomic_long_add(v, &zone->vm_stat[i]);
 480				global_diff[i] += v;
 481#ifdef CONFIG_NUMA
 482				/* 3 seconds idle till flush */
 483				__this_cpu_write(p->expire, 3);
 484#endif
 485			}
 486		}
 487#ifdef CONFIG_NUMA
 488		if (do_pagesets) {
 489			cond_resched();
 490			/*
 491			 * Deal with draining the remote pageset of this
 492			 * processor
 493			 *
 494			 * Check if there are pages remaining in this pageset
 495			 * if not then there is nothing to expire.
 496			 */
 497			if (!__this_cpu_read(p->expire) ||
 498			       !__this_cpu_read(p->pcp.count))
 499				continue;
 500
 501			/*
 502			 * We never drain zones local to this processor.
 503			 */
 504			if (zone_to_nid(zone) == numa_node_id()) {
 505				__this_cpu_write(p->expire, 0);
 506				continue;
 507			}
 508
 509			if (__this_cpu_dec_return(p->expire))
 510				continue;
 511
 512			if (__this_cpu_read(p->pcp.count)) {
 513				drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
 514				changes++;
 515			}
 516		}
 517#endif
 518	}
 519	changes += fold_diff(global_diff);
 520	return changes;
 521}
 522
 523/*
 524 * Fold the data for an offline cpu into the global array.
 525 * There cannot be any access by the offline cpu and therefore
 526 * synchronization is simplified.
 527 */
 528void cpu_vm_stats_fold(int cpu)
 529{
 530	struct zone *zone;
 531	int i;
 532	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 533
 534	for_each_populated_zone(zone) {
 535		struct per_cpu_pageset *p;
 536
 537		p = per_cpu_ptr(zone->pageset, cpu);
 538
 539		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 540			if (p->vm_stat_diff[i]) {
 541				int v;
 542
 543				v = p->vm_stat_diff[i];
 544				p->vm_stat_diff[i] = 0;
 545				atomic_long_add(v, &zone->vm_stat[i]);
 546				global_diff[i] += v;
 547			}
 548	}
 549
 550	fold_diff(global_diff);
 551}
 552
 553/*
  554 * This is only called if !populated_zone(zone), which implies no other users of
  555 * pset->vm_stat_diff[] exist.
 556 */
 557void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
 558{
 559	int i;
 560
 561	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 562		if (pset->vm_stat_diff[i]) {
 563			int v = pset->vm_stat_diff[i];
 564			pset->vm_stat_diff[i] = 0;
 565			atomic_long_add(v, &zone->vm_stat[i]);
 566			atomic_long_add(v, &vm_stat[i]);
 567		}
 568}
 569#endif
 570
 571#ifdef CONFIG_NUMA
 572/*
 573 * zonelist = the list of zones passed to the allocator
 574 * z 	    = the zone from which the allocation occurred.
 575 *
 576 * Must be called with interrupts disabled.
 577 *
 578 * When __GFP_OTHER_NODE is set assume the node of the preferred
 579 * zone is the local node. This is useful for daemons who allocate
 580 * memory on behalf of other processes.
 581 */
 582void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
 583{
 584	if (z->zone_pgdat == preferred_zone->zone_pgdat) {
 585		__inc_zone_state(z, NUMA_HIT);
 586	} else {
 587		__inc_zone_state(z, NUMA_MISS);
 588		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
 589	}
 590	if (z->node == ((flags & __GFP_OTHER_NODE) ?
 591			preferred_zone->node : numa_node_id()))
 592		__inc_zone_state(z, NUMA_LOCAL);
 593	else
 594		__inc_zone_state(z, NUMA_OTHER);
 595}
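For example, if a task running on node 0 asks for memory and the allocator satisfies it from a node 1 zone, the code above bumps NUMA_MISS on the node 1 zone that supplied the page and NUMA_FOREIGN on the preferred node 0 zone; NUMA_HIT is only counted when the supplying zone sits on the preferred node, and NUMA_LOCAL/NUMA_OTHER record whether the page came from the node the allocation is accounted to (the local node, or the preferred node when __GFP_OTHER_NODE is set).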
 596
 597/*
 598 * Determine the per node value of a stat item.
 599 */
 600unsigned long node_page_state(int node, enum zone_stat_item item)
 601{
 602	struct zone *zones = NODE_DATA(node)->node_zones;
 603
 604	return
 605#ifdef CONFIG_ZONE_DMA
 606		zone_page_state(&zones[ZONE_DMA], item) +
 607#endif
 608#ifdef CONFIG_ZONE_DMA32
 609		zone_page_state(&zones[ZONE_DMA32], item) +
 610#endif
 611#ifdef CONFIG_HIGHMEM
 612		zone_page_state(&zones[ZONE_HIGHMEM], item) +
 613#endif
 614		zone_page_state(&zones[ZONE_NORMAL], item) +
 615		zone_page_state(&zones[ZONE_MOVABLE], item);
 616}
 617
 618#endif
 619
 620#ifdef CONFIG_COMPACTION
 621
 622struct contig_page_info {
 623	unsigned long free_pages;
 624	unsigned long free_blocks_total;
 625	unsigned long free_blocks_suitable;
 626};
 627
 628/*
 629 * Calculate the number of free pages in a zone, how many contiguous
 630 * pages are free and how many are large enough to satisfy an allocation of
 631 * the target size. Note that this function makes no attempt to estimate
 632 * how many suitable free blocks there *might* be if MOVABLE pages were
 633 * migrated. Calculating that is possible, but expensive and can be
 634 * figured out from userspace
 635 */
 636static void fill_contig_page_info(struct zone *zone,
 637				unsigned int suitable_order,
 638				struct contig_page_info *info)
 639{
 640	unsigned int order;
 641
 642	info->free_pages = 0;
 643	info->free_blocks_total = 0;
 644	info->free_blocks_suitable = 0;
 645
 646	for (order = 0; order < MAX_ORDER; order++) {
 647		unsigned long blocks;
 648
 649		/* Count number of free blocks */
 650		blocks = zone->free_area[order].nr_free;
 651		info->free_blocks_total += blocks;
 652
 653		/* Count free base pages */
 654		info->free_pages += blocks << order;
 655
 656		/* Count the suitable free blocks */
 657		if (order >= suitable_order)
 658			info->free_blocks_suitable += blocks <<
 659						(order - suitable_order);
 660	}
 661}
 662
 663/*
 664 * A fragmentation index only makes sense if an allocation of a requested
 665 * size would fail. If that is true, the fragmentation index indicates
 666 * whether external fragmentation or a lack of memory was the problem.
 667 * The value can be used to determine if page reclaim or compaction
 668 * should be used
 669 */
 670static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
 671{
 672	unsigned long requested = 1UL << order;
 673
 674	if (!info->free_blocks_total)
 675		return 0;
 676
 677	/* Fragmentation index only makes sense when a request would fail */
 678	if (info->free_blocks_suitable)
 679		return -1000;
 680
 681	/*
 682	 * Index is between 0 and 1 so return within 3 decimal places
 683	 *
 684	 * 0 => allocation would fail due to lack of memory
 685	 * 1 => allocation would fail due to fragmentation
 686	 */
 687	return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total);
 688}
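As a worked example of the index (made-up numbers, illustrative userspace C): 100 free pages spread over 80 free blocks, none large enough for an order-2 (4-page) request, gives 0.675, i.e. the failure is mostly down to fragmentation rather than lack of memory.

#include <stdio.h>

int main(void)
{
	unsigned int order = 2;
	unsigned long requested = 1UL << order;		/* 4 pages */
	unsigned long free_pages = 100;
	unsigned long free_blocks_total = 80;

	/* same arithmetic as __fragmentation_index(), in plain integer math */
	long index = 1000 - (long)((1000 + free_pages * 1000 / requested) /
				   free_blocks_total);

	printf("fragmentation index = %ld.%03ld\n", index / 1000, index % 1000);	/* 0.675 */
	return 0;
}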
 689
  690/* Same as __fragmentation_index() but allocates contig_page_info on the stack */
 691int fragmentation_index(struct zone *zone, unsigned int order)
 692{
 693	struct contig_page_info info;
 694
 695	fill_contig_page_info(zone, order, &info);
 696	return __fragmentation_index(order, &info);
 697}
 698#endif
 699
 700#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
 701#ifdef CONFIG_ZONE_DMA
 702#define TEXT_FOR_DMA(xx) xx "_dma",
 703#else
 704#define TEXT_FOR_DMA(xx)
 705#endif
 706
 707#ifdef CONFIG_ZONE_DMA32
 708#define TEXT_FOR_DMA32(xx) xx "_dma32",
 709#else
 710#define TEXT_FOR_DMA32(xx)
 711#endif
 712
 713#ifdef CONFIG_HIGHMEM
 714#define TEXT_FOR_HIGHMEM(xx) xx "_high",
 715#else
 716#define TEXT_FOR_HIGHMEM(xx)
 717#endif
 718
 719#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
 720					TEXT_FOR_HIGHMEM(xx) xx "_movable",
 721
 722const char * const vmstat_text[] = {
  723	/* enum zone_stat_item counters */
 724	"nr_free_pages",
 725	"nr_alloc_batch",
 726	"nr_inactive_anon",
 727	"nr_active_anon",
 728	"nr_inactive_file",
 729	"nr_active_file",
 730	"nr_unevictable",
 731	"nr_mlock",
 732	"nr_anon_pages",
 733	"nr_mapped",
 734	"nr_file_pages",
 735	"nr_dirty",
 736	"nr_writeback",
 737	"nr_slab_reclaimable",
 738	"nr_slab_unreclaimable",
 739	"nr_page_table_pages",
 740	"nr_kernel_stack",
 741	"nr_unstable",
 742	"nr_bounce",
 743	"nr_vmscan_write",
 744	"nr_vmscan_immediate_reclaim",
 745	"nr_writeback_temp",
 746	"nr_isolated_anon",
 747	"nr_isolated_file",
 748	"nr_shmem",
 749	"nr_dirtied",
 750	"nr_written",
 751	"nr_pages_scanned",
 752
 753#ifdef CONFIG_NUMA
 754	"numa_hit",
 755	"numa_miss",
 756	"numa_foreign",
 757	"numa_interleave",
 758	"numa_local",
 759	"numa_other",
 760#endif
 761	"workingset_refault",
 762	"workingset_activate",
 763	"workingset_nodereclaim",
 764	"nr_anon_transparent_hugepages",
 765	"nr_free_cma",
 766
 767	/* enum writeback_stat_item counters */
 768	"nr_dirty_threshold",
 769	"nr_dirty_background_threshold",
 770
 771#ifdef CONFIG_VM_EVENT_COUNTERS
 772	/* enum vm_event_item counters */
 773	"pgpgin",
 774	"pgpgout",
 775	"pswpin",
 776	"pswpout",
 777
 778	TEXTS_FOR_ZONES("pgalloc")
 779
 780	"pgfree",
 781	"pgactivate",
 782	"pgdeactivate",
 783
 784	"pgfault",
 785	"pgmajfault",
 786	"pglazyfreed",
 787
 788	TEXTS_FOR_ZONES("pgrefill")
 789	TEXTS_FOR_ZONES("pgsteal_kswapd")
 790	TEXTS_FOR_ZONES("pgsteal_direct")
 791	TEXTS_FOR_ZONES("pgscan_kswapd")
 792	TEXTS_FOR_ZONES("pgscan_direct")
 793	"pgscan_direct_throttle",
 794
 795#ifdef CONFIG_NUMA
 796	"zone_reclaim_failed",
 797#endif
 798	"pginodesteal",
 799	"slabs_scanned",
 800	"kswapd_inodesteal",
 801	"kswapd_low_wmark_hit_quickly",
 802	"kswapd_high_wmark_hit_quickly",
 803	"pageoutrun",
 804	"allocstall",
 805
 806	"pgrotated",
 807
 808	"drop_pagecache",
 809	"drop_slab",
 810
 811#ifdef CONFIG_NUMA_BALANCING
 812	"numa_pte_updates",
 813	"numa_huge_pte_updates",
 814	"numa_hint_faults",
 815	"numa_hint_faults_local",
 816	"numa_pages_migrated",
 817#endif
 818#ifdef CONFIG_MIGRATION
 819	"pgmigrate_success",
 820	"pgmigrate_fail",
 821#endif
 822#ifdef CONFIG_COMPACTION
 823	"compact_migrate_scanned",
 824	"compact_free_scanned",
 825	"compact_isolated",
 826	"compact_stall",
 827	"compact_fail",
 828	"compact_success",
 829	"compact_daemon_wake",
 830#endif
 831
 832#ifdef CONFIG_HUGETLB_PAGE
 833	"htlb_buddy_alloc_success",
 834	"htlb_buddy_alloc_fail",
 835#endif
 836	"unevictable_pgs_culled",
 837	"unevictable_pgs_scanned",
 838	"unevictable_pgs_rescued",
 839	"unevictable_pgs_mlocked",
 840	"unevictable_pgs_munlocked",
 841	"unevictable_pgs_cleared",
 842	"unevictable_pgs_stranded",
 843
 844#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 845	"thp_fault_alloc",
 846	"thp_fault_fallback",
 847	"thp_collapse_alloc",
 848	"thp_collapse_alloc_failed",
 849	"thp_split_page",
 850	"thp_split_page_failed",
 851	"thp_deferred_split_page",
 852	"thp_split_pmd",
 853	"thp_zero_page_alloc",
 854	"thp_zero_page_alloc_failed",
 855#endif
 856#ifdef CONFIG_MEMORY_BALLOON
 857	"balloon_inflate",
 858	"balloon_deflate",
 859#ifdef CONFIG_BALLOON_COMPACTION
 860	"balloon_migrate",
 861#endif
 862#endif /* CONFIG_MEMORY_BALLOON */
 863#ifdef CONFIG_DEBUG_TLBFLUSH
 864#ifdef CONFIG_SMP
 865	"nr_tlb_remote_flush",
 866	"nr_tlb_remote_flush_received",
 867#endif /* CONFIG_SMP */
 868	"nr_tlb_local_flush_all",
 869	"nr_tlb_local_flush_one",
 870#endif /* CONFIG_DEBUG_TLBFLUSH */
 871
 872#ifdef CONFIG_DEBUG_VM_VMACACHE
 873	"vmacache_find_calls",
 874	"vmacache_find_hits",
 875	"vmacache_full_flushes",
 876#endif
  877#endif /* CONFIG_VM_EVENT_COUNTERS */
 878};
 879#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
 880
 881
 882#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
 883     defined(CONFIG_PROC_FS)
 884static void *frag_start(struct seq_file *m, loff_t *pos)
 885{
 886	pg_data_t *pgdat;
 887	loff_t node = *pos;
 888
 889	for (pgdat = first_online_pgdat();
 890	     pgdat && node;
 891	     pgdat = next_online_pgdat(pgdat))
 892		--node;
 893
 894	return pgdat;
 895}
 896
 897static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
 898{
 899	pg_data_t *pgdat = (pg_data_t *)arg;
 900
 901	(*pos)++;
 902	return next_online_pgdat(pgdat);
 903}
 904
 905static void frag_stop(struct seq_file *m, void *arg)
 906{
 907}
 908
 909/* Walk all the zones in a node and print using a callback */
 910static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
 911		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
 912{
 913	struct zone *zone;
 914	struct zone *node_zones = pgdat->node_zones;
 915	unsigned long flags;
 916
 917	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
 918		if (!populated_zone(zone))
 919			continue;
 920
 921		spin_lock_irqsave(&zone->lock, flags);
 922		print(m, pgdat, zone);
 923		spin_unlock_irqrestore(&zone->lock, flags);
 924	}
 925}
 926#endif
 927
 928#ifdef CONFIG_PROC_FS
 929static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
 930						struct zone *zone)
 931{
 932	int order;
 933
 934	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
 935	for (order = 0; order < MAX_ORDER; ++order)
 936		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
 937	seq_putc(m, '\n');
 938}
 939
 940/*
 941 * This walks the free areas for each zone.
 942 */
 943static int frag_show(struct seq_file *m, void *arg)
 944{
 945	pg_data_t *pgdat = (pg_data_t *)arg;
 946	walk_zones_in_node(m, pgdat, frag_show_print);
 947	return 0;
 948}
 949
 950static void pagetypeinfo_showfree_print(struct seq_file *m,
 951					pg_data_t *pgdat, struct zone *zone)
 952{
 953	int order, mtype;
 954
 955	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
 956		seq_printf(m, "Node %4d, zone %8s, type %12s ",
 957					pgdat->node_id,
 958					zone->name,
 959					migratetype_names[mtype]);
 960		for (order = 0; order < MAX_ORDER; ++order) {
 961			unsigned long freecount = 0;
 962			struct free_area *area;
 963			struct list_head *curr;
 964
 965			area = &(zone->free_area[order]);
 966
 967			list_for_each(curr, &area->free_list[mtype])
 968				freecount++;
 969			seq_printf(m, "%6lu ", freecount);
 970		}
 971		seq_putc(m, '\n');
 972	}
 973}
 974
  975/* Print out the free pages at each order for each migratetype */
 976static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
 977{
 978	int order;
 979	pg_data_t *pgdat = (pg_data_t *)arg;
 980
 981	/* Print header */
 982	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
 983	for (order = 0; order < MAX_ORDER; ++order)
 984		seq_printf(m, "%6d ", order);
 985	seq_putc(m, '\n');
 986
 987	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);
 988
 989	return 0;
 990}
 991
 992static void pagetypeinfo_showblockcount_print(struct seq_file *m,
 993					pg_data_t *pgdat, struct zone *zone)
 994{
 995	int mtype;
 996	unsigned long pfn;
 997	unsigned long start_pfn = zone->zone_start_pfn;
 998	unsigned long end_pfn = zone_end_pfn(zone);
 999	unsigned long count[MIGRATE_TYPES] = { 0, };
1000
1001	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
1002		struct page *page;
1003
1004		if (!pfn_valid(pfn))
1005			continue;
1006
1007		page = pfn_to_page(pfn);
1008
1009		/* Watch for unexpected holes punched in the memmap */
1010		if (!memmap_valid_within(pfn, page, zone))
1011			continue;
1012
1013		mtype = get_pageblock_migratetype(page);
1014
1015		if (mtype < MIGRATE_TYPES)
1016			count[mtype]++;
1017	}
1018
1019	/* Print counts */
1020	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1021	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1022		seq_printf(m, "%12lu ", count[mtype]);
1023	seq_putc(m, '\n');
1024}
1025
 1026/* Print out the number of pageblocks for each migratetype */
1027static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
1028{
1029	int mtype;
1030	pg_data_t *pgdat = (pg_data_t *)arg;
1031
1032	seq_printf(m, "\n%-23s", "Number of blocks type ");
1033	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1034		seq_printf(m, "%12s ", migratetype_names[mtype]);
1035	seq_putc(m, '\n');
1036	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);
1037
1038	return 0;
1039}
1040
1041#ifdef CONFIG_PAGE_OWNER
1042static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
1043							pg_data_t *pgdat,
1044							struct zone *zone)
1045{
1046	struct page *page;
1047	struct page_ext *page_ext;
1048	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
1049	unsigned long end_pfn = pfn + zone->spanned_pages;
1050	unsigned long count[MIGRATE_TYPES] = { 0, };
1051	int pageblock_mt, page_mt;
1052	int i;
1053
1054	/* Scan block by block. First and last block may be incomplete */
1055	pfn = zone->zone_start_pfn;
1056
1057	/*
1058	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
1059	 * a zone boundary, it will be double counted between zones. This does
1060	 * not matter as the mixed block count will still be correct
1061	 */
1062	for (; pfn < end_pfn; ) {
1063		if (!pfn_valid(pfn)) {
1064			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
1065			continue;
1066		}
1067
1068		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
1069		block_end_pfn = min(block_end_pfn, end_pfn);
1070
1071		page = pfn_to_page(pfn);
1072		pageblock_mt = get_pfnblock_migratetype(page, pfn);
1073
1074		for (; pfn < block_end_pfn; pfn++) {
1075			if (!pfn_valid_within(pfn))
1076				continue;
1077
1078			page = pfn_to_page(pfn);
1079			if (PageBuddy(page)) {
1080				pfn += (1UL << page_order(page)) - 1;
1081				continue;
1082			}
1083
1084			if (PageReserved(page))
1085				continue;
1086
1087			page_ext = lookup_page_ext(page);
1088
1089			if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
1090				continue;
1091
1092			page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
1093			if (pageblock_mt != page_mt) {
1094				if (is_migrate_cma(pageblock_mt))
1095					count[MIGRATE_MOVABLE]++;
1096				else
1097					count[pageblock_mt]++;
1098
1099				pfn = block_end_pfn;
1100				break;
1101			}
1102			pfn += (1UL << page_ext->order) - 1;
1103		}
1104	}
1105
1106	/* Print counts */
1107	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1108	for (i = 0; i < MIGRATE_TYPES; i++)
1109		seq_printf(m, "%12lu ", count[i]);
1110	seq_putc(m, '\n');
1111}
1112#endif /* CONFIG_PAGE_OWNER */
1113
1114/*
1115 * Print out the number of pageblocks for each migratetype that contain pages
1116 * of other types. This gives an indication of how well fallbacks are being
1117 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
1118 * to determine what is going on
1119 */
1120static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
1121{
1122#ifdef CONFIG_PAGE_OWNER
1123	int mtype;
1124
1125	if (!static_branch_unlikely(&page_owner_inited))
1126		return;
1127
1128	drain_all_pages(NULL);
1129
1130	seq_printf(m, "\n%-23s", "Number of mixed blocks ");
1131	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1132		seq_printf(m, "%12s ", migratetype_names[mtype]);
1133	seq_putc(m, '\n');
1134
1135	walk_zones_in_node(m, pgdat, pagetypeinfo_showmixedcount_print);
1136#endif /* CONFIG_PAGE_OWNER */
1137}
1138
1139/*
1140 * This prints out statistics in relation to grouping pages by mobility.
1141 * It is expensive to collect so do not constantly read the file.
1142 */
1143static int pagetypeinfo_show(struct seq_file *m, void *arg)
1144{
1145	pg_data_t *pgdat = (pg_data_t *)arg;
1146
1147	/* check memoryless node */
1148	if (!node_state(pgdat->node_id, N_MEMORY))
1149		return 0;
1150
1151	seq_printf(m, "Page block order: %d\n", pageblock_order);
1152	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
1153	seq_putc(m, '\n');
1154	pagetypeinfo_showfree(m, pgdat);
1155	pagetypeinfo_showblockcount(m, pgdat);
1156	pagetypeinfo_showmixedcount(m, pgdat);
1157
1158	return 0;
1159}
1160
1161static const struct seq_operations fragmentation_op = {
1162	.start	= frag_start,
1163	.next	= frag_next,
1164	.stop	= frag_stop,
1165	.show	= frag_show,
1166};
1167
1168static int fragmentation_open(struct inode *inode, struct file *file)
1169{
1170	return seq_open(file, &fragmentation_op);
1171}
1172
1173static const struct file_operations fragmentation_file_operations = {
1174	.open		= fragmentation_open,
1175	.read		= seq_read,
1176	.llseek		= seq_lseek,
1177	.release	= seq_release,
1178};
1179
1180static const struct seq_operations pagetypeinfo_op = {
1181	.start	= frag_start,
1182	.next	= frag_next,
1183	.stop	= frag_stop,
1184	.show	= pagetypeinfo_show,
1185};
1186
1187static int pagetypeinfo_open(struct inode *inode, struct file *file)
1188{
1189	return seq_open(file, &pagetypeinfo_op);
1190}
1191
1192static const struct file_operations pagetypeinfo_file_ops = {
1193	.open		= pagetypeinfo_open,
1194	.read		= seq_read,
1195	.llseek		= seq_lseek,
1196	.release	= seq_release,
1197};
1198
1199static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1200							struct zone *zone)
1201{
1202	int i;
1203	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1204	seq_printf(m,
1205		   "\n  pages free     %lu"
1206		   "\n        min      %lu"
1207		   "\n        low      %lu"
1208		   "\n        high     %lu"
1209		   "\n        scanned  %lu"
1210		   "\n        spanned  %lu"
1211		   "\n        present  %lu"
1212		   "\n        managed  %lu",
1213		   zone_page_state(zone, NR_FREE_PAGES),
1214		   min_wmark_pages(zone),
1215		   low_wmark_pages(zone),
1216		   high_wmark_pages(zone),
1217		   zone_page_state(zone, NR_PAGES_SCANNED),
1218		   zone->spanned_pages,
1219		   zone->present_pages,
1220		   zone->managed_pages);
1221
1222	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1223		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
1224				zone_page_state(zone, i));
1225
1226	seq_printf(m,
1227		   "\n        protection: (%ld",
1228		   zone->lowmem_reserve[0]);
1229	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
1230		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
1231	seq_printf(m,
1232		   ")"
1233		   "\n  pagesets");
1234	for_each_online_cpu(i) {
1235		struct per_cpu_pageset *pageset;
1236
1237		pageset = per_cpu_ptr(zone->pageset, i);
1238		seq_printf(m,
1239			   "\n    cpu: %i"
1240			   "\n              count: %i"
1241			   "\n              high:  %i"
1242			   "\n              batch: %i",
1243			   i,
1244			   pageset->pcp.count,
1245			   pageset->pcp.high,
1246			   pageset->pcp.batch);
1247#ifdef CONFIG_SMP
1248		seq_printf(m, "\n  vm stats threshold: %d",
1249				pageset->stat_threshold);
1250#endif
1251	}
1252	seq_printf(m,
1253		   "\n  all_unreclaimable: %u"
1254		   "\n  start_pfn:         %lu"
1255		   "\n  inactive_ratio:    %u",
1256		   !zone_reclaimable(zone),
1257		   zone->zone_start_pfn,
1258		   zone->inactive_ratio);
1259	seq_putc(m, '\n');
1260}
1261
1262/*
1263 * Output information about zones in @pgdat.
1264 */
1265static int zoneinfo_show(struct seq_file *m, void *arg)
1266{
1267	pg_data_t *pgdat = (pg_data_t *)arg;
1268	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
1269	return 0;
1270}
1271
1272static const struct seq_operations zoneinfo_op = {
1273	.start	= frag_start, /* iterate over all zones. The same as in
1274			       * fragmentation. */
1275	.next	= frag_next,
1276	.stop	= frag_stop,
1277	.show	= zoneinfo_show,
1278};
1279
1280static int zoneinfo_open(struct inode *inode, struct file *file)
1281{
1282	return seq_open(file, &zoneinfo_op);
1283}
1284
1285static const struct file_operations proc_zoneinfo_file_operations = {
1286	.open		= zoneinfo_open,
1287	.read		= seq_read,
1288	.llseek		= seq_lseek,
1289	.release	= seq_release,
1290};
1291
1292enum writeback_stat_item {
1293	NR_DIRTY_THRESHOLD,
1294	NR_DIRTY_BG_THRESHOLD,
1295	NR_VM_WRITEBACK_STAT_ITEMS,
1296};
1297
1298static void *vmstat_start(struct seq_file *m, loff_t *pos)
1299{
1300	unsigned long *v;
1301	int i, stat_items_size;
1302
1303	if (*pos >= ARRAY_SIZE(vmstat_text))
1304		return NULL;
1305	stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
1306			  NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
1307
1308#ifdef CONFIG_VM_EVENT_COUNTERS
1309	stat_items_size += sizeof(struct vm_event_state);
1310#endif
1311
1312	v = kmalloc(stat_items_size, GFP_KERNEL);
1313	m->private = v;
1314	if (!v)
1315		return ERR_PTR(-ENOMEM);
1316	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1317		v[i] = global_page_state(i);
1318	v += NR_VM_ZONE_STAT_ITEMS;
1319
1320	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1321			    v + NR_DIRTY_THRESHOLD);
1322	v += NR_VM_WRITEBACK_STAT_ITEMS;
1323
1324#ifdef CONFIG_VM_EVENT_COUNTERS
1325	all_vm_events(v);
1326	v[PGPGIN] /= 2;		/* sectors -> kbytes */
1327	v[PGPGOUT] /= 2;
1328#endif
1329	return (unsigned long *)m->private + *pos;
1330}
1331
1332static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1333{
1334	(*pos)++;
1335	if (*pos >= ARRAY_SIZE(vmstat_text))
1336		return NULL;
1337	return (unsigned long *)m->private + *pos;
1338}
1339
1340static int vmstat_show(struct seq_file *m, void *arg)
1341{
1342	unsigned long *l = arg;
1343	unsigned long off = l - (unsigned long *)m->private;
1344
1345	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
1346	return 0;
1347}
1348
1349static void vmstat_stop(struct seq_file *m, void *arg)
1350{
1351	kfree(m->private);
1352	m->private = NULL;
1353}
1354
1355static const struct seq_operations vmstat_op = {
1356	.start	= vmstat_start,
1357	.next	= vmstat_next,
1358	.stop	= vmstat_stop,
1359	.show	= vmstat_show,
1360};
1361
1362static int vmstat_open(struct inode *inode, struct file *file)
1363{
1364	return seq_open(file, &vmstat_op);
1365}
1366
1367static const struct file_operations proc_vmstat_file_operations = {
1368	.open		= vmstat_open,
1369	.read		= seq_read,
1370	.llseek		= seq_lseek,
1371	.release	= seq_release,
1372};
1373#endif /* CONFIG_PROC_FS */
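The seq_file hooks above are what back /proc/vmstat: each line is one name from vmstat_text followed by its value. A small userspace reader (plain C, independent of this file) that picks out a single counter could look like this:

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/vmstat", "r");
	char name[64];
	unsigned long value;

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}
	/* every line has the form "<name> <value>", e.g. "nr_free_pages 123456" */
	while (fscanf(f, "%63s %lu", name, &value) == 2) {
		if (strcmp(name, "nr_free_pages") == 0)
			printf("nr_free_pages = %lu pages\n", value);
	}
	fclose(f);
	return 0;
}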
1374
1375#ifdef CONFIG_SMP
1376static struct workqueue_struct *vmstat_wq;
1377static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1378int sysctl_stat_interval __read_mostly = HZ;
1379static cpumask_var_t cpu_stat_off;
1380
1381static void vmstat_update(struct work_struct *w)
1382{
1383	if (refresh_cpu_vm_stats(true)) {
1384		/*
1385		 * Counters were updated so we expect more updates
1386		 * to occur in the future. Keep on running the
1387		 * update worker thread.
1388		 * If we were marked on cpu_stat_off clear the flag
1389		 * so that vmstat_shepherd doesn't schedule us again.
1390		 */
1391		if (!cpumask_test_and_clear_cpu(smp_processor_id(),
1392						cpu_stat_off)) {
1393			queue_delayed_work_on(smp_processor_id(), vmstat_wq,
1394				this_cpu_ptr(&vmstat_work),
1395				round_jiffies_relative(sysctl_stat_interval));
1396		}
1397	} else {
1398		/*
1399		 * We did not update any counters so the app may be in
1400		 * a mode where it does not cause counter updates.
1401		 * We may be uselessly running vmstat_update.
1402		 * Defer the checking for differentials to the
1403		 * shepherd thread on a different processor.
1404		 */
1405		cpumask_set_cpu(smp_processor_id(), cpu_stat_off);
1406	}
1407}
1408
1409/*
1410 * Switch off vmstat processing and then fold all the remaining differentials
1411 * until the diffs stay at zero. The function is used by NOHZ and can only be
1412 * invoked when tick processing is not active.
1413 */
1414/*
1415 * Check if the diffs for a certain cpu indicate that
1416 * an update is needed.
1417 */
1418static bool need_update(int cpu)
1419{
1420	struct zone *zone;
1421
1422	for_each_populated_zone(zone) {
1423		struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);
1424
1425		BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1);
1426		/*
1427		 * The fast way of checking if there are any vmstat diffs.
1428		 * This works because the diffs are byte sized items.
1429		 */
1430		if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS))
1431			return true;
1432
1433	}
1434	return false;
1435}
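memchr_inv() returns the address of the first byte that differs from the given value, so the test above simply asks "is any per-item diff non-zero?". A userspace equivalent of that check (illustrative only) is a plain scan over the byte-sized diffs:

#include <stddef.h>
#include <stdio.h>

/* Like memchr_inv(buf, 0, len) != NULL: nonzero if any byte differs from 0. */
static int any_nonzero(const signed char *buf, size_t len)
{
	for (size_t i = 0; i < len; i++)
		if (buf[i])
			return 1;
	return 0;
}

int main(void)
{
	signed char diffs[32] = { 0 };

	printf("update needed: %d\n", any_nonzero(diffs, sizeof(diffs)));	/* 0 */
	diffs[7] = 3;
	printf("update needed: %d\n", any_nonzero(diffs, sizeof(diffs)));	/* 1 */
	return 0;
}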
1436
1437void quiet_vmstat(void)
1438{
1439	if (system_state != SYSTEM_RUNNING)
1440		return;
1441
1442	/*
 1443	 * If we are already in the hands of the shepherd then there
1444	 * is nothing for us to do here.
1445	 */
1446	if (cpumask_test_and_set_cpu(smp_processor_id(), cpu_stat_off))
1447		return;
1448
1449	if (!need_update(smp_processor_id()))
1450		return;
1451
1452	/*
1453	 * Just refresh counters and do not care about the pending delayed
 1454	 * vmstat_update. It doesn't fire often enough to matter and canceling
1455	 * it would be too expensive from this path.
1456	 * vmstat_shepherd will take care about that for us.
1457	 */
1458	refresh_cpu_vm_stats(false);
1459}
1460
1461
1462/*
1463 * Shepherd worker thread that checks the
1464 * differentials of processors that have their worker
1465 * threads for vm statistics updates disabled because of
1466 * inactivity.
1467 */
1468static void vmstat_shepherd(struct work_struct *w);
1469
1470static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
1471
1472static void vmstat_shepherd(struct work_struct *w)
1473{
1474	int cpu;
1475
1476	get_online_cpus();
1477	/* Check processors whose vmstat worker threads have been disabled */
1478	for_each_cpu(cpu, cpu_stat_off) {
1479		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
1480
1481		if (need_update(cpu)) {
1482			if (cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
1483				queue_delayed_work_on(cpu, vmstat_wq, dw, 0);
1484		} else {
1485			/*
1486			 * Cancel the work if quiet_vmstat has put this
1487			 * cpu on cpu_stat_off because the work item might
1488			 * be still scheduled
1489			 */
1490			cancel_delayed_work(dw);
1491		}
1492	}
1493	put_online_cpus();
1494
1495	schedule_delayed_work(&shepherd,
1496		round_jiffies_relative(sysctl_stat_interval));
1497}
1498
1499static void __init start_shepherd_timer(void)
1500{
1501	int cpu;
1502
1503	for_each_possible_cpu(cpu)
1504		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
1505			vmstat_update);
1506
1507	if (!alloc_cpumask_var(&cpu_stat_off, GFP_KERNEL))
1508		BUG();
1509	cpumask_copy(cpu_stat_off, cpu_online_mask);
1510
1511	vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1512	schedule_delayed_work(&shepherd,
1513		round_jiffies_relative(sysctl_stat_interval));
1514}
1515
1516static void vmstat_cpu_dead(int node)
1517{
1518	int cpu;
1519
1520	get_online_cpus();
1521	for_each_online_cpu(cpu)
1522		if (cpu_to_node(cpu) == node)
1523			goto end;
1524
1525	node_clear_state(node, N_CPU);
1526end:
1527	put_online_cpus();
1528}
1529
1530/*
 1531 * Use the cpu notifier to ensure that the thresholds are recalculated
1532 * when necessary.
1533 */
1534static int vmstat_cpuup_callback(struct notifier_block *nfb,
1535		unsigned long action,
1536		void *hcpu)
1537{
1538	long cpu = (long)hcpu;
1539
1540	switch (action) {
1541	case CPU_ONLINE:
1542	case CPU_ONLINE_FROZEN:
1543		refresh_zone_stat_thresholds();
1544		node_set_state(cpu_to_node(cpu), N_CPU);
1545		cpumask_set_cpu(cpu, cpu_stat_off);
1546		break;
1547	case CPU_DOWN_PREPARE:
1548	case CPU_DOWN_PREPARE_FROZEN:
1549		cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
1550		cpumask_clear_cpu(cpu, cpu_stat_off);
1551		break;
1552	case CPU_DOWN_FAILED:
1553	case CPU_DOWN_FAILED_FROZEN:
1554		cpumask_set_cpu(cpu, cpu_stat_off);
1555		break;
1556	case CPU_DEAD:
1557	case CPU_DEAD_FROZEN:
1558		refresh_zone_stat_thresholds();
1559		vmstat_cpu_dead(cpu_to_node(cpu));
1560		break;
1561	default:
1562		break;
1563	}
1564	return NOTIFY_OK;
1565}
1566
1567static struct notifier_block vmstat_notifier =
1568	{ &vmstat_cpuup_callback, NULL, 0 };
1569#endif
1570
1571static int __init setup_vmstat(void)
1572{
1573#ifdef CONFIG_SMP
1574	cpu_notifier_register_begin();
1575	__register_cpu_notifier(&vmstat_notifier);
1576
1577	start_shepherd_timer();
1578	cpu_notifier_register_done();
1579#endif
1580#ifdef CONFIG_PROC_FS
1581	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
1582	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
1583	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
1584	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
1585#endif
1586	return 0;
1587}
1588module_init(setup_vmstat)
1589
1590#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
1591
1592/*
1593 * Return an index indicating how much of the available free memory is
1594 * unusable for an allocation of the requested size.
1595 */
1596static int unusable_free_index(unsigned int order,
1597				struct contig_page_info *info)
1598{
1599	/* No free memory is interpreted as all free memory is unusable */
1600	if (info->free_pages == 0)
1601		return 1000;
1602
1603	/*
1604	 * Index should be a value between 0 and 1. Return a value to 3
1605	 * decimal places.
1606	 *
1607	 * 0 => no fragmentation
1608	 * 1 => high fragmentation
1609	 */
1610	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
1611
1612}
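As a worked example (made-up numbers, illustrative userspace C): with 1000 free pages of which only 100 order-3 blocks (800 pages) could satisfy an order-3 request, 200 of the 1000 free pages are unusable at that size, so the index is 0.200.

#include <stdio.h>

int main(void)
{
	unsigned int order = 3;
	unsigned long free_pages = 1000;
	unsigned long free_blocks_suitable = 100;	/* blocks of order 3 or larger */

	/* same arithmetic as unusable_free_index() */
	long index = (long)((free_pages - (free_blocks_suitable << order)) * 1000 /
			    free_pages);

	printf("unusable index = %ld.%03ld\n", index / 1000, index % 1000);	/* 0.200 */
	return 0;
}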
1613
1614static void unusable_show_print(struct seq_file *m,
1615					pg_data_t *pgdat, struct zone *zone)
1616{
1617	unsigned int order;
1618	int index;
1619	struct contig_page_info info;
1620
1621	seq_printf(m, "Node %d, zone %8s ",
1622				pgdat->node_id,
1623				zone->name);
1624	for (order = 0; order < MAX_ORDER; ++order) {
1625		fill_contig_page_info(zone, order, &info);
1626		index = unusable_free_index(order, &info);
1627		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1628	}
1629
1630	seq_putc(m, '\n');
1631}
1632
1633/*
1634 * Display unusable free space index
1635 *
1636 * The unusable free space index measures how much of the available free
1637 * memory cannot be used to satisfy an allocation of a given size and is a
1638 * value between 0 and 1. The higher the value, the more of free memory is
1639 * unusable and by implication, the worse the external fragmentation is. This
1640 * can be expressed as a percentage by multiplying by 100.
1641 */
1642static int unusable_show(struct seq_file *m, void *arg)
1643{
1644	pg_data_t *pgdat = (pg_data_t *)arg;
1645
1646	/* check memoryless node */
1647	if (!node_state(pgdat->node_id, N_MEMORY))
1648		return 0;
1649
1650	walk_zones_in_node(m, pgdat, unusable_show_print);
1651
1652	return 0;
1653}
1654
1655static const struct seq_operations unusable_op = {
1656	.start	= frag_start,
1657	.next	= frag_next,
1658	.stop	= frag_stop,
1659	.show	= unusable_show,
1660};
1661
1662static int unusable_open(struct inode *inode, struct file *file)
1663{
1664	return seq_open(file, &unusable_op);
1665}
1666
1667static const struct file_operations unusable_file_ops = {
1668	.open		= unusable_open,
1669	.read		= seq_read,
1670	.llseek		= seq_lseek,
1671	.release	= seq_release,
1672};
1673
1674static void extfrag_show_print(struct seq_file *m,
1675					pg_data_t *pgdat, struct zone *zone)
1676{
1677	unsigned int order;
1678	int index;
1679
1680	/* Alloc on stack as interrupts are disabled for zone walk */
1681	struct contig_page_info info;
1682
1683	seq_printf(m, "Node %d, zone %8s ",
1684				pgdat->node_id,
1685				zone->name);
1686	for (order = 0; order < MAX_ORDER; ++order) {
1687		fill_contig_page_info(zone, order, &info);
1688		index = __fragmentation_index(order, &info);
1689		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1690	}
1691
1692	seq_putc(m, '\n');
1693}
1694
1695/*
1696 * Display fragmentation index for orders that allocations would fail for
1697 */
1698static int extfrag_show(struct seq_file *m, void *arg)
1699{
1700	pg_data_t *pgdat = (pg_data_t *)arg;
1701
1702	walk_zones_in_node(m, pgdat, extfrag_show_print);
1703
1704	return 0;
1705}
1706
1707static const struct seq_operations extfrag_op = {
1708	.start	= frag_start,
1709	.next	= frag_next,
1710	.stop	= frag_stop,
1711	.show	= extfrag_show,
1712};
1713
1714static int extfrag_open(struct inode *inode, struct file *file)
1715{
1716	return seq_open(file, &extfrag_op);
1717}
1718
1719static const struct file_operations extfrag_file_ops = {
1720	.open		= extfrag_open,
1721	.read		= seq_read,
1722	.llseek		= seq_lseek,
1723	.release	= seq_release,
1724};
1725
1726static int __init extfrag_debug_init(void)
1727{
1728	struct dentry *extfrag_debug_root;
1729
1730	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
1731	if (!extfrag_debug_root)
1732		return -ENOMEM;
1733
1734	if (!debugfs_create_file("unusable_index", 0444,
1735			extfrag_debug_root, NULL, &unusable_file_ops))
1736		goto fail;
1737
1738	if (!debugfs_create_file("extfrag_index", 0444,
1739			extfrag_debug_root, NULL, &extfrag_file_ops))
1740		goto fail;
1741
1742	return 0;
1743fail:
1744	debugfs_remove_recursive(extfrag_debug_root);
1745	return -ENOMEM;
1746}
1747
1748module_init(extfrag_debug_init);
1749#endif
v4.17
   1/*
   2 *  linux/mm/vmstat.c
   3 *
   4 *  Manages VM statistics
   5 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   6 *
   7 *  zoned VM statistics
   8 *  Copyright (C) 2006 Silicon Graphics, Inc.,
   9 *		Christoph Lameter <christoph@lameter.com>
  10 *  Copyright (C) 2008-2014 Christoph Lameter
  11 */
  12#include <linux/fs.h>
  13#include <linux/mm.h>
  14#include <linux/err.h>
  15#include <linux/module.h>
  16#include <linux/slab.h>
  17#include <linux/cpu.h>
  18#include <linux/cpumask.h>
  19#include <linux/vmstat.h>
  20#include <linux/proc_fs.h>
  21#include <linux/seq_file.h>
  22#include <linux/debugfs.h>
  23#include <linux/sched.h>
  24#include <linux/math64.h>
  25#include <linux/writeback.h>
  26#include <linux/compaction.h>
  27#include <linux/mm_inline.h>
  28#include <linux/page_ext.h>
  29#include <linux/page_owner.h>
  30
  31#include "internal.h"
  32
  33#define NUMA_STATS_THRESHOLD (U16_MAX - 2)
  34
  35#ifdef CONFIG_NUMA
  36int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;
  37
  38/* zero numa counters within a zone */
  39static void zero_zone_numa_counters(struct zone *zone)
  40{
  41	int item, cpu;
  42
  43	for (item = 0; item < NR_VM_NUMA_STAT_ITEMS; item++) {
  44		atomic_long_set(&zone->vm_numa_stat[item], 0);
  45		for_each_online_cpu(cpu)
  46			per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item]
  47						= 0;
  48	}
  49}
  50
  51/* zero numa counters of all the populated zones */
  52static void zero_zones_numa_counters(void)
  53{
  54	struct zone *zone;
  55
  56	for_each_populated_zone(zone)
  57		zero_zone_numa_counters(zone);
  58}
  59
  60/* zero global numa counters */
  61static void zero_global_numa_counters(void)
  62{
  63	int item;
  64
  65	for (item = 0; item < NR_VM_NUMA_STAT_ITEMS; item++)
  66		atomic_long_set(&vm_numa_stat[item], 0);
  67}
  68
  69static void invalid_numa_statistics(void)
  70{
  71	zero_zones_numa_counters();
  72	zero_global_numa_counters();
  73}
  74
  75static DEFINE_MUTEX(vm_numa_stat_lock);
  76
  77int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
  78		void __user *buffer, size_t *length, loff_t *ppos)
  79{
  80	int ret, oldval;
  81
  82	mutex_lock(&vm_numa_stat_lock);
  83	if (write)
  84		oldval = sysctl_vm_numa_stat;
  85	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
  86	if (ret || !write)
  87		goto out;
  88
  89	if (oldval == sysctl_vm_numa_stat)
  90		goto out;
  91	else if (sysctl_vm_numa_stat == ENABLE_NUMA_STAT) {
  92		static_branch_enable(&vm_numa_stat_key);
  93		pr_info("enable numa statistics\n");
  94	} else {
  95		static_branch_disable(&vm_numa_stat_key);
  96		invalid_numa_statistics();
  97		pr_info("disable numa statistics, and clear numa counters\n");
  98	}
  99
 100out:
 101	mutex_unlock(&vm_numa_stat_lock);
 102	return ret;
 103}
 104#endif
 105
 106#ifdef CONFIG_VM_EVENT_COUNTERS
 107DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
 108EXPORT_PER_CPU_SYMBOL(vm_event_states);
 109
 110static void sum_vm_events(unsigned long *ret)
 111{
 112	int cpu;
 113	int i;
 114
 115	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
 116
 117	for_each_online_cpu(cpu) {
 118		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
 119
 120		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
 121			ret[i] += this->event[i];
 122	}
 123}
 124
 125/*
 126 * Accumulate the vm event counters across all CPUs.
 127 * The result is unavoidably approximate - it can change
 128 * during and after execution of this function.
 129*/
 130void all_vm_events(unsigned long *ret)
 131{
 132	get_online_cpus();
 133	sum_vm_events(ret);
 134	put_online_cpus();
 135}
 136EXPORT_SYMBOL_GPL(all_vm_events);
 137
 138/*
 139 * Fold the foreign cpu events into our own.
 140 *
 141 * This is adding to the events on one processor
 142 * but keeps the global counts constant.
 143 */
 144void vm_events_fold_cpu(int cpu)
 145{
 146	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
 147	int i;
 148
 149	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
 150		count_vm_events(i, fold_state->event[i]);
 151		fold_state->event[i] = 0;
 152	}
 153}
 154
 155#endif /* CONFIG_VM_EVENT_COUNTERS */
 156
 157/*
 158 * Manage combined zone based / global counters
 159 *
 160 * vm_stat contains the global counters
 161 */
 162atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
 163atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS] __cacheline_aligned_in_smp;
 164atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
 165EXPORT_SYMBOL(vm_zone_stat);
 166EXPORT_SYMBOL(vm_numa_stat);
 167EXPORT_SYMBOL(vm_node_stat);
 168
 169#ifdef CONFIG_SMP
 170
 171int calculate_pressure_threshold(struct zone *zone)
 172{
 173	int threshold;
 174	int watermark_distance;
 175
 176	/*
 177	 * As vmstats are not up to date, there is drift between the estimated
 178	 * and real values. For high thresholds and a high number of CPUs, it
 179	 * is possible for the min watermark to be breached while the estimated
 180	 * value looks fine. The pressure threshold is a reduced value such
 181	 * that even the maximum amount of drift will not accidentally breach
 182	 * the min watermark
 183	 */
 184	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
 185	threshold = max(1, (int)(watermark_distance / num_online_cpus()));
 186
 187	/*
 188	 * Maximum threshold is 125
 189	 */
 190	threshold = min(125, threshold);
 191
 192	return threshold;
 193}
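/*
 * Worked example (illustrative numbers): with low_wmark_pages() == 1280,
 * min_wmark_pages() == 1024 and 8 online CPUs, watermark_distance is 256
 * pages and the pressure threshold becomes max(1, 256 / 8) = 32, well
 * below the 125 cap.
 */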
 194
 195int calculate_normal_threshold(struct zone *zone)
 196{
 197	int threshold;
 198	int mem;	/* memory in 128 MB units */
 199
 200	/*
 201	 * The threshold scales with the number of processors and the amount
 202	 * of memory per zone. More memory means that we can defer updates for
 203	 * longer, more processors could lead to more contention.
 204 	 * fls() is used to have a cheap way of logarithmic scaling.
 205	 *
 206	 * Some sample thresholds:
 207	 *
 208	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
 209	 * ------------------------------------------------------------------
 210	 * 8		1		1	0.9-1 GB	4
 211	 * 16		2		2	0.9-1 GB	4
 212	 * 20 		2		2	1-2 GB		5
 213	 * 24		2		2	2-4 GB		6
 214	 * 28		2		2	4-8 GB		7
 215	 * 32		2		2	8-16 GB		8
 216	 * 4		2		2	<128M		1
 217	 * 30		4		3	2-4 GB		5
 218	 * 48		4		3	8-16 GB		8
 219	 * 32		8		4	1-2 GB		4
 220	 * 32		8		4	0.9-1GB		4
 221	 * 10		16		5	<128M		1
 222	 * 40		16		5	900M		4
 223	 * 70		64		7	2-4 GB		5
 224	 * 84		64		7	4-8 GB		6
 225	 * 108		512		9	4-8 GB		6
 226	 * 125		1024		10	8-16 GB		8
 227	 * 125		1024		10	16-32 GB	9
 228	 */
 229
 230	mem = zone->managed_pages >> (27 - PAGE_SHIFT);
 231
 232	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
 233
 234	/*
 235	 * Maximum threshold is 125
 236	 */
 237	threshold = min(125, threshold);
 238
 239	return threshold;
 240}
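/*
 * Worked example (illustrative numbers): for a ~1GB zone on a 2-CPU
 * system, mem = 1024MB / 128MB = 8, so
 * threshold = 2 * fls(2) * (1 + fls(8)) = 2 * 2 * 5 = 20,
 * matching the 1-2 GB / 2-processor row in the table above.
 */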
 241
 242/*
 243 * Refresh the thresholds for each zone.
 244 */
 245void refresh_zone_stat_thresholds(void)
 246{
 247	struct pglist_data *pgdat;
 248	struct zone *zone;
 249	int cpu;
 250	int threshold;
 251
 252	/* Zero current pgdat thresholds */
 253	for_each_online_pgdat(pgdat) {
 254		for_each_online_cpu(cpu) {
 255			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
 256		}
 257	}
 258
 259	for_each_populated_zone(zone) {
 260		struct pglist_data *pgdat = zone->zone_pgdat;
 261		unsigned long max_drift, tolerate_drift;
 262
 263		threshold = calculate_normal_threshold(zone);
 264
 265		for_each_online_cpu(cpu) {
 266			int pgdat_threshold;
 267
 268			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
 269							= threshold;
 270
 271			/* Base nodestat threshold on the largest populated zone. */
 272			pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
 273			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
 274				= max(threshold, pgdat_threshold);
 275		}
 276
 277		/*
 278		 * Only set percpu_drift_mark if there is a danger that
 279		 * NR_FREE_PAGES reports the low watermark is ok when in fact
 280		 * the min watermark could be breached by an allocation
 281		 */
 282		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
 283		max_drift = num_online_cpus() * threshold;
 284		if (max_drift > tolerate_drift)
 285			zone->percpu_drift_mark = high_wmark_pages(zone) +
 286					max_drift;
 287	}
 288}
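/*
 * Worked example (illustrative numbers): with 64 online CPUs and a zone
 * threshold of 125, max_drift is 64 * 125 = 8000 pages. If the gap between
 * the low and min watermarks is smaller than that, NR_FREE_PAGES could look
 * fine while the min watermark is already breached, so percpu_drift_mark is
 * set to high_wmark_pages(zone) + 8000.
 */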
 289
 290void set_pgdat_percpu_threshold(pg_data_t *pgdat,
 291				int (*calculate_pressure)(struct zone *))
 292{
 293	struct zone *zone;
 294	int cpu;
 295	int threshold;
 296	int i;
 297
 298	for (i = 0; i < pgdat->nr_zones; i++) {
 299		zone = &pgdat->node_zones[i];
 300		if (!zone->percpu_drift_mark)
 301			continue;
 302
 303		threshold = (*calculate_pressure)(zone);
 304		for_each_online_cpu(cpu)
 305			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
 306							= threshold;
 307	}
 308}
 309
 310/*
 311 * For use when we know that interrupts are disabled,
 312 * or when we know that preemption is disabled and that
 313 * particular counter cannot be updated from interrupt context.
 314 */
 315void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 316			   long delta)
 317{
 318	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 319	s8 __percpu *p = pcp->vm_stat_diff + item;
 320	long x;
 321	long t;
 322
 323	x = delta + __this_cpu_read(*p);
 324
 325	t = __this_cpu_read(pcp->stat_threshold);
 326
 327	if (unlikely(x > t || x < -t)) {
 328		zone_page_state_add(x, zone, item);
 329		x = 0;
 330	}
 331	__this_cpu_write(*p, x);
 332}
 333EXPORT_SYMBOL(__mod_zone_page_state);
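/*
 * Illustrative behaviour (hypothetical numbers): with a stat_threshold of
 * 32 and a per-cpu diff of 30, __mod_zone_page_state(zone, item, 5)
 * computes x = 35, which exceeds the threshold, so 35 is folded into the
 * zone and global counters and the per-cpu diff is reset to 0.
 */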
 334
 335void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
 336				long delta)
 337{
 338	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
 339	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 340	long x;
 341	long t;
 342
 343	x = delta + __this_cpu_read(*p);
 344
 345	t = __this_cpu_read(pcp->stat_threshold);
 346
 347	if (unlikely(x > t || x < -t)) {
 348		node_page_state_add(x, pgdat, item);
 349		x = 0;
 350	}
 351	__this_cpu_write(*p, x);
 352}
 353EXPORT_SYMBOL(__mod_node_page_state);
 354
 355/*
 356 * Optimized increment and decrement functions.
 357 *
 358 * These are only for a single page and therefore can take a struct page *
 359 * argument instead of struct zone *. This allows the inclusion of the code
 360 * generated for page_zone(page) into the optimized functions.
 361 *
 362 * No overflow check is necessary and therefore the differential can be
 363 * incremented or decremented in place which may allow the compilers to
 364 * generate better code.
 365 * The increment or decrement is known and therefore one boundary check can
 366 * be omitted.
 367 *
 368 * NOTE: These functions are very performance sensitive. Change only
 369 * with care.
 370 *
 371 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 372 * However, the code must first determine the differential location in a zone
 373 * based on the processor number and then inc/dec the counter. There is no
 374 * guarantee without disabling preemption that the processor will not change
 375 * in between and therefore the atomicity vs. interrupt cannot be exploited
 376 * in a useful way here.
 377 */
 378void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 379{
 380	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 381	s8 __percpu *p = pcp->vm_stat_diff + item;
 382	s8 v, t;
 383
 384	v = __this_cpu_inc_return(*p);
 385	t = __this_cpu_read(pcp->stat_threshold);
 386	if (unlikely(v > t)) {
 387		s8 overstep = t >> 1;
 388
 389		zone_page_state_add(v + overstep, zone, item);
 390		__this_cpu_write(*p, -overstep);
 391	}
 392}
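/*
 * Illustrative behaviour (hypothetical numbers): with a stat_threshold of
 * 32, the increment that takes the per-cpu diff to 33 folds 33 + 16 = 49
 * into the zone counter and leaves the diff at -16, so roughly another 48
 * increments can be absorbed before the next fold.
 */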
 393
 394void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 395{
 396	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
 397	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 398	s8 v, t;
 399
 400	v = __this_cpu_inc_return(*p);
 401	t = __this_cpu_read(pcp->stat_threshold);
 402	if (unlikely(v > t)) {
 403		s8 overstep = t >> 1;
 404
 405		node_page_state_add(v + overstep, pgdat, item);
 406		__this_cpu_write(*p, -overstep);
 407	}
 408}
 409
 410void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
 411{
 412	__inc_zone_state(page_zone(page), item);
 413}
 414EXPORT_SYMBOL(__inc_zone_page_state);
 415
 416void __inc_node_page_state(struct page *page, enum node_stat_item item)
 417{
 418	__inc_node_state(page_pgdat(page), item);
 419}
 420EXPORT_SYMBOL(__inc_node_page_state);
 421
 422void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 423{
 424	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 425	s8 __percpu *p = pcp->vm_stat_diff + item;
 426	s8 v, t;
 427
 428	v = __this_cpu_dec_return(*p);
 429	t = __this_cpu_read(pcp->stat_threshold);
 430	if (unlikely(v < - t)) {
 431		s8 overstep = t >> 1;
 432
 433		zone_page_state_add(v - overstep, zone, item);
 434		__this_cpu_write(*p, overstep);
 435	}
 436}
 437
 438void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 439{
 440	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
 441	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 442	s8 v, t;
 443
 444	v = __this_cpu_dec_return(*p);
 445	t = __this_cpu_read(pcp->stat_threshold);
 446	if (unlikely(v < - t)) {
 447		s8 overstep = t >> 1;
 448
 449		node_page_state_add(v - overstep, pgdat, item);
 450		__this_cpu_write(*p, overstep);
 451	}
 452}
 453
 454void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
 455{
 456	__dec_zone_state(page_zone(page), item);
 457}
 458EXPORT_SYMBOL(__dec_zone_page_state);
 459
 460void __dec_node_page_state(struct page *page, enum node_stat_item item)
 461{
 462	__dec_node_state(page_pgdat(page), item);
 463}
 464EXPORT_SYMBOL(__dec_node_page_state);
 465
 466#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
 467/*
 468 * If we have cmpxchg_local support then we do not need to incur the overhead
 469 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
 470 *
 471 * mod_state() modifies the zone counter state through atomic per cpu
 472 * operations.
 473 *
 474 * Overstep mode specifies how overstep should handled:
 475 *     0       No overstepping
 476 *     1       Overstepping half of threshold
 477 *     -1      Overstepping minus half of threshold
 478 */
 479static inline void mod_zone_state(struct zone *zone,
 480       enum zone_stat_item item, long delta, int overstep_mode)
 481{
 482	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 483	s8 __percpu *p = pcp->vm_stat_diff + item;
 484	long o, n, t, z;
 485
 486	do {
 487		z = 0;  /* overflow to zone counters */
 488
 489		/*
 490		 * The fetching of the stat_threshold is racy. We may apply
 491		 * a counter threshold to the wrong cpu if we get
 492		 * rescheduled while executing here. However, the next
 493		 * counter update will apply the threshold again and
 494		 * therefore bring the counter under the threshold again.
 495		 *
 496		 * Most of the time the thresholds are the same anyway
 497		 * for all cpus in a zone.
 498		 */
 499		t = this_cpu_read(pcp->stat_threshold);
 500
 501		o = this_cpu_read(*p);
 502		n = delta + o;
 503
 504		if (n > t || n < -t) {
 505			int os = overstep_mode * (t >> 1);
 506
 507			/* Overflow must be added to zone counters */
 508			z = n + os;
 509			n = -os;
 510		}
 511	} while (this_cpu_cmpxchg(*p, o, n) != o);
 512
 513	if (z)
 514		zone_page_state_add(z, zone, item);
 515}
 516
 517void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 518			 long delta)
 519{
 520	mod_zone_state(zone, item, delta, 0);
 521}
 522EXPORT_SYMBOL(mod_zone_page_state);
 523
 524void inc_zone_page_state(struct page *page, enum zone_stat_item item)
 525{
 526	mod_zone_state(page_zone(page), item, 1, 1);
 527}
 528EXPORT_SYMBOL(inc_zone_page_state);
 529
 530void dec_zone_page_state(struct page *page, enum zone_stat_item item)
 531{
 532	mod_zone_state(page_zone(page), item, -1, -1);
 533}
 534EXPORT_SYMBOL(dec_zone_page_state);
 535
 536static inline void mod_node_state(struct pglist_data *pgdat,
 537       enum node_stat_item item, int delta, int overstep_mode)
 538{
 539	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
 540	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 541	long o, n, t, z;
 542
 543	do {
 544		z = 0;  /* overflow to node counters */
 545
 546		/*
 547		 * The fetching of the stat_threshold is racy. We may apply
 548		 * a counter threshold to the wrong cpu if we get
 549		 * rescheduled while executing here. However, the next
 550		 * counter update will apply the threshold again and
 551		 * therefore bring the counter under the threshold again.
 552		 *
 553		 * Most of the time the thresholds are the same anyway
 554		 * for all cpus in a node.
 555		 */
 556		t = this_cpu_read(pcp->stat_threshold);
 557
 558		o = this_cpu_read(*p);
 559		n = delta + o;
 560
 561		if (n > t || n < -t) {
 562			int os = overstep_mode * (t >> 1);
 563
 564			/* Overflow must be added to node counters */
 565			z = n + os;
 566			n = -os;
 567		}
 568	} while (this_cpu_cmpxchg(*p, o, n) != o);
 569
 570	if (z)
 571		node_page_state_add(z, pgdat, item);
 572}
 573
 574void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
 575					long delta)
 576{
 577	mod_node_state(pgdat, item, delta, 0);
 578}
 579EXPORT_SYMBOL(mod_node_page_state);
 580
 581void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 582{
 583	mod_node_state(pgdat, item, 1, 1);
 584}
 585
 586void inc_node_page_state(struct page *page, enum node_stat_item item)
 587{
 588	mod_node_state(page_pgdat(page), item, 1, 1);
 589}
 590EXPORT_SYMBOL(inc_node_page_state);
 591
 592void dec_node_page_state(struct page *page, enum node_stat_item item)
 593{
 594	mod_node_state(page_pgdat(page), item, -1, -1);
 595}
 596EXPORT_SYMBOL(dec_node_page_state);
 597#else
 598/*
 599 * Use interrupt disable to serialize counter updates
 600 */
 601void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 602			 long delta)
 603{
 604	unsigned long flags;
 605
 606	local_irq_save(flags);
 607	__mod_zone_page_state(zone, item, delta);
 608	local_irq_restore(flags);
 609}
 610EXPORT_SYMBOL(mod_zone_page_state);
 611
 612void inc_zone_page_state(struct page *page, enum zone_stat_item item)
 613{
 614	unsigned long flags;
 615	struct zone *zone;
 616
 617	zone = page_zone(page);
 618	local_irq_save(flags);
 619	__inc_zone_state(zone, item);
 620	local_irq_restore(flags);
 621}
 622EXPORT_SYMBOL(inc_zone_page_state);
 623
 624void dec_zone_page_state(struct page *page, enum zone_stat_item item)
 625{
 626	unsigned long flags;
 627
 628	local_irq_save(flags);
 629	__dec_zone_page_state(page, item);
 630	local_irq_restore(flags);
 631}
 632EXPORT_SYMBOL(dec_zone_page_state);
 633
 634void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 635{
 636	unsigned long flags;
 637
 638	local_irq_save(flags);
 639	__inc_node_state(pgdat, item);
 640	local_irq_restore(flags);
 641}
 642EXPORT_SYMBOL(inc_node_state);
 643
 644void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
 645					long delta)
 646{
 647	unsigned long flags;
 648
 649	local_irq_save(flags);
 650	__mod_node_page_state(pgdat, item, delta);
 651	local_irq_restore(flags);
 652}
 653EXPORT_SYMBOL(mod_node_page_state);
 654
 655void inc_node_page_state(struct page *page, enum node_stat_item item)
 656{
 657	unsigned long flags;
 658	struct pglist_data *pgdat;
 659
 660	pgdat = page_pgdat(page);
 661	local_irq_save(flags);
 662	__inc_node_state(pgdat, item);
 663	local_irq_restore(flags);
 664}
 665EXPORT_SYMBOL(inc_node_page_state);
 666
 667void dec_node_page_state(struct page *page, enum node_stat_item item)
 668{
 669	unsigned long flags;
 670
 671	local_irq_save(flags);
 672	__dec_node_page_state(page, item);
 673	local_irq_restore(flags);
 674}
 675EXPORT_SYMBOL(dec_node_page_state);
 676#endif
 677
 678/*
 679 * Fold a differential into the global counters.
 680 * Returns the number of counters updated.
 681 */
 682#ifdef CONFIG_NUMA
 683static int fold_diff(int *zone_diff, int *numa_diff, int *node_diff)
 684{
 685	int i;
 686	int changes = 0;
 687
 688	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 689		if (zone_diff[i]) {
 690			atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
 691			changes++;
 692	}
 693
 694	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
 695		if (numa_diff[i]) {
 696			atomic_long_add(numa_diff[i], &vm_numa_stat[i]);
 697			changes++;
 698	}
 699
 700	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
 701		if (node_diff[i]) {
 702			atomic_long_add(node_diff[i], &vm_node_stat[i]);
 703			changes++;
 704	}
 705	return changes;
 706}
 707#else
 708static int fold_diff(int *zone_diff, int *node_diff)
 709{
 710	int i;
 711	int changes = 0;
 712
 713	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 714		if (zone_diff[i]) {
 715			atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
 716			changes++;
 717	}
 718
 719	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
 720		if (node_diff[i]) {
 721			atomic_long_add(node_diff[i], &vm_node_stat[i]);
 722			changes++;
 723	}
 724	return changes;
 725}
 726#endif /* CONFIG_NUMA */
 727
 728/*
 729 * Update the zone counters for the current cpu.
 730 *
 731 * Note that refresh_cpu_vm_stats strives to only access
 732 * node local memory. The per cpu pagesets on remote zones are placed
 733 * in the memory local to the processor using that pageset. So the
 734 * loop over all zones will access a series of cachelines local to
 735 * the processor.
 736 *
 737 * The call to zone_page_state_add updates the cachelines with the
 738 * statistics in the remote zone struct as well as the global cachelines
 739 * with the global counters. These could cause remote node cache line
 740 * bouncing, so they should only be done when necessary.
 741 *
 742 * The function returns the number of global counters updated.
 743 */
 744static int refresh_cpu_vm_stats(bool do_pagesets)
 745{
 746	struct pglist_data *pgdat;
 747	struct zone *zone;
 748	int i;
 749	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 750#ifdef CONFIG_NUMA
 751	int global_numa_diff[NR_VM_NUMA_STAT_ITEMS] = { 0, };
 752#endif
 753	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
 754	int changes = 0;
 755
 756	for_each_populated_zone(zone) {
 757		struct per_cpu_pageset __percpu *p = zone->pageset;
 758
 759		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
 760			int v;
 761
 762			v = this_cpu_xchg(p->vm_stat_diff[i], 0);
 763			if (v) {
 764
 765				atomic_long_add(v, &zone->vm_stat[i]);
 766				global_zone_diff[i] += v;
 767#ifdef CONFIG_NUMA
 768				/* 3 seconds idle till flush */
 769				__this_cpu_write(p->expire, 3);
 770#endif
 771			}
 772		}
 773#ifdef CONFIG_NUMA
 774		for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) {
 775			int v;
 776
 777			v = this_cpu_xchg(p->vm_numa_stat_diff[i], 0);
 778			if (v) {
 779
 780				atomic_long_add(v, &zone->vm_numa_stat[i]);
 781				global_numa_diff[i] += v;
 782				__this_cpu_write(p->expire, 3);
 783			}
 784		}
 785
 786		if (do_pagesets) {
 787			cond_resched();
 788			/*
 789			 * Deal with draining the remote pageset of this
 790			 * processor
 791			 *
 792			 * Check if there are pages remaining in this pageset;
 793			 * if not, there is nothing to expire.
 794			 */
 795			if (!__this_cpu_read(p->expire) ||
 796			       !__this_cpu_read(p->pcp.count))
 797				continue;
 798
 799			/*
 800			 * We never drain zones local to this processor.
 801			 */
 802			if (zone_to_nid(zone) == numa_node_id()) {
 803				__this_cpu_write(p->expire, 0);
 804				continue;
 805			}
 806
 807			if (__this_cpu_dec_return(p->expire))
 808				continue;
 809
 810			if (__this_cpu_read(p->pcp.count)) {
 811				drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
 812				changes++;
 813			}
 814		}
 815#endif
 816	}
 817
 818	for_each_online_pgdat(pgdat) {
 819		struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;
 820
 821		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
 822			int v;
 823
 824			v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
 825			if (v) {
 826				atomic_long_add(v, &pgdat->vm_stat[i]);
 827				global_node_diff[i] += v;
 828			}
 829		}
 830	}
 831
 832#ifdef CONFIG_NUMA
 833	changes += fold_diff(global_zone_diff, global_numa_diff,
 834			     global_node_diff);
 835#else
 836	changes += fold_diff(global_zone_diff, global_node_diff);
 837#endif
 838	return changes;
 839}
 840
 841/*
 842 * Fold the data for an offline cpu into the global array.
 843 * There cannot be any access by the offline cpu and therefore
 844 * synchronization is simplified.
 845 */
 846void cpu_vm_stats_fold(int cpu)
 847{
 848	struct pglist_data *pgdat;
 849	struct zone *zone;
 850	int i;
 851	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 852#ifdef CONFIG_NUMA
 853	int global_numa_diff[NR_VM_NUMA_STAT_ITEMS] = { 0, };
 854#endif
 855	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
 856
 857	for_each_populated_zone(zone) {
 858		struct per_cpu_pageset *p;
 859
 860		p = per_cpu_ptr(zone->pageset, cpu);
 861
 862		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 863			if (p->vm_stat_diff[i]) {
 864				int v;
 865
 866				v = p->vm_stat_diff[i];
 867				p->vm_stat_diff[i] = 0;
 868				atomic_long_add(v, &zone->vm_stat[i]);
 869				global_zone_diff[i] += v;
 870			}
 871
 872#ifdef CONFIG_NUMA
 873		for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
 874			if (p->vm_numa_stat_diff[i]) {
 875				int v;
 876
 877				v = p->vm_numa_stat_diff[i];
 878				p->vm_numa_stat_diff[i] = 0;
 879				atomic_long_add(v, &zone->vm_numa_stat[i]);
 880				global_numa_diff[i] += v;
 881			}
 882#endif
 883	}
 884
 885	for_each_online_pgdat(pgdat) {
 886		struct per_cpu_nodestat *p;
 887
 888		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
 889
 890		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
 891			if (p->vm_node_stat_diff[i]) {
 892				int v;
 893
 894				v = p->vm_node_stat_diff[i];
 895				p->vm_node_stat_diff[i] = 0;
 896				atomic_long_add(v, &pgdat->vm_stat[i]);
 897				global_node_diff[i] += v;
 898			}
 899	}
 900
 901#ifdef CONFIG_NUMA
 902	fold_diff(global_zone_diff, global_numa_diff, global_node_diff);
 903#else
 904	fold_diff(global_zone_diff, global_node_diff);
 905#endif
 906}
 907
 908/*
 909 * This is only called if !populated_zone(zone), which implies no other users of
 910 * pset->vm_stat_diff[] exist.
 911 */
 912void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
 913{
 914	int i;
 915
 916	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 917		if (pset->vm_stat_diff[i]) {
 918			int v = pset->vm_stat_diff[i];
 919			pset->vm_stat_diff[i] = 0;
 920			atomic_long_add(v, &zone->vm_stat[i]);
 921			atomic_long_add(v, &vm_zone_stat[i]);
 922		}
 923
 924#ifdef CONFIG_NUMA
 925	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
 926		if (pset->vm_numa_stat_diff[i]) {
 927			int v = pset->vm_numa_stat_diff[i];
 928
 929			pset->vm_numa_stat_diff[i] = 0;
 930			atomic_long_add(v, &zone->vm_numa_stat[i]);
 931			atomic_long_add(v, &vm_numa_stat[i]);
 932		}
 933#endif
 934}
 935#endif
 936
 937#ifdef CONFIG_NUMA
 938void __inc_numa_state(struct zone *zone,
 939				 enum numa_stat_item item)
 940{
 941	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 942	u16 __percpu *p = pcp->vm_numa_stat_diff + item;
 943	u16 v;
 944
 945	v = __this_cpu_inc_return(*p);
 946
 947	if (unlikely(v > NUMA_STATS_THRESHOLD)) {
 948		zone_numa_state_add(v, zone, item);
 949		__this_cpu_write(*p, 0);
 950	}
 951}
 952
 953/*
 954 * Determine the per node value of a stat item. This function
 955 * is called frequently in a NUMA machine, so try to be as
 956 * frugal as possible.
 957 */
 958unsigned long sum_zone_node_page_state(int node,
 959				 enum zone_stat_item item)
 960{
 961	struct zone *zones = NODE_DATA(node)->node_zones;
 962	int i;
 963	unsigned long count = 0;
 964
 965	for (i = 0; i < MAX_NR_ZONES; i++)
 966		count += zone_page_state(zones + i, item);
 967
 968	return count;
 969}
 970
 971/*
 972 * Determine the per node value of a numa stat item. To avoid deviation,
 973 * the per cpu stat number in vm_numa_stat_diff[] is also included.
 974 */
 975unsigned long sum_zone_numa_state(int node,
 976				 enum numa_stat_item item)
 977{
 978	struct zone *zones = NODE_DATA(node)->node_zones;
 979	int i;
 980	unsigned long count = 0;
 981
 982	for (i = 0; i < MAX_NR_ZONES; i++)
 983		count += zone_numa_state_snapshot(zones + i, item);
 984
 985	return count;
 986}
 987
 988/*
 989 * Determine the per node value of a stat item.
 990 */
 991unsigned long node_page_state(struct pglist_data *pgdat,
 992				enum node_stat_item item)
 993{
 994	long x = atomic_long_read(&pgdat->vm_stat[item]);
 995#ifdef CONFIG_SMP
 996	if (x < 0)
 997		x = 0;
 998#endif
 999	return x;
1000}
1001#endif
1002
1003#ifdef CONFIG_COMPACTION
1004
1005struct contig_page_info {
1006	unsigned long free_pages;
1007	unsigned long free_blocks_total;
1008	unsigned long free_blocks_suitable;
1009};
1010
1011/*
1012 * Calculate the number of free pages in a zone, how many contiguous
1013 * pages are free and how many are large enough to satisfy an allocation of
1014 * the target size. Note that this function makes no attempt to estimate
1015 * how many suitable free blocks there *might* be if MOVABLE pages were
1016 * migrated. Calculating that is possible, but expensive and can be
1017 * figured out from userspace
1018 */
1019static void fill_contig_page_info(struct zone *zone,
1020				unsigned int suitable_order,
1021				struct contig_page_info *info)
1022{
1023	unsigned int order;
1024
1025	info->free_pages = 0;
1026	info->free_blocks_total = 0;
1027	info->free_blocks_suitable = 0;
1028
1029	for (order = 0; order < MAX_ORDER; order++) {
1030		unsigned long blocks;
1031
1032		/* Count number of free blocks */
1033		blocks = zone->free_area[order].nr_free;
1034		info->free_blocks_total += blocks;
1035
1036		/* Count free base pages */
1037		info->free_pages += blocks << order;
1038
1039		/* Count the suitable free blocks */
1040		if (order >= suitable_order)
1041			info->free_blocks_suitable += blocks <<
1042						(order - suitable_order);
1043	}
1044}
1045
1046/*
1047 * A fragmentation index only makes sense if an allocation of a requested
1048 * size would fail. If that is true, the fragmentation index indicates
1049 * whether external fragmentation or a lack of memory was the problem.
1050 * The value can be used to determine if page reclaim or compaction
1051 * should be used
1052 */
1053static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
1054{
1055	unsigned long requested = 1UL << order;
1056
1057	if (WARN_ON_ONCE(order >= MAX_ORDER))
1058		return 0;
1059
1060	if (!info->free_blocks_total)
1061		return 0;
1062
1063	/* Fragmentation index only makes sense when a request would fail */
1064	if (info->free_blocks_suitable)
1065		return -1000;
1066
1067	/*
1068	 * Index is between 0 and 1 so return within 3 decimal places
1069	 *
1070	 * 0 => allocation would fail due to lack of memory
1071	 * 1 => allocation would fail due to fragmentation
1072	 */
1073	return 1000 - div_u64(1000 + div_u64(info->free_pages * 1000ULL, requested), info->free_blocks_total);
1074}
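/*
 * Worked example (illustrative numbers): for an order-3 request (8 pages)
 * against 1000 free pages that are all isolated order-0 pages
 * (free_blocks_total == 1000, free_blocks_suitable == 0), the index is
 * 1000 - (1000 + 1000 * 1000 / 8) / 1000 = 1000 - 126 = 874, i.e. 0.874,
 * pointing at external fragmentation rather than lack of memory.
 */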
1075
1076/* Same as __fragmentation_index but allocates contig_page_info on stack */
1077int fragmentation_index(struct zone *zone, unsigned int order)
1078{
1079	struct contig_page_info info;
1080
1081	fill_contig_page_info(zone, order, &info);
1082	return __fragmentation_index(order, &info);
1083}
1084#endif
1085
1086#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
1087#ifdef CONFIG_ZONE_DMA
1088#define TEXT_FOR_DMA(xx) xx "_dma",
1089#else
1090#define TEXT_FOR_DMA(xx)
1091#endif
1092
1093#ifdef CONFIG_ZONE_DMA32
1094#define TEXT_FOR_DMA32(xx) xx "_dma32",
1095#else
1096#define TEXT_FOR_DMA32(xx)
1097#endif
1098
1099#ifdef CONFIG_HIGHMEM
1100#define TEXT_FOR_HIGHMEM(xx) xx "_high",
1101#else
1102#define TEXT_FOR_HIGHMEM(xx)
1103#endif
1104
1105#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
1106					TEXT_FOR_HIGHMEM(xx) xx "_movable",
1107
1108const char * const vmstat_text[] = {
1109	/* enum zone_stat_item counters */
1110	"nr_free_pages",
1111	"nr_zone_inactive_anon",
1112	"nr_zone_active_anon",
1113	"nr_zone_inactive_file",
1114	"nr_zone_active_file",
1115	"nr_zone_unevictable",
1116	"nr_zone_write_pending",
1117	"nr_mlock",
1118	"nr_page_table_pages",
1119	"nr_kernel_stack",
1120	"nr_bounce",
1121#if IS_ENABLED(CONFIG_ZSMALLOC)
1122	"nr_zspages",
1123#endif
1124	"nr_free_cma",
1125
1126	/* enum numa_stat_item counters */
1127#ifdef CONFIG_NUMA
1128	"numa_hit",
1129	"numa_miss",
1130	"numa_foreign",
1131	"numa_interleave",
1132	"numa_local",
1133	"numa_other",
1134#endif
1135
1136	/* Node-based counters */
1137	"nr_inactive_anon",
1138	"nr_active_anon",
1139	"nr_inactive_file",
1140	"nr_active_file",
1141	"nr_unevictable",
1142	"nr_slab_reclaimable",
1143	"nr_slab_unreclaimable",
1144	"nr_isolated_anon",
1145	"nr_isolated_file",
1146	"workingset_refault",
1147	"workingset_activate",
1148	"workingset_nodereclaim",
1149	"nr_anon_pages",
1150	"nr_mapped",
1151	"nr_file_pages",
1152	"nr_dirty",
1153	"nr_writeback",
1154	"nr_writeback_temp",
1155	"nr_shmem",
1156	"nr_shmem_hugepages",
1157	"nr_shmem_pmdmapped",
1158	"nr_anon_transparent_hugepages",
1159	"nr_unstable",
1160	"nr_vmscan_write",
1161	"nr_vmscan_immediate_reclaim",
1162	"nr_dirtied",
1163	"nr_written",
1164	"", /* nr_indirectly_reclaimable */
1165
1166	/* enum writeback_stat_item counters */
1167	"nr_dirty_threshold",
1168	"nr_dirty_background_threshold",
1169
1170#ifdef CONFIG_VM_EVENT_COUNTERS
1171	/* enum vm_event_item counters */
1172	"pgpgin",
1173	"pgpgout",
1174	"pswpin",
1175	"pswpout",
1176
1177	TEXTS_FOR_ZONES("pgalloc")
1178	TEXTS_FOR_ZONES("allocstall")
1179	TEXTS_FOR_ZONES("pgskip")
1180
1181	"pgfree",
1182	"pgactivate",
1183	"pgdeactivate",
1184	"pglazyfree",
1185
1186	"pgfault",
1187	"pgmajfault",
1188	"pglazyfreed",
1189
1190	"pgrefill",
1191	"pgsteal_kswapd",
1192	"pgsteal_direct",
1193	"pgscan_kswapd",
1194	"pgscan_direct",
1195	"pgscan_direct_throttle",
1196
1197#ifdef CONFIG_NUMA
1198	"zone_reclaim_failed",
1199#endif
1200	"pginodesteal",
1201	"slabs_scanned",
1202	"kswapd_inodesteal",
1203	"kswapd_low_wmark_hit_quickly",
1204	"kswapd_high_wmark_hit_quickly",
1205	"pageoutrun",
1206
1207	"pgrotated",
1208
1209	"drop_pagecache",
1210	"drop_slab",
1211	"oom_kill",
1212
1213#ifdef CONFIG_NUMA_BALANCING
1214	"numa_pte_updates",
1215	"numa_huge_pte_updates",
1216	"numa_hint_faults",
1217	"numa_hint_faults_local",
1218	"numa_pages_migrated",
1219#endif
1220#ifdef CONFIG_MIGRATION
1221	"pgmigrate_success",
1222	"pgmigrate_fail",
1223#endif
1224#ifdef CONFIG_COMPACTION
1225	"compact_migrate_scanned",
1226	"compact_free_scanned",
1227	"compact_isolated",
1228	"compact_stall",
1229	"compact_fail",
1230	"compact_success",
1231	"compact_daemon_wake",
1232	"compact_daemon_migrate_scanned",
1233	"compact_daemon_free_scanned",
1234#endif
1235
1236#ifdef CONFIG_HUGETLB_PAGE
1237	"htlb_buddy_alloc_success",
1238	"htlb_buddy_alloc_fail",
1239#endif
1240	"unevictable_pgs_culled",
1241	"unevictable_pgs_scanned",
1242	"unevictable_pgs_rescued",
1243	"unevictable_pgs_mlocked",
1244	"unevictable_pgs_munlocked",
1245	"unevictable_pgs_cleared",
1246	"unevictable_pgs_stranded",
1247
1248#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1249	"thp_fault_alloc",
1250	"thp_fault_fallback",
1251	"thp_collapse_alloc",
1252	"thp_collapse_alloc_failed",
1253	"thp_file_alloc",
1254	"thp_file_mapped",
1255	"thp_split_page",
1256	"thp_split_page_failed",
1257	"thp_deferred_split_page",
1258	"thp_split_pmd",
1259#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1260	"thp_split_pud",
1261#endif
1262	"thp_zero_page_alloc",
1263	"thp_zero_page_alloc_failed",
1264	"thp_swpout",
1265	"thp_swpout_fallback",
1266#endif
1267#ifdef CONFIG_MEMORY_BALLOON
1268	"balloon_inflate",
1269	"balloon_deflate",
1270#ifdef CONFIG_BALLOON_COMPACTION
1271	"balloon_migrate",
1272#endif
1273#endif /* CONFIG_MEMORY_BALLOON */
1274#ifdef CONFIG_DEBUG_TLBFLUSH
1275#ifdef CONFIG_SMP
1276	"nr_tlb_remote_flush",
1277	"nr_tlb_remote_flush_received",
1278#endif /* CONFIG_SMP */
1279	"nr_tlb_local_flush_all",
1280	"nr_tlb_local_flush_one",
1281#endif /* CONFIG_DEBUG_TLBFLUSH */
1282
1283#ifdef CONFIG_DEBUG_VM_VMACACHE
1284	"vmacache_find_calls",
1285	"vmacache_find_hits",
1286	"vmacache_full_flushes",
1287#endif
1288#ifdef CONFIG_SWAP
1289	"swap_ra",
1290	"swap_ra_hit",
1291#endif
1292#endif /* CONFIG_VM_EVENT_COUNTERS */
1293};
1294#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
1295
1296#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
1297     defined(CONFIG_PROC_FS)
1298static void *frag_start(struct seq_file *m, loff_t *pos)
1299{
1300	pg_data_t *pgdat;
1301	loff_t node = *pos;
1302
1303	for (pgdat = first_online_pgdat();
1304	     pgdat && node;
1305	     pgdat = next_online_pgdat(pgdat))
1306		--node;
1307
1308	return pgdat;
1309}
1310
1311static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
1312{
1313	pg_data_t *pgdat = (pg_data_t *)arg;
1314
1315	(*pos)++;
1316	return next_online_pgdat(pgdat);
1317}
1318
1319static void frag_stop(struct seq_file *m, void *arg)
1320{
1321}
1322
1323/*
1324 * Walk zones in a node and print using a callback.
1325 * If @assert_populated is true, only use callback for zones that are populated.
1326 */
1327static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
1328		bool assert_populated, bool nolock,
1329		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
1330{
1331	struct zone *zone;
1332	struct zone *node_zones = pgdat->node_zones;
1333	unsigned long flags;
1334
1335	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
1336		if (assert_populated && !populated_zone(zone))
1337			continue;
1338
1339		if (!nolock)
1340			spin_lock_irqsave(&zone->lock, flags);
1341		print(m, pgdat, zone);
1342		if (!nolock)
1343			spin_unlock_irqrestore(&zone->lock, flags);
1344	}
1345}
1346#endif
1347
1348#ifdef CONFIG_PROC_FS
1349static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
1350						struct zone *zone)
1351{
1352	int order;
1353
1354	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1355	for (order = 0; order < MAX_ORDER; ++order)
1356		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
1357	seq_putc(m, '\n');
1358}
1359
1360/*
1361 * This walks the free areas for each zone.
1362 */
1363static int frag_show(struct seq_file *m, void *arg)
1364{
1365	pg_data_t *pgdat = (pg_data_t *)arg;
1366	walk_zones_in_node(m, pgdat, true, false, frag_show_print);
1367	return 0;
1368}
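/*
 * Illustrative /proc/buddyinfo line produced by the callback above
 * (made-up counts, 11 order columns as on a typical MAX_ORDER == 11
 * configuration):
 *
 *   Node 0, zone   Normal    145     62     30     11      5      2      1      0      0      0      0
 */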
1369
1370static void pagetypeinfo_showfree_print(struct seq_file *m,
1371					pg_data_t *pgdat, struct zone *zone)
1372{
1373	int order, mtype;
1374
1375	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
1376		seq_printf(m, "Node %4d, zone %8s, type %12s ",
1377					pgdat->node_id,
1378					zone->name,
1379					migratetype_names[mtype]);
1380		for (order = 0; order < MAX_ORDER; ++order) {
1381			unsigned long freecount = 0;
1382			struct free_area *area;
1383			struct list_head *curr;
1384
1385			area = &(zone->free_area[order]);
1386
1387			list_for_each(curr, &area->free_list[mtype])
1388				freecount++;
1389			seq_printf(m, "%6lu ", freecount);
1390		}
1391		seq_putc(m, '\n');
1392	}
1393}
1394
1395/* Print out the free pages at each order for each migratetype */
1396static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
1397{
1398	int order;
1399	pg_data_t *pgdat = (pg_data_t *)arg;
1400
1401	/* Print header */
1402	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
1403	for (order = 0; order < MAX_ORDER; ++order)
1404		seq_printf(m, "%6d ", order);
1405	seq_putc(m, '\n');
1406
1407	walk_zones_in_node(m, pgdat, true, false, pagetypeinfo_showfree_print);
1408
1409	return 0;
1410}
1411
1412static void pagetypeinfo_showblockcount_print(struct seq_file *m,
1413					pg_data_t *pgdat, struct zone *zone)
1414{
1415	int mtype;
1416	unsigned long pfn;
1417	unsigned long start_pfn = zone->zone_start_pfn;
1418	unsigned long end_pfn = zone_end_pfn(zone);
1419	unsigned long count[MIGRATE_TYPES] = { 0, };
1420
1421	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
1422		struct page *page;
1423
1424		page = pfn_to_online_page(pfn);
1425		if (!page)
1426			continue;
1427
1428		/* Watch for unexpected holes punched in the memmap */
1429		if (!memmap_valid_within(pfn, page, zone))
1430			continue;
1431
1432		if (page_zone(page) != zone)
1433			continue;
1434
1435		mtype = get_pageblock_migratetype(page);
1436
1437		if (mtype < MIGRATE_TYPES)
1438			count[mtype]++;
1439	}
1440
1441	/* Print counts */
1442	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1443	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1444		seq_printf(m, "%12lu ", count[mtype]);
1445	seq_putc(m, '\n');
1446}
1447
1448/* Print out the number of pageblocks for each migratetype */
1449static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
1450{
1451	int mtype;
1452	pg_data_t *pgdat = (pg_data_t *)arg;
1453
1454	seq_printf(m, "\n%-23s", "Number of blocks type ");
1455	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1456		seq_printf(m, "%12s ", migratetype_names[mtype]);
1457	seq_putc(m, '\n');
1458	walk_zones_in_node(m, pgdat, true, false,
1459		pagetypeinfo_showblockcount_print);
1460
1461	return 0;
1462}
1463
1464/*
1465 * Print out the number of pageblocks for each migratetype that contain pages
1466 * of other types. This gives an indication of how well fallbacks are being
1467 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
1468 * to determine what is going on
1469 */
1470static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
1471{
1472#ifdef CONFIG_PAGE_OWNER
1473	int mtype;
1474
1475	if (!static_branch_unlikely(&page_owner_inited))
1476		return;
1477
1478	drain_all_pages(NULL);
1479
1480	seq_printf(m, "\n%-23s", "Number of mixed blocks ");
1481	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1482		seq_printf(m, "%12s ", migratetype_names[mtype]);
1483	seq_putc(m, '\n');
1484
1485	walk_zones_in_node(m, pgdat, true, true,
1486		pagetypeinfo_showmixedcount_print);
1487#endif /* CONFIG_PAGE_OWNER */
1488}
1489
1490/*
1491 * This prints out statistics in relation to grouping pages by mobility.
1492 * It is expensive to collect so do not constantly read the file.
1493 */
1494static int pagetypeinfo_show(struct seq_file *m, void *arg)
1495{
1496	pg_data_t *pgdat = (pg_data_t *)arg;
1497
1498	/* check memoryless node */
1499	if (!node_state(pgdat->node_id, N_MEMORY))
1500		return 0;
1501
1502	seq_printf(m, "Page block order: %d\n", pageblock_order);
1503	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
1504	seq_putc(m, '\n');
1505	pagetypeinfo_showfree(m, pgdat);
1506	pagetypeinfo_showblockcount(m, pgdat);
1507	pagetypeinfo_showmixedcount(m, pgdat);
1508
1509	return 0;
1510}
1511
1512static const struct seq_operations fragmentation_op = {
1513	.start	= frag_start,
1514	.next	= frag_next,
1515	.stop	= frag_stop,
1516	.show	= frag_show,
1517};
1518
1519static int fragmentation_open(struct inode *inode, struct file *file)
1520{
1521	return seq_open(file, &fragmentation_op);
1522}
1523
1524static const struct file_operations buddyinfo_file_operations = {
1525	.open		= fragmentation_open,
1526	.read		= seq_read,
1527	.llseek		= seq_lseek,
1528	.release	= seq_release,
1529};
1530
1531static const struct seq_operations pagetypeinfo_op = {
1532	.start	= frag_start,
1533	.next	= frag_next,
1534	.stop	= frag_stop,
1535	.show	= pagetypeinfo_show,
1536};
1537
1538static int pagetypeinfo_open(struct inode *inode, struct file *file)
1539{
1540	return seq_open(file, &pagetypeinfo_op);
1541}
1542
1543static const struct file_operations pagetypeinfo_file_operations = {
1544	.open		= pagetypeinfo_open,
1545	.read		= seq_read,
1546	.llseek		= seq_lseek,
1547	.release	= seq_release,
1548};
1549
1550static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
1551{
1552	int zid;
1553
1554	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1555		struct zone *compare = &pgdat->node_zones[zid];
1556
1557		if (populated_zone(compare))
1558			return zone == compare;
1559	}
1560
1561	return false;
1562}
1563
1564static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1565							struct zone *zone)
1566{
1567	int i;
1568	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1569	if (is_zone_first_populated(pgdat, zone)) {
1570		seq_printf(m, "\n  per-node stats");
1571		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1572			seq_printf(m, "\n      %-12s %lu",
1573				vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
1574				NR_VM_NUMA_STAT_ITEMS],
1575				node_page_state(pgdat, i));
1576		}
1577	}
1578	seq_printf(m,
1579		   "\n  pages free     %lu"
1580		   "\n        min      %lu"
1581		   "\n        low      %lu"
1582		   "\n        high     %lu"
1583		   "\n        spanned  %lu"
1584		   "\n        present  %lu"
1585		   "\n        managed  %lu",
1586		   zone_page_state(zone, NR_FREE_PAGES),
1587		   min_wmark_pages(zone),
1588		   low_wmark_pages(zone),
1589		   high_wmark_pages(zone),
1590		   zone->spanned_pages,
1591		   zone->present_pages,
1592		   zone->managed_pages);
1593
1594	seq_printf(m,
1595		   "\n        protection: (%ld",
1596		   zone->lowmem_reserve[0]);
1597	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
1598		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
1599	seq_putc(m, ')');
1600
1601	/* If unpopulated, no other information is useful */
1602	if (!populated_zone(zone)) {
1603		seq_putc(m, '\n');
1604		return;
1605	}
1606
1607	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1608		seq_printf(m, "\n      %-12s %lu", vmstat_text[i],
1609				zone_page_state(zone, i));
1610
1611#ifdef CONFIG_NUMA
1612	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
1613		seq_printf(m, "\n      %-12s %lu",
1614				vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
1615				zone_numa_state_snapshot(zone, i));
1616#endif
1617
1618	seq_printf(m, "\n  pagesets");
1619	for_each_online_cpu(i) {
1620		struct per_cpu_pageset *pageset;
1621
1622		pageset = per_cpu_ptr(zone->pageset, i);
1623		seq_printf(m,
1624			   "\n    cpu: %i"
1625			   "\n              count: %i"
1626			   "\n              high:  %i"
1627			   "\n              batch: %i",
1628			   i,
1629			   pageset->pcp.count,
1630			   pageset->pcp.high,
1631			   pageset->pcp.batch);
1632#ifdef CONFIG_SMP
1633		seq_printf(m, "\n  vm stats threshold: %d",
1634				pageset->stat_threshold);
1635#endif
1636	}
1637	seq_printf(m,
1638		   "\n  node_unreclaimable:  %u"
1639		   "\n  start_pfn:           %lu",
1640		   pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
1641		   zone->zone_start_pfn);
1642	seq_putc(m, '\n');
1643}
1644
1645/*
1646 * Output information about zones in @pgdat.  All zones are printed regardless
1647 * of whether they are populated or not: lowmem_reserve_ratio operates on the
1648 * set of all zones and userspace would not be aware of such zones if they are
1649 * suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio).
1650 */
1651static int zoneinfo_show(struct seq_file *m, void *arg)
1652{
1653	pg_data_t *pgdat = (pg_data_t *)arg;
1654	walk_zones_in_node(m, pgdat, false, false, zoneinfo_show_print);
1655	return 0;
1656}
1657
1658static const struct seq_operations zoneinfo_op = {
1659	.start	= frag_start, /* iterate over all zones. The same as in
1660			       * fragmentation. */
1661	.next	= frag_next,
1662	.stop	= frag_stop,
1663	.show	= zoneinfo_show,
1664};
1665
1666static int zoneinfo_open(struct inode *inode, struct file *file)
1667{
1668	return seq_open(file, &zoneinfo_op);
1669}
1670
1671static const struct file_operations zoneinfo_file_operations = {
1672	.open		= zoneinfo_open,
1673	.read		= seq_read,
1674	.llseek		= seq_lseek,
1675	.release	= seq_release,
1676};
1677
1678enum writeback_stat_item {
1679	NR_DIRTY_THRESHOLD,
1680	NR_DIRTY_BG_THRESHOLD,
1681	NR_VM_WRITEBACK_STAT_ITEMS,
1682};
1683
1684static void *vmstat_start(struct seq_file *m, loff_t *pos)
1685{
1686	unsigned long *v;
1687	int i, stat_items_size;
1688
1689	if (*pos >= ARRAY_SIZE(vmstat_text))
1690		return NULL;
1691	stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
1692			  NR_VM_NUMA_STAT_ITEMS * sizeof(unsigned long) +
1693			  NR_VM_NODE_STAT_ITEMS * sizeof(unsigned long) +
1694			  NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
1695
1696#ifdef CONFIG_VM_EVENT_COUNTERS
1697	stat_items_size += sizeof(struct vm_event_state);
1698#endif
1699
1700	v = kmalloc(stat_items_size, GFP_KERNEL);
1701	m->private = v;
1702	if (!v)
1703		return ERR_PTR(-ENOMEM);
1704	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1705		v[i] = global_zone_page_state(i);
1706	v += NR_VM_ZONE_STAT_ITEMS;
1707
1708#ifdef CONFIG_NUMA
1709	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
1710		v[i] = global_numa_state(i);
1711	v += NR_VM_NUMA_STAT_ITEMS;
1712#endif
1713
1714	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
1715		v[i] = global_node_page_state(i);
1716	v += NR_VM_NODE_STAT_ITEMS;
1717
1718	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1719			    v + NR_DIRTY_THRESHOLD);
1720	v += NR_VM_WRITEBACK_STAT_ITEMS;
1721
1722#ifdef CONFIG_VM_EVENT_COUNTERS
1723	all_vm_events(v);
1724	v[PGPGIN] /= 2;		/* sectors -> kbytes */
1725	v[PGPGOUT] /= 2;
1726#endif
1727	return (unsigned long *)m->private + *pos;
1728}
1729
1730static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1731{
1732	(*pos)++;
1733	if (*pos >= ARRAY_SIZE(vmstat_text))
1734		return NULL;
1735	return (unsigned long *)m->private + *pos;
1736}
1737
1738static int vmstat_show(struct seq_file *m, void *arg)
1739{
1740	unsigned long *l = arg;
1741	unsigned long off = l - (unsigned long *)m->private;
1742
1743	/* Skip hidden vmstat items. */
1744	if (*vmstat_text[off] == '\0')
1745		return 0;
1746
1747	seq_puts(m, vmstat_text[off]);
1748	seq_put_decimal_ull(m, " ", *l);
1749	seq_putc(m, '\n');
1750	return 0;
1751}
1752
1753static void vmstat_stop(struct seq_file *m, void *arg)
1754{
1755	kfree(m->private);
1756	m->private = NULL;
1757}
1758
1759static const struct seq_operations vmstat_op = {
1760	.start	= vmstat_start,
1761	.next	= vmstat_next,
1762	.stop	= vmstat_stop,
1763	.show	= vmstat_show,
1764};
1765
1766static int vmstat_open(struct inode *inode, struct file *file)
1767{
1768	return seq_open(file, &vmstat_op);
1769}
1770
1771static const struct file_operations vmstat_file_operations = {
1772	.open		= vmstat_open,
1773	.read		= seq_read,
1774	.llseek		= seq_lseek,
1775	.release	= seq_release,
1776};
1777#endif /* CONFIG_PROC_FS */
1778
1779#ifdef CONFIG_SMP
1780static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1781int sysctl_stat_interval __read_mostly = HZ;
1782
1783#ifdef CONFIG_PROC_FS
1784static void refresh_vm_stats(struct work_struct *work)
1785{
1786	refresh_cpu_vm_stats(true);
1787}
1788
1789int vmstat_refresh(struct ctl_table *table, int write,
1790		   void __user *buffer, size_t *lenp, loff_t *ppos)
1791{
1792	long val;
1793	int err;
1794	int i;
1795
1796	/*
1797	 * The regular update, every sysctl_stat_interval, may come later
1798	 * than expected: leaving a significant amount in per_cpu buckets.
1799	 * This is particularly misleading when checking a quantity of HUGE
1800	 * pages, immediately after running a test.  /proc/sys/vm/stat_refresh,
1801	 * which can equally be echo'ed to or cat'ted from (by root),
1802	 * can be used to update the stats just before reading them.
1803	 *
1804	 * Oh, and since global_zone_page_state() etc. are so careful to hide
1805	 * transiently negative values, report an error here if any of
1806	 * the stats is negative, so we know to go looking for imbalance.
1807	 */
1808	err = schedule_on_each_cpu(refresh_vm_stats);
1809	if (err)
1810		return err;
1811	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
1812		val = atomic_long_read(&vm_zone_stat[i]);
1813		if (val < 0) {
1814			pr_warn("%s: %s %ld\n",
1815				__func__, vmstat_text[i], val);
1816			err = -EINVAL;
1817		}
1818	}
1819#ifdef CONFIG_NUMA
1820	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) {
1821		val = atomic_long_read(&vm_numa_stat[i]);
1822		if (val < 0) {
1823			pr_warn("%s: %s %ld\n",
1824				__func__, vmstat_text[i + NR_VM_ZONE_STAT_ITEMS], val);
1825			err = -EINVAL;
1826		}
1827	}
1828#endif
1829	if (err)
1830		return err;
1831	if (write)
1832		*ppos += *lenp;
1833	else
1834		*lenp = 0;
1835	return 0;
1836}
1837#endif /* CONFIG_PROC_FS */
1838
1839static void vmstat_update(struct work_struct *w)
1840{
1841	if (refresh_cpu_vm_stats(true)) {
1842		/*
1843		 * Counters were updated so we expect more updates
1844		 * to occur in the future. Keep on running the
1845		 * update worker thread.
1846		 */
1847		preempt_disable();
1848		queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
1849				this_cpu_ptr(&vmstat_work),
1850				round_jiffies_relative(sysctl_stat_interval));
1851		preempt_enable();
1852	}
1853}
1854
1860/*
1861 * Check if the diffs for a certain cpu indicate that
1862 * an update is needed.
1863 */
1864static bool need_update(int cpu)
1865{
1866	struct zone *zone;
1867
1868	for_each_populated_zone(zone) {
1869		struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);
1870
1871		BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1);
1872#ifdef CONFIG_NUMA
1873		BUILD_BUG_ON(sizeof(p->vm_numa_stat_diff[0]) != 2);
1874#endif
1875
1876		/*
1877		 * The fast way of checking if there are any vmstat diffs.
1878		 * This works because the diffs are byte sized items.
1879		 */
1880		if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS))
1881			return true;
1882#ifdef CONFIG_NUMA
1883		if (memchr_inv(p->vm_numa_stat_diff, 0, NR_VM_NUMA_STAT_ITEMS))
1884			return true;
1885#endif
1886	}
1887	return false;
1888}
1889
1890/*
1891 * Switch off vmstat processing and then fold all the remaining differentials
1892 * until the diffs stay at zero. The function is used by NOHZ and can only be
1893 * invoked when tick processing is not active.
1894 */
1895void quiet_vmstat(void)
1896{
1897	if (system_state != SYSTEM_RUNNING)
1898		return;
1899
1900	if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
1901		return;
1902
1903	if (!need_update(smp_processor_id()))
1904		return;
1905
1906	/*
1907	 * Just refresh counters and do not care about the pending delayed
1908	 * vmstat_update. It doesn't fire that often to matter and canceling
1909	 * it would be too expensive from this path.
1910	 * vmstat_shepherd will take care about that for us.
1911	 */
1912	refresh_cpu_vm_stats(false);
1913}
1914
1915/*
1916 * Shepherd worker that checks the differentials of
1917 * processors whose vmstat update worker threads were
1918 * disabled because of inactivity, and queues the
1919 * update work again when updates are pending.
1920 */
1921static void vmstat_shepherd(struct work_struct *w);
1922
1923static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
1924
1925static void vmstat_shepherd(struct work_struct *w)
1926{
1927	int cpu;
1928
1929	get_online_cpus();
1930	/* Check processors whose vmstat worker threads have been disabled */
1931	for_each_online_cpu(cpu) {
1932		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
1933
1934		if (!delayed_work_pending(dw) && need_update(cpu))
1935			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
1936	}
1937	put_online_cpus();
1938
1939	schedule_delayed_work(&shepherd,
1940		round_jiffies_relative(sysctl_stat_interval));
1941}
1942
1943static void __init start_shepherd_timer(void)
1944{
1945	int cpu;
1946
1947	for_each_possible_cpu(cpu)
1948		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
1949			vmstat_update);
1950
1951	schedule_delayed_work(&shepherd,
1952		round_jiffies_relative(sysctl_stat_interval));
1953}
1954
1955static void __init init_cpu_node_state(void)
1956{
1957	int node;
1958
1959	for_each_online_node(node) {
1960		if (cpumask_weight(cpumask_of_node(node)) > 0)
1961			node_set_state(node, N_CPU);
1962	}
1963}
1964
1965static int vmstat_cpu_online(unsigned int cpu)
1966{
1967	refresh_zone_stat_thresholds();
1968	node_set_state(cpu_to_node(cpu), N_CPU);
1969	return 0;
1970}
1971
1972static int vmstat_cpu_down_prep(unsigned int cpu)
1973{
1974	cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
1975	return 0;
1976}
1977
1978static int vmstat_cpu_dead(unsigned int cpu)
1979{
1980	const struct cpumask *node_cpus;
1981	int node;
1982
1983	node = cpu_to_node(cpu);
1984
1985	refresh_zone_stat_thresholds();
1986	node_cpus = cpumask_of_node(node);
1987	if (cpumask_weight(node_cpus) > 0)
1988		return 0;
1989
1990	node_clear_state(node, N_CPU);
1991	return 0;
1992}
1993
1994#endif
1995
1996struct workqueue_struct *mm_percpu_wq;
1997
1998void __init init_mm_internals(void)
1999{
2000	int ret __maybe_unused;
2001
2002	mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);
2003
2004#ifdef CONFIG_SMP
2005	ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
2006					NULL, vmstat_cpu_dead);
2007	if (ret < 0)
2008		pr_err("vmstat: failed to register 'dead' hotplug state\n");
2009
2010	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
2011					vmstat_cpu_online,
2012					vmstat_cpu_down_prep);
2013	if (ret < 0)
2014		pr_err("vmstat: failed to register 'online' hotplug state\n");
2015
2016	get_online_cpus();
2017	init_cpu_node_state();
2018	put_online_cpus();
2019
2020	start_shepherd_timer();
2021#endif
2022#ifdef CONFIG_PROC_FS
2023	proc_create("buddyinfo", 0444, NULL, &buddyinfo_file_operations);
2024	proc_create("pagetypeinfo", 0444, NULL, &pagetypeinfo_file_operations);
2025	proc_create("vmstat", 0444, NULL, &vmstat_file_operations);
2026	proc_create("zoneinfo", 0444, NULL, &zoneinfo_file_operations);
2027#endif
2028}
2029
2030#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
2031
2032/*
2033 * Return an index indicating how much of the available free memory is
2034 * unusable for an allocation of the requested size.
2035 */
2036static int unusable_free_index(unsigned int order,
2037				struct contig_page_info *info)
2038{
2039	/* No free memory is interpreted as all free memory is unusable */
2040	if (info->free_pages == 0)
2041		return 1000;
2042
2043	/*
2044	 * Index should be a value between 0 and 1. Return a value to 3
2045	 * decimal places.
2046	 *
2047	 * 0 => no fragmentation
2048	 * 1 => high fragmentation
2049	 */
2050	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
2051
2052}
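/*
 * Worked example (illustrative numbers): for an order-2 request with 1000
 * free pages of which 200 blocks are order-2 or larger
 * (free_blocks_suitable == 200), the suitable blocks cover 200 << 2 = 800
 * pages, so the index is (1000 - 800) * 1000 / 1000 = 200, i.e. 0.200 or
 * 20% of free memory unusable for that allocation.
 */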
2053
2054static void unusable_show_print(struct seq_file *m,
2055					pg_data_t *pgdat, struct zone *zone)
2056{
2057	unsigned int order;
2058	int index;
2059	struct contig_page_info info;
2060
2061	seq_printf(m, "Node %d, zone %8s ",
2062				pgdat->node_id,
2063				zone->name);
2064	for (order = 0; order < MAX_ORDER; ++order) {
2065		fill_contig_page_info(zone, order, &info);
2066		index = unusable_free_index(order, &info);
2067		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
2068	}
2069
2070	seq_putc(m, '\n');
2071}
2072
2073/*
2074 * Display unusable free space index
2075 *
2076 * The unusable free space index measures how much of the available free
2077 * memory cannot be used to satisfy an allocation of a given size and is a
2078 * value between 0 and 1. The higher the value, the more of free memory is
2079 * unusable and by implication, the worse the external fragmentation is. This
2080 * can be expressed as a percentage by multiplying by 100.
2081 */
2082static int unusable_show(struct seq_file *m, void *arg)
2083{
2084	pg_data_t *pgdat = (pg_data_t *)arg;
2085
2086	/* check memoryless node */
2087	if (!node_state(pgdat->node_id, N_MEMORY))
2088		return 0;
2089
2090	walk_zones_in_node(m, pgdat, true, false, unusable_show_print);
2091
2092	return 0;
2093}
2094
2095static const struct seq_operations unusable_op = {
2096	.start	= frag_start,
2097	.next	= frag_next,
2098	.stop	= frag_stop,
2099	.show	= unusable_show,
2100};
2101
2102static int unusable_open(struct inode *inode, struct file *file)
2103{
2104	return seq_open(file, &unusable_op);
2105}
2106
2107static const struct file_operations unusable_file_ops = {
2108	.open		= unusable_open,
2109	.read		= seq_read,
2110	.llseek		= seq_lseek,
2111	.release	= seq_release,
2112};
2113
2114static void extfrag_show_print(struct seq_file *m,
2115					pg_data_t *pgdat, struct zone *zone)
2116{
2117	unsigned int order;
2118	int index;
2119
2120	/* Alloc on stack as interrupts are disabled for zone walk */
2121	struct contig_page_info info;
2122
2123	seq_printf(m, "Node %d, zone %8s ",
2124				pgdat->node_id,
2125				zone->name);
2126	for (order = 0; order < MAX_ORDER; ++order) {
2127		fill_contig_page_info(zone, order, &info);
2128		index = __fragmentation_index(order, &info);
2129		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
2130	}
2131
2132	seq_putc(m, '\n');
2133}
2134
2135/*
2136 * Display fragmentation index for orders that allocations would fail for
2137 */
2138static int extfrag_show(struct seq_file *m, void *arg)
2139{
2140	pg_data_t *pgdat = (pg_data_t *)arg;
2141
2142	walk_zones_in_node(m, pgdat, true, false, extfrag_show_print);
2143
2144	return 0;
2145}
2146
2147static const struct seq_operations extfrag_op = {
2148	.start	= frag_start,
2149	.next	= frag_next,
2150	.stop	= frag_stop,
2151	.show	= extfrag_show,
2152};
2153
2154static int extfrag_open(struct inode *inode, struct file *file)
2155{
2156	return seq_open(file, &extfrag_op);
2157}
2158
2159static const struct file_operations extfrag_file_ops = {
2160	.open		= extfrag_open,
2161	.read		= seq_read,
2162	.llseek		= seq_lseek,
2163	.release	= seq_release,
2164};
2165
2166static int __init extfrag_debug_init(void)
2167{
2168	struct dentry *extfrag_debug_root;
2169
2170	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
2171	if (!extfrag_debug_root)
2172		return -ENOMEM;
2173
2174	if (!debugfs_create_file("unusable_index", 0444,
2175			extfrag_debug_root, NULL, &unusable_file_ops))
2176		goto fail;
2177
2178	if (!debugfs_create_file("extfrag_index", 0444,
2179			extfrag_debug_root, NULL, &extfrag_file_ops))
2180		goto fail;
2181
2182	return 0;
2183fail:
2184	debugfs_remove_recursive(extfrag_debug_root);
2185	return -ENOMEM;
2186}
2187
2188module_init(extfrag_debug_init);
2189#endif