v3.1
   1/*
   2 *  linux/mm/vmstat.c
   3 *
   4 *  Manages VM statistics
   5 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   6 *
   7 *  zoned VM statistics
   8 *  Copyright (C) 2006 Silicon Graphics, Inc.,
   9 *		Christoph Lameter <christoph@lameter.com>
  10 */
  11#include <linux/fs.h>
  12#include <linux/mm.h>
  13#include <linux/err.h>
  14#include <linux/module.h>
  15#include <linux/slab.h>
  16#include <linux/cpu.h>
  17#include <linux/vmstat.h>
  18#include <linux/sched.h>
  19#include <linux/math64.h>
  20#include <linux/writeback.h>
  21#include <linux/compaction.h>
  22
  23#ifdef CONFIG_VM_EVENT_COUNTERS
  24DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
  25EXPORT_PER_CPU_SYMBOL(vm_event_states);
  26
  27static void sum_vm_events(unsigned long *ret)
  28{
  29	int cpu;
  30	int i;
  31
  32	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
  33
  34	for_each_online_cpu(cpu) {
  35		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
  36
  37		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
  38			ret[i] += this->event[i];
  39	}
  40}
  41
  42/*
  43 * Accumulate the vm event counters across all CPUs.
  44 * The result is unavoidably approximate - it can change
  45 * during and after execution of this function.
  46*/
  47void all_vm_events(unsigned long *ret)
  48{
  49	get_online_cpus();
  50	sum_vm_events(ret);
  51	put_online_cpus();
  52}
  53EXPORT_SYMBOL_GPL(all_vm_events);
  54
  55#ifdef CONFIG_HOTPLUG
  56/*
  57 * Fold the foreign cpu events into our own.
  58 *
  59 * This is adding to the events on one processor
  60 * but keeps the global counts constant.
  61 */
  62void vm_events_fold_cpu(int cpu)
  63{
  64	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
  65	int i;
  66
  67	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
  68		count_vm_events(i, fold_state->event[i]);
  69		fold_state->event[i] = 0;
  70	}
  71}
  72#endif /* CONFIG_HOTPLUG */
  73
  74#endif /* CONFIG_VM_EVENT_COUNTERS */
  75
  76/*
  77 * Manage combined zone based / global counters
  78 *
  79 * vm_stat contains the global counters
  80 */
  81atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
  82EXPORT_SYMBOL(vm_stat);
  83
  84#ifdef CONFIG_SMP
  85
  86int calculate_pressure_threshold(struct zone *zone)
  87{
  88	int threshold;
  89	int watermark_distance;
  90
  91	/*
  92	 * As vmstats are not up to date, there is drift between the estimated
  93	 * and real values. For high thresholds and a high number of CPUs, it
  94	 * is possible for the min watermark to be breached while the estimated
  95	 * value looks fine. The pressure threshold is a reduced value such
  96	 * that even the maximum amount of drift will not accidentally breach
  97	 * the min watermark
  98	 */
  99	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
 100	threshold = max(1, (int)(watermark_distance / num_online_cpus()));
 101
 102	/*
 103	 * Maximum threshold is 125
 104	 */
 105	threshold = min(125, threshold);
 106
 107	return threshold;
 108}
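/*
 * Editor's worked example (illustrative values, not from any real
 * configuration): with a low-to-min watermark gap of 1024 pages and
 * 8 online CPUs, the pressure threshold is max(1, 1024 / 8) = 128,
 * which is then clamped to the 125 ceiling, so each CPU may defer at
 * most 125 page-count updates while memory is under pressure.
 */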
 109
 110int calculate_normal_threshold(struct zone *zone)
 111{
 112	int threshold;
 113	int mem;	/* memory in 128 MB units */
 114
 115	/*
 116	 * The threshold scales with the number of processors and the amount
 117	 * of memory per zone. More memory means that we can defer updates for
  118	 * longer; more processors could lead to more contention.
 119 	 * fls() is used to have a cheap way of logarithmic scaling.
 120	 *
 121	 * Some sample thresholds:
 122	 *
 123	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
 124	 * ------------------------------------------------------------------
 125	 * 8		1		1	0.9-1 GB	4
 126	 * 16		2		2	0.9-1 GB	4
 127	 * 20 		2		2	1-2 GB		5
 128	 * 24		2		2	2-4 GB		6
 129	 * 28		2		2	4-8 GB		7
 130	 * 32		2		2	8-16 GB		8
 131	 * 4		2		2	<128M		1
 132	 * 30		4		3	2-4 GB		5
 133	 * 48		4		3	8-16 GB		8
 134	 * 32		8		4	1-2 GB		4
 135	 * 32		8		4	0.9-1GB		4
 136	 * 10		16		5	<128M		1
 137	 * 40		16		5	900M		4
 138	 * 70		64		7	2-4 GB		5
 139	 * 84		64		7	4-8 GB		6
 140	 * 108		512		9	4-8 GB		6
 141	 * 125		1024		10	8-16 GB		8
 142	 * 125		1024		10	16-32 GB	9
 143	 */
 144
 145	mem = zone->present_pages >> (27 - PAGE_SHIFT);
 146
 147	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
 148
 149	/*
 150	 * Maximum threshold is 125
 151	 */
 152	threshold = min(125, threshold);
 153
 154	return threshold;
 155}
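/*
 * Editor's worked example (hypothetical machine): 2 online CPUs and a
 * 1 GB zone give mem = 1 GB / 128 MB = 8 units, so
 * threshold = 2 * fls(2) * (1 + fls(8)) = 2 * 2 * (1 + 4) = 20,
 * matching the "1-2 GB, 2 processors" row of the table above.
 */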
 156
 157/*
 158 * Refresh the thresholds for each zone.
 159 */
 160void refresh_zone_stat_thresholds(void)
 161{
 162	struct zone *zone;
 163	int cpu;
 164	int threshold;
 165
 166	for_each_populated_zone(zone) {
 167		unsigned long max_drift, tolerate_drift;
 168
 169		threshold = calculate_normal_threshold(zone);
 170
 171		for_each_online_cpu(cpu)
 172			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
 173							= threshold;
 174
 175		/*
 176		 * Only set percpu_drift_mark if there is a danger that
 177		 * NR_FREE_PAGES reports the low watermark is ok when in fact
 178		 * the min watermark could be breached by an allocation
 179		 */
 180		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
 181		max_drift = num_online_cpus() * threshold;
 182		if (max_drift > tolerate_drift)
 183			zone->percpu_drift_mark = high_wmark_pages(zone) +
 184					max_drift;
 185	}
 186}
 187
 188void set_pgdat_percpu_threshold(pg_data_t *pgdat,
 189				int (*calculate_pressure)(struct zone *))
 190{
 191	struct zone *zone;
 192	int cpu;
 193	int threshold;
 194	int i;
 195
 196	for (i = 0; i < pgdat->nr_zones; i++) {
 197		zone = &pgdat->node_zones[i];
 198		if (!zone->percpu_drift_mark)
 199			continue;
 200
 201		threshold = (*calculate_pressure)(zone);
 202		for_each_possible_cpu(cpu)
 203			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
 204							= threshold;
 205	}
 206}
 207
 208/*
 209 * For use when we know that interrupts are disabled.
 210 */
 211void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 212				int delta)
 213{
 214	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 215	s8 __percpu *p = pcp->vm_stat_diff + item;
 216	long x;
 217	long t;
 218
 219	x = delta + __this_cpu_read(*p);
 220
 221	t = __this_cpu_read(pcp->stat_threshold);
 222
 223	if (unlikely(x > t || x < -t)) {
 224		zone_page_state_add(x, zone, item);
 225		x = 0;
 226	}
 227	__this_cpu_write(*p, x);
 228}
 229EXPORT_SYMBOL(__mod_zone_page_state);
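/*
 * Editor's note, with illustrative numbers: if this CPU's vm_stat_diff
 * for the item is 3, the threshold is 10 and delta is +5, then x = 8
 * simply stays in the per-cpu differential. A later delta of +4 makes
 * x = 12 > 10, so the full 12 is folded into the zone and global
 * counters via zone_page_state_add() and the differential resets to 0.
 */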
 230
 231/*
 232 * Optimized increment and decrement functions.
 233 *
 234 * These are only for a single page and therefore can take a struct page *
 235 * argument instead of struct zone *. This allows the inclusion of the code
 236 * generated for page_zone(page) into the optimized functions.
 237 *
 238 * No overflow check is necessary and therefore the differential can be
 239 * incremented or decremented in place which may allow the compilers to
 240 * generate better code.
 241 * The increment or decrement is known and therefore one boundary check can
 242 * be omitted.
 243 *
 244 * NOTE: These functions are very performance sensitive. Change only
 245 * with care.
 246 *
 247 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 248 * However, the code must first determine the differential location in a zone
 249 * based on the processor number and then inc/dec the counter. There is no
 250 * guarantee without disabling preemption that the processor will not change
 251 * in between and therefore the atomicity vs. interrupt cannot be exploited
 252 * in a useful way here.
 253 */
 254void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 255{
 256	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 257	s8 __percpu *p = pcp->vm_stat_diff + item;
 258	s8 v, t;
 259
 260	v = __this_cpu_inc_return(*p);
 261	t = __this_cpu_read(pcp->stat_threshold);
 262	if (unlikely(v > t)) {
 263		s8 overstep = t >> 1;
 264
 265		zone_page_state_add(v + overstep, zone, item);
 266		__this_cpu_write(*p, -overstep);
 267	}
 268}
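/*
 * Editor's note on the overstep above, using an assumed threshold of
 * 10: when the per-cpu differential reaches 11, overstep = 10 >> 1 = 5,
 * so 11 + 5 = 16 is added to the zone counter and the differential is
 * set to -5. The zone counter briefly overshoots by 5, which delays the
 * next fold by roughly half a threshold's worth of further increments.
 */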
 269
 270void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
 271{
 272	__inc_zone_state(page_zone(page), item);
 273}
 274EXPORT_SYMBOL(__inc_zone_page_state);
 275
 276void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 277{
 278	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 279	s8 __percpu *p = pcp->vm_stat_diff + item;
 280	s8 v, t;
 281
 282	v = __this_cpu_dec_return(*p);
 283	t = __this_cpu_read(pcp->stat_threshold);
  284	if (unlikely(v < -t)) {
 285		s8 overstep = t >> 1;
 286
 287		zone_page_state_add(v - overstep, zone, item);
 288		__this_cpu_write(*p, overstep);
 289	}
 290}
 291
 292void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
 293{
 294	__dec_zone_state(page_zone(page), item);
 295}
 296EXPORT_SYMBOL(__dec_zone_page_state);
 297
 298#ifdef CONFIG_CMPXCHG_LOCAL
 299/*
 300 * If we have cmpxchg_local support then we do not need to incur the overhead
 301 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
 302 *
 303 * mod_state() modifies the zone counter state through atomic per cpu
 304 * operations.
 305 *
  306 * Overstep mode specifies how overstep should be handled:
 307 *     0       No overstepping
 308 *     1       Overstepping half of threshold
 309 *     -1      Overstepping minus half of threshold
 310*/
 311static inline void mod_state(struct zone *zone,
 312       enum zone_stat_item item, int delta, int overstep_mode)
 313{
 314	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 315	s8 __percpu *p = pcp->vm_stat_diff + item;
 316	long o, n, t, z;
 317
 318	do {
 319		z = 0;  /* overflow to zone counters */
 320
 321		/*
 322		 * The fetching of the stat_threshold is racy. We may apply
  323		 * a counter threshold to the wrong cpu if we get
 324		 * rescheduled while executing here. However, the next
 325		 * counter update will apply the threshold again and
 326		 * therefore bring the counter under the threshold again.
 327		 *
 328		 * Most of the time the thresholds are the same anyways
 329		 * for all cpus in a zone.
 330		 */
 331		t = this_cpu_read(pcp->stat_threshold);
 332
 333		o = this_cpu_read(*p);
 334		n = delta + o;
 335
 336		if (n > t || n < -t) {
  337			int os = overstep_mode * (t >> 1);
 338
 339			/* Overflow must be added to zone counters */
 340			z = n + os;
 341			n = -os;
 342		}
 343	} while (this_cpu_cmpxchg(*p, o, n) != o);
 344
 345	if (z)
 346		zone_page_state_add(z, zone, item);
 347}
 348
 349void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 350					int delta)
 351{
 352	mod_state(zone, item, delta, 0);
 353}
 354EXPORT_SYMBOL(mod_zone_page_state);
 355
 356void inc_zone_state(struct zone *zone, enum zone_stat_item item)
 357{
 358	mod_state(zone, item, 1, 1);
 359}
 360
 361void inc_zone_page_state(struct page *page, enum zone_stat_item item)
 362{
 363	mod_state(page_zone(page), item, 1, 1);
 364}
 365EXPORT_SYMBOL(inc_zone_page_state);
 366
 367void dec_zone_page_state(struct page *page, enum zone_stat_item item)
 368{
 369	mod_state(page_zone(page), item, -1, -1);
 370}
 371EXPORT_SYMBOL(dec_zone_page_state);
 372#else
 373/*
 374 * Use interrupt disable to serialize counter updates
 375 */
 376void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 377					int delta)
 378{
 379	unsigned long flags;
 380
 381	local_irq_save(flags);
 382	__mod_zone_page_state(zone, item, delta);
 383	local_irq_restore(flags);
 384}
 385EXPORT_SYMBOL(mod_zone_page_state);
 386
 387void inc_zone_state(struct zone *zone, enum zone_stat_item item)
 388{
 389	unsigned long flags;
 390
 391	local_irq_save(flags);
 392	__inc_zone_state(zone, item);
 393	local_irq_restore(flags);
 394}
 395
 396void inc_zone_page_state(struct page *page, enum zone_stat_item item)
 397{
 398	unsigned long flags;
 399	struct zone *zone;
 400
 401	zone = page_zone(page);
 402	local_irq_save(flags);
 403	__inc_zone_state(zone, item);
 404	local_irq_restore(flags);
 405}
 406EXPORT_SYMBOL(inc_zone_page_state);
 407
 408void dec_zone_page_state(struct page *page, enum zone_stat_item item)
 409{
 410	unsigned long flags;
 411
 412	local_irq_save(flags);
 413	__dec_zone_page_state(page, item);
 414	local_irq_restore(flags);
 415}
 416EXPORT_SYMBOL(dec_zone_page_state);
 417#endif
 418
 419/*
 420 * Update the zone counters for one cpu.
 421 *
 422 * The cpu specified must be either the current cpu or a processor that
 423 * is not online. If it is the current cpu then the execution thread must
 424 * be pinned to the current cpu.
 425 *
 426 * Note that refresh_cpu_vm_stats strives to only access
 427 * node local memory. The per cpu pagesets on remote zones are placed
 428 * in the memory local to the processor using that pageset. So the
 429 * loop over all zones will access a series of cachelines local to
 430 * the processor.
 431 *
 432 * The call to zone_page_state_add updates the cachelines with the
 433 * statistics in the remote zone struct as well as the global cachelines
 434 * with the global counters. These could cause remote node cache line
  435 * bouncing and so should only be done when necessary.
 436 */
 437void refresh_cpu_vm_stats(int cpu)
 438{
 439	struct zone *zone;
 440	int i;
 441	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 442
 443	for_each_populated_zone(zone) {
 444		struct per_cpu_pageset *p;
 445
 446		p = per_cpu_ptr(zone->pageset, cpu);
 447
 448		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 449			if (p->vm_stat_diff[i]) {
 450				unsigned long flags;
 451				int v;
 452
 453				local_irq_save(flags);
 454				v = p->vm_stat_diff[i];
 455				p->vm_stat_diff[i] = 0;
 456				local_irq_restore(flags);
 457				atomic_long_add(v, &zone->vm_stat[i]);
 458				global_diff[i] += v;
 459#ifdef CONFIG_NUMA
 460				/* 3 seconds idle till flush */
 461				p->expire = 3;
 462#endif
 463			}
 464		cond_resched();
 465#ifdef CONFIG_NUMA
 466		/*
 467		 * Deal with draining the remote pageset of this
 468		 * processor
 469		 *
 470		 * Check if there are pages remaining in this pageset
 471		 * if not then there is nothing to expire.
 472		 */
 473		if (!p->expire || !p->pcp.count)
 474			continue;
 475
 476		/*
 477		 * We never drain zones local to this processor.
 478		 */
 479		if (zone_to_nid(zone) == numa_node_id()) {
 480			p->expire = 0;
 481			continue;
 482		}
 483
 484		p->expire--;
 485		if (p->expire)
 486			continue;
 487
 488		if (p->pcp.count)
 489			drain_zone_pages(zone, &p->pcp);
 490#endif
 491	}
 492
 493	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 494		if (global_diff[i])
 495			atomic_long_add(global_diff[i], &vm_stat[i]);
 496}
 497
 498#endif
 499
 500#ifdef CONFIG_NUMA
 501/*
 502 * zonelist = the list of zones passed to the allocator
 503 * z 	    = the zone from which the allocation occurred.
 504 *
 505 * Must be called with interrupts disabled.
 506 *
 507 * When __GFP_OTHER_NODE is set assume the node of the preferred
 508 * zone is the local node. This is useful for daemons who allocate
 509 * memory on behalf of other processes.
 510 */
 511void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
 512{
 513	if (z->zone_pgdat == preferred_zone->zone_pgdat) {
 514		__inc_zone_state(z, NUMA_HIT);
 515	} else {
 516		__inc_zone_state(z, NUMA_MISS);
 517		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
 518	}
 519	if (z->node == ((flags & __GFP_OTHER_NODE) ?
 520			preferred_zone->node : numa_node_id()))
 521		__inc_zone_state(z, NUMA_LOCAL);
 522	else
 523		__inc_zone_state(z, NUMA_OTHER);
 524}
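/*
 * Editor's illustration (hypothetical two-node system): a task running
 * on node 0 that prefers node 0 but ends up allocating from a node 1
 * zone (without __GFP_OTHER_NODE) bumps NUMA_MISS on the node 1 zone,
 * NUMA_FOREIGN on the preferred node 0 zone, and NUMA_OTHER on the
 * node 1 zone; a successful local allocation would instead count
 * NUMA_HIT and NUMA_LOCAL on node 0.
 */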
 525#endif
 526
 527#ifdef CONFIG_COMPACTION
 528
 529struct contig_page_info {
 530	unsigned long free_pages;
 531	unsigned long free_blocks_total;
 532	unsigned long free_blocks_suitable;
 533};
 534
 535/*
 536 * Calculate the number of free pages in a zone, how many contiguous
 537 * pages are free and how many are large enough to satisfy an allocation of
 538 * the target size. Note that this function makes no attempt to estimate
 539 * how many suitable free blocks there *might* be if MOVABLE pages were
 540 * migrated. Calculating that is possible, but expensive and can be
 541 * figured out from userspace
 542 */
 543static void fill_contig_page_info(struct zone *zone,
 544				unsigned int suitable_order,
 545				struct contig_page_info *info)
 546{
 547	unsigned int order;
 548
 549	info->free_pages = 0;
 550	info->free_blocks_total = 0;
 551	info->free_blocks_suitable = 0;
 552
 553	for (order = 0; order < MAX_ORDER; order++) {
 554		unsigned long blocks;
 555
 556		/* Count number of free blocks */
 557		blocks = zone->free_area[order].nr_free;
 558		info->free_blocks_total += blocks;
 559
 560		/* Count free base pages */
 561		info->free_pages += blocks << order;
 562
 563		/* Count the suitable free blocks */
 564		if (order >= suitable_order)
 565			info->free_blocks_suitable += blocks <<
 566						(order - suitable_order);
 567	}
 568}
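/*
 * Editor's worked example (made-up free lists): for a zone with
 * nr_free = 8 at order 0, 4 at order 1 and 2 at order 2, and
 * suitable_order = 1, this yields free_pages = 8 + 8 + 8 = 24,
 * free_blocks_total = 14 and free_blocks_suitable = 4 + (2 << 1) = 8.
 */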
 569
 570/*
 571 * A fragmentation index only makes sense if an allocation of a requested
 572 * size would fail. If that is true, the fragmentation index indicates
 573 * whether external fragmentation or a lack of memory was the problem.
 574 * The value can be used to determine if page reclaim or compaction
 575 * should be used
 576 */
 577static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
 578{
 579	unsigned long requested = 1UL << order;
 580
 581	if (!info->free_blocks_total)
 582		return 0;
 583
 584	/* Fragmentation index only makes sense when a request would fail */
 585	if (info->free_blocks_suitable)
 586		return -1000;
 587
 588	/*
 589	 * Index is between 0 and 1 so return within 3 decimal places
 590	 *
 591	 * 0 => allocation would fail due to lack of memory
 592	 * 1 => allocation would fail due to fragmentation
 593	 */
  594	return 1000 - div_u64(1000 + div_u64(info->free_pages * 1000ULL, requested), info->free_blocks_total);
 595}
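/*
 * Editor's worked example (illustrative): for an order-4 request
 * (requested = 16) against 1000 free pages that are all in order-0
 * blocks, free_blocks_suitable = 0 so the request would fail, and the
 * index is 1000 - (1000 + 1000 * 1000 / 16) / 1000 = 1000 - 63 = 937,
 * i.e. 0.937: the failure is almost entirely due to fragmentation.
 */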
 596
 597/* Same as __fragmentation index but allocs contig_page_info on stack */
 598int fragmentation_index(struct zone *zone, unsigned int order)
 599{
 600	struct contig_page_info info;
 601
 602	fill_contig_page_info(zone, order, &info);
 603	return __fragmentation_index(order, &info);
 604}
 605#endif
 606
 607#if defined(CONFIG_PROC_FS) || defined(CONFIG_COMPACTION)
 608#include <linux/proc_fs.h>
 609#include <linux/seq_file.h>
 610
 611static char * const migratetype_names[MIGRATE_TYPES] = {
 612	"Unmovable",
 613	"Reclaimable",
 614	"Movable",
 615	"Reserve",
 616	"Isolate",
 617};
 618
 619static void *frag_start(struct seq_file *m, loff_t *pos)
 620{
 621	pg_data_t *pgdat;
 622	loff_t node = *pos;
 623	for (pgdat = first_online_pgdat();
 624	     pgdat && node;
 625	     pgdat = next_online_pgdat(pgdat))
 626		--node;
 627
 628	return pgdat;
 629}
 630
 631static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
 632{
 633	pg_data_t *pgdat = (pg_data_t *)arg;
 634
 635	(*pos)++;
 636	return next_online_pgdat(pgdat);
 637}
 638
 639static void frag_stop(struct seq_file *m, void *arg)
 640{
 641}
 642
 643/* Walk all the zones in a node and print using a callback */
 644static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
 645		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
 646{
 647	struct zone *zone;
 648	struct zone *node_zones = pgdat->node_zones;
 649	unsigned long flags;
 650
 651	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
 652		if (!populated_zone(zone))
 653			continue;
 654
 655		spin_lock_irqsave(&zone->lock, flags);
 656		print(m, pgdat, zone);
 657		spin_unlock_irqrestore(&zone->lock, flags);
 658	}
 659}
 660#endif
 661
 662#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
 663#ifdef CONFIG_ZONE_DMA
 664#define TEXT_FOR_DMA(xx) xx "_dma",
 665#else
 666#define TEXT_FOR_DMA(xx)
 667#endif
 668
 669#ifdef CONFIG_ZONE_DMA32
 670#define TEXT_FOR_DMA32(xx) xx "_dma32",
 671#else
 672#define TEXT_FOR_DMA32(xx)
 673#endif
 674
 675#ifdef CONFIG_HIGHMEM
 676#define TEXT_FOR_HIGHMEM(xx) xx "_high",
 677#else
 678#define TEXT_FOR_HIGHMEM(xx)
 679#endif
 680
 681#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
 682					TEXT_FOR_HIGHMEM(xx) xx "_movable",
 683
 684const char * const vmstat_text[] = {
 685	/* Zoned VM counters */
 686	"nr_free_pages",
 687	"nr_inactive_anon",
 688	"nr_active_anon",
 689	"nr_inactive_file",
 690	"nr_active_file",
 691	"nr_unevictable",
 692	"nr_mlock",
 693	"nr_anon_pages",
 694	"nr_mapped",
 695	"nr_file_pages",
 696	"nr_dirty",
 697	"nr_writeback",
 698	"nr_slab_reclaimable",
 699	"nr_slab_unreclaimable",
 700	"nr_page_table_pages",
 701	"nr_kernel_stack",
 702	"nr_unstable",
 703	"nr_bounce",
 704	"nr_vmscan_write",
 705	"nr_writeback_temp",
 706	"nr_isolated_anon",
 707	"nr_isolated_file",
 708	"nr_shmem",
 709	"nr_dirtied",
 710	"nr_written",
 711
 712#ifdef CONFIG_NUMA
 713	"numa_hit",
 714	"numa_miss",
 715	"numa_foreign",
 716	"numa_interleave",
 717	"numa_local",
 718	"numa_other",
 719#endif
 720	"nr_anon_transparent_hugepages",
 721	"nr_dirty_threshold",
 722	"nr_dirty_background_threshold",
 723
 724#ifdef CONFIG_VM_EVENT_COUNTERS
 725	"pgpgin",
 726	"pgpgout",
 727	"pswpin",
 728	"pswpout",
 729
 730	TEXTS_FOR_ZONES("pgalloc")
 731
 732	"pgfree",
 733	"pgactivate",
 734	"pgdeactivate",
 735
 736	"pgfault",
 737	"pgmajfault",
 738
 739	TEXTS_FOR_ZONES("pgrefill")
 740	TEXTS_FOR_ZONES("pgsteal")
 741	TEXTS_FOR_ZONES("pgscan_kswapd")
 742	TEXTS_FOR_ZONES("pgscan_direct")
 743
 744#ifdef CONFIG_NUMA
 745	"zone_reclaim_failed",
 746#endif
 747	"pginodesteal",
 748	"slabs_scanned",
 749	"kswapd_steal",
 750	"kswapd_inodesteal",
 751	"kswapd_low_wmark_hit_quickly",
 752	"kswapd_high_wmark_hit_quickly",
 753	"kswapd_skip_congestion_wait",
 754	"pageoutrun",
 755	"allocstall",
 756
 757	"pgrotated",
 758
 759#ifdef CONFIG_COMPACTION
 760	"compact_blocks_moved",
 761	"compact_pages_moved",
 762	"compact_pagemigrate_failed",
 763	"compact_stall",
 764	"compact_fail",
 765	"compact_success",
 766#endif
 767
 768#ifdef CONFIG_HUGETLB_PAGE
 769	"htlb_buddy_alloc_success",
 770	"htlb_buddy_alloc_fail",
 771#endif
 772	"unevictable_pgs_culled",
 773	"unevictable_pgs_scanned",
 774	"unevictable_pgs_rescued",
 775	"unevictable_pgs_mlocked",
 776	"unevictable_pgs_munlocked",
 777	"unevictable_pgs_cleared",
 778	"unevictable_pgs_stranded",
 779	"unevictable_pgs_mlockfreed",
 780
 781#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 782	"thp_fault_alloc",
 783	"thp_fault_fallback",
 784	"thp_collapse_alloc",
 785	"thp_collapse_alloc_failed",
 786	"thp_split",
 787#endif
 788
  789#endif /* CONFIG_VM_EVENT_COUNTERS */
 790};
 791#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
 792
 793
 794#ifdef CONFIG_PROC_FS
 795static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
 796						struct zone *zone)
 797{
 798	int order;
 799
 800	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
 801	for (order = 0; order < MAX_ORDER; ++order)
 802		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
 803	seq_putc(m, '\n');
 804}
 805
 806/*
 807 * This walks the free areas for each zone.
 808 */
 809static int frag_show(struct seq_file *m, void *arg)
 810{
 811	pg_data_t *pgdat = (pg_data_t *)arg;
 812	walk_zones_in_node(m, pgdat, frag_show_print);
 813	return 0;
 814}
 815
 816static void pagetypeinfo_showfree_print(struct seq_file *m,
 817					pg_data_t *pgdat, struct zone *zone)
 818{
 819	int order, mtype;
 820
 821	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
 822		seq_printf(m, "Node %4d, zone %8s, type %12s ",
 823					pgdat->node_id,
 824					zone->name,
 825					migratetype_names[mtype]);
 826		for (order = 0; order < MAX_ORDER; ++order) {
 827			unsigned long freecount = 0;
 828			struct free_area *area;
 829			struct list_head *curr;
 830
 831			area = &(zone->free_area[order]);
 832
 833			list_for_each(curr, &area->free_list[mtype])
 834				freecount++;
 835			seq_printf(m, "%6lu ", freecount);
 836		}
 837		seq_putc(m, '\n');
 838	}
 839}
 840
  841/* Print out the free pages at each order for each migratetype */
 842static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
 843{
 844	int order;
 845	pg_data_t *pgdat = (pg_data_t *)arg;
 846
 847	/* Print header */
 848	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
 849	for (order = 0; order < MAX_ORDER; ++order)
 850		seq_printf(m, "%6d ", order);
 851	seq_putc(m, '\n');
 852
 853	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);
 854
 855	return 0;
 856}
 857
 858static void pagetypeinfo_showblockcount_print(struct seq_file *m,
 859					pg_data_t *pgdat, struct zone *zone)
 860{
 861	int mtype;
 862	unsigned long pfn;
 863	unsigned long start_pfn = zone->zone_start_pfn;
 864	unsigned long end_pfn = start_pfn + zone->spanned_pages;
 865	unsigned long count[MIGRATE_TYPES] = { 0, };
 866
 867	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
 868		struct page *page;
 869
 870		if (!pfn_valid(pfn))
 871			continue;
 872
 873		page = pfn_to_page(pfn);
 874
 875		/* Watch for unexpected holes punched in the memmap */
 876		if (!memmap_valid_within(pfn, page, zone))
 877			continue;
 878
 879		mtype = get_pageblock_migratetype(page);
 880
 881		if (mtype < MIGRATE_TYPES)
 882			count[mtype]++;
 883	}
 884
 885	/* Print counts */
 886	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
 887	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
 888		seq_printf(m, "%12lu ", count[mtype]);
 889	seq_putc(m, '\n');
 890}
 891
  892/* Print out the number of pageblocks for each migratetype */
 893static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
 894{
 895	int mtype;
 896	pg_data_t *pgdat = (pg_data_t *)arg;
 897
 898	seq_printf(m, "\n%-23s", "Number of blocks type ");
 899	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
 900		seq_printf(m, "%12s ", migratetype_names[mtype]);
 901	seq_putc(m, '\n');
 902	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);
 903
 904	return 0;
 905}
 906
 907/*
 908 * This prints out statistics in relation to grouping pages by mobility.
 909 * It is expensive to collect so do not constantly read the file.
 910 */
 911static int pagetypeinfo_show(struct seq_file *m, void *arg)
 912{
 913	pg_data_t *pgdat = (pg_data_t *)arg;
 914
 915	/* check memoryless node */
 916	if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
 917		return 0;
 918
 919	seq_printf(m, "Page block order: %d\n", pageblock_order);
 920	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
 921	seq_putc(m, '\n');
 922	pagetypeinfo_showfree(m, pgdat);
 923	pagetypeinfo_showblockcount(m, pgdat);
 924
 925	return 0;
 926}
 927
 928static const struct seq_operations fragmentation_op = {
 929	.start	= frag_start,
 930	.next	= frag_next,
 931	.stop	= frag_stop,
 932	.show	= frag_show,
 933};
 934
 935static int fragmentation_open(struct inode *inode, struct file *file)
 936{
 937	return seq_open(file, &fragmentation_op);
 938}
 939
 940static const struct file_operations fragmentation_file_operations = {
 941	.open		= fragmentation_open,
 942	.read		= seq_read,
 943	.llseek		= seq_lseek,
 944	.release	= seq_release,
 945};
 946
 947static const struct seq_operations pagetypeinfo_op = {
 948	.start	= frag_start,
 949	.next	= frag_next,
 950	.stop	= frag_stop,
 951	.show	= pagetypeinfo_show,
 952};
 953
 954static int pagetypeinfo_open(struct inode *inode, struct file *file)
 955{
 956	return seq_open(file, &pagetypeinfo_op);
 957}
 958
 959static const struct file_operations pagetypeinfo_file_ops = {
 960	.open		= pagetypeinfo_open,
 961	.read		= seq_read,
 962	.llseek		= seq_lseek,
 963	.release	= seq_release,
 964};
 965
 966static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
 967							struct zone *zone)
 968{
 969	int i;
 970	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
 971	seq_printf(m,
 972		   "\n  pages free     %lu"
 973		   "\n        min      %lu"
 974		   "\n        low      %lu"
 975		   "\n        high     %lu"
 976		   "\n        scanned  %lu"
 977		   "\n        spanned  %lu"
 978		   "\n        present  %lu",
 979		   zone_page_state(zone, NR_FREE_PAGES),
 980		   min_wmark_pages(zone),
 981		   low_wmark_pages(zone),
 982		   high_wmark_pages(zone),
 983		   zone->pages_scanned,
 984		   zone->spanned_pages,
 985		   zone->present_pages);
 986
 987	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 988		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
 989				zone_page_state(zone, i));
 990
 991	seq_printf(m,
 992		   "\n        protection: (%lu",
 993		   zone->lowmem_reserve[0]);
 994	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
 995		seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
 996	seq_printf(m,
 997		   ")"
 998		   "\n  pagesets");
 999	for_each_online_cpu(i) {
1000		struct per_cpu_pageset *pageset;
1001
1002		pageset = per_cpu_ptr(zone->pageset, i);
1003		seq_printf(m,
1004			   "\n    cpu: %i"
1005			   "\n              count: %i"
1006			   "\n              high:  %i"
1007			   "\n              batch: %i",
1008			   i,
1009			   pageset->pcp.count,
1010			   pageset->pcp.high,
1011			   pageset->pcp.batch);
1012#ifdef CONFIG_SMP
1013		seq_printf(m, "\n  vm stats threshold: %d",
1014				pageset->stat_threshold);
1015#endif
1016	}
1017	seq_printf(m,
1018		   "\n  all_unreclaimable: %u"
1019		   "\n  start_pfn:         %lu"
1020		   "\n  inactive_ratio:    %u",
1021		   zone->all_unreclaimable,
1022		   zone->zone_start_pfn,
1023		   zone->inactive_ratio);
1024	seq_putc(m, '\n');
1025}
1026
1027/*
1028 * Output information about zones in @pgdat.
1029 */
1030static int zoneinfo_show(struct seq_file *m, void *arg)
1031{
1032	pg_data_t *pgdat = (pg_data_t *)arg;
1033	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
1034	return 0;
1035}
1036
1037static const struct seq_operations zoneinfo_op = {
1038	.start	= frag_start, /* iterate over all zones. The same as in
1039			       * fragmentation. */
1040	.next	= frag_next,
1041	.stop	= frag_stop,
1042	.show	= zoneinfo_show,
1043};
1044
1045static int zoneinfo_open(struct inode *inode, struct file *file)
1046{
1047	return seq_open(file, &zoneinfo_op);
1048}
1049
1050static const struct file_operations proc_zoneinfo_file_operations = {
1051	.open		= zoneinfo_open,
1052	.read		= seq_read,
1053	.llseek		= seq_lseek,
1054	.release	= seq_release,
1055};
1056
1057enum writeback_stat_item {
1058	NR_DIRTY_THRESHOLD,
1059	NR_DIRTY_BG_THRESHOLD,
1060	NR_VM_WRITEBACK_STAT_ITEMS,
1061};
1062
1063static void *vmstat_start(struct seq_file *m, loff_t *pos)
1064{
1065	unsigned long *v;
1066	int i, stat_items_size;
1067
1068	if (*pos >= ARRAY_SIZE(vmstat_text))
1069		return NULL;
1070	stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
1071			  NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
1072
1073#ifdef CONFIG_VM_EVENT_COUNTERS
1074	stat_items_size += sizeof(struct vm_event_state);
1075#endif
1076
1077	v = kmalloc(stat_items_size, GFP_KERNEL);
1078	m->private = v;
1079	if (!v)
1080		return ERR_PTR(-ENOMEM);
1081	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1082		v[i] = global_page_state(i);
1083	v += NR_VM_ZONE_STAT_ITEMS;
1084
1085	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1086			    v + NR_DIRTY_THRESHOLD);
1087	v += NR_VM_WRITEBACK_STAT_ITEMS;
1088
1089#ifdef CONFIG_VM_EVENT_COUNTERS
1090	all_vm_events(v);
1091	v[PGPGIN] /= 2;		/* sectors -> kbytes */
1092	v[PGPGOUT] /= 2;
1093#endif
1094	return (unsigned long *)m->private + *pos;
1095}
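/*
 * Editor's note on the buffer laid out above: m->private is a flat
 * array of unsigned longs whose indices line up with vmstat_text[]:
 * first the NR_VM_ZONE_STAT_ITEMS zone counters, then the two dirty
 * threshold values, then (when CONFIG_VM_EVENT_COUNTERS is set) the
 * VM event counters, so vmstat_show() can print entry *pos with a
 * simple pointer offset into this buffer.
 */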
1096
1097static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1098{
1099	(*pos)++;
1100	if (*pos >= ARRAY_SIZE(vmstat_text))
1101		return NULL;
1102	return (unsigned long *)m->private + *pos;
1103}
1104
1105static int vmstat_show(struct seq_file *m, void *arg)
1106{
1107	unsigned long *l = arg;
1108	unsigned long off = l - (unsigned long *)m->private;
1109
1110	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
1111	return 0;
1112}
1113
1114static void vmstat_stop(struct seq_file *m, void *arg)
1115{
1116	kfree(m->private);
1117	m->private = NULL;
1118}
1119
1120static const struct seq_operations vmstat_op = {
1121	.start	= vmstat_start,
1122	.next	= vmstat_next,
1123	.stop	= vmstat_stop,
1124	.show	= vmstat_show,
1125};
1126
1127static int vmstat_open(struct inode *inode, struct file *file)
1128{
1129	return seq_open(file, &vmstat_op);
1130}
1131
1132static const struct file_operations proc_vmstat_file_operations = {
1133	.open		= vmstat_open,
1134	.read		= seq_read,
1135	.llseek		= seq_lseek,
1136	.release	= seq_release,
1137};
1138#endif /* CONFIG_PROC_FS */
1139
1140#ifdef CONFIG_SMP
1141static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1142int sysctl_stat_interval __read_mostly = HZ;
1143
1144static void vmstat_update(struct work_struct *w)
1145{
1146	refresh_cpu_vm_stats(smp_processor_id());
1147	schedule_delayed_work(&__get_cpu_var(vmstat_work),
1148		round_jiffies_relative(sysctl_stat_interval));
1149}
1150
1151static void __cpuinit start_cpu_timer(int cpu)
1152{
1153	struct delayed_work *work = &per_cpu(vmstat_work, cpu);
1154
1155	INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
1156	schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
1157}
1158
1159/*
1160 * Use the cpu notifier to insure that the thresholds are recalculated
1161 * when necessary.
1162 */
1163static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
1164		unsigned long action,
1165		void *hcpu)
1166{
1167	long cpu = (long)hcpu;
1168
1169	switch (action) {
1170	case CPU_ONLINE:
1171	case CPU_ONLINE_FROZEN:
1172		refresh_zone_stat_thresholds();
1173		start_cpu_timer(cpu);
1174		node_set_state(cpu_to_node(cpu), N_CPU);
1175		break;
1176	case CPU_DOWN_PREPARE:
1177	case CPU_DOWN_PREPARE_FROZEN:
1178		cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
1179		per_cpu(vmstat_work, cpu).work.func = NULL;
1180		break;
1181	case CPU_DOWN_FAILED:
1182	case CPU_DOWN_FAILED_FROZEN:
1183		start_cpu_timer(cpu);
1184		break;
1185	case CPU_DEAD:
1186	case CPU_DEAD_FROZEN:
1187		refresh_zone_stat_thresholds();
1188		break;
1189	default:
1190		break;
1191	}
1192	return NOTIFY_OK;
1193}
1194
1195static struct notifier_block __cpuinitdata vmstat_notifier =
1196	{ &vmstat_cpuup_callback, NULL, 0 };
1197#endif
1198
1199static int __init setup_vmstat(void)
1200{
1201#ifdef CONFIG_SMP
1202	int cpu;
1203
1204	register_cpu_notifier(&vmstat_notifier);
1205
1206	for_each_online_cpu(cpu)
1207		start_cpu_timer(cpu);
1208#endif
1209#ifdef CONFIG_PROC_FS
1210	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
1211	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
1212	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
1213	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
1214#endif
1215	return 0;
1216}
1217module_init(setup_vmstat)
1218
1219#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
1220#include <linux/debugfs.h>
1221
1222static struct dentry *extfrag_debug_root;
1223
1224/*
1225 * Return an index indicating how much of the available free memory is
1226 * unusable for an allocation of the requested size.
1227 */
1228static int unusable_free_index(unsigned int order,
1229				struct contig_page_info *info)
1230{
1231	/* No free memory is interpreted as all free memory is unusable */
1232	if (info->free_pages == 0)
1233		return 1000;
1234
1235	/*
1236	 * Index should be a value between 0 and 1. Return a value to 3
1237	 * decimal places.
1238	 *
1239	 * 0 => no fragmentation
1240	 * 1 => high fragmentation
1241	 */
1242	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
1243
1244}
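/*
 * Editor's worked example (assumed numbers): for an order-2 request
 * (4 pages), with free_pages = 100 and free_blocks_suitable = 10, the
 * unusable index is (100 - (10 << 2)) * 1000 / 100 = 600, i.e. 0.600
 * of the free memory cannot be used to satisfy that allocation.
 */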
1245
1246static void unusable_show_print(struct seq_file *m,
1247					pg_data_t *pgdat, struct zone *zone)
1248{
1249	unsigned int order;
1250	int index;
1251	struct contig_page_info info;
1252
1253	seq_printf(m, "Node %d, zone %8s ",
1254				pgdat->node_id,
1255				zone->name);
1256	for (order = 0; order < MAX_ORDER; ++order) {
1257		fill_contig_page_info(zone, order, &info);
1258		index = unusable_free_index(order, &info);
1259		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1260	}
1261
1262	seq_putc(m, '\n');
1263}
1264
1265/*
1266 * Display unusable free space index
1267 *
1268 * The unusable free space index measures how much of the available free
1269 * memory cannot be used to satisfy an allocation of a given size and is a
1270 * value between 0 and 1. The higher the value, the more of free memory is
1271 * unusable and by implication, the worse the external fragmentation is. This
1272 * can be expressed as a percentage by multiplying by 100.
1273 */
1274static int unusable_show(struct seq_file *m, void *arg)
1275{
1276	pg_data_t *pgdat = (pg_data_t *)arg;
1277
1278	/* check memoryless node */
1279	if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
1280		return 0;
1281
1282	walk_zones_in_node(m, pgdat, unusable_show_print);
1283
1284	return 0;
1285}
1286
1287static const struct seq_operations unusable_op = {
1288	.start	= frag_start,
1289	.next	= frag_next,
1290	.stop	= frag_stop,
1291	.show	= unusable_show,
1292};
1293
1294static int unusable_open(struct inode *inode, struct file *file)
1295{
1296	return seq_open(file, &unusable_op);
1297}
1298
1299static const struct file_operations unusable_file_ops = {
1300	.open		= unusable_open,
1301	.read		= seq_read,
1302	.llseek		= seq_lseek,
1303	.release	= seq_release,
1304};
1305
1306static void extfrag_show_print(struct seq_file *m,
1307					pg_data_t *pgdat, struct zone *zone)
1308{
1309	unsigned int order;
1310	int index;
1311
1312	/* Alloc on stack as interrupts are disabled for zone walk */
1313	struct contig_page_info info;
1314
1315	seq_printf(m, "Node %d, zone %8s ",
1316				pgdat->node_id,
1317				zone->name);
1318	for (order = 0; order < MAX_ORDER; ++order) {
1319		fill_contig_page_info(zone, order, &info);
1320		index = __fragmentation_index(order, &info);
1321		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1322	}
1323
1324	seq_putc(m, '\n');
1325}
1326
1327/*
1328 * Display fragmentation index for orders that allocations would fail for
1329 */
1330static int extfrag_show(struct seq_file *m, void *arg)
1331{
1332	pg_data_t *pgdat = (pg_data_t *)arg;
1333
1334	walk_zones_in_node(m, pgdat, extfrag_show_print);
1335
1336	return 0;
1337}
1338
1339static const struct seq_operations extfrag_op = {
1340	.start	= frag_start,
1341	.next	= frag_next,
1342	.stop	= frag_stop,
1343	.show	= extfrag_show,
1344};
1345
1346static int extfrag_open(struct inode *inode, struct file *file)
1347{
1348	return seq_open(file, &extfrag_op);
1349}
1350
1351static const struct file_operations extfrag_file_ops = {
1352	.open		= extfrag_open,
1353	.read		= seq_read,
1354	.llseek		= seq_lseek,
1355	.release	= seq_release,
1356};
1357
1358static int __init extfrag_debug_init(void)
1359{
1360	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
1361	if (!extfrag_debug_root)
1362		return -ENOMEM;
1363
1364	if (!debugfs_create_file("unusable_index", 0444,
1365			extfrag_debug_root, NULL, &unusable_file_ops))
1366		return -ENOMEM;
1367
1368	if (!debugfs_create_file("extfrag_index", 0444,
1369			extfrag_debug_root, NULL, &extfrag_file_ops))
1370		return -ENOMEM;
1371
1372	return 0;
1373}
1374
1375module_init(extfrag_debug_init);
1376#endif
v3.15
   1/*
   2 *  linux/mm/vmstat.c
   3 *
   4 *  Manages VM statistics
   5 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   6 *
   7 *  zoned VM statistics
   8 *  Copyright (C) 2006 Silicon Graphics, Inc.,
   9 *		Christoph Lameter <christoph@lameter.com>
  10 */
  11#include <linux/fs.h>
  12#include <linux/mm.h>
  13#include <linux/err.h>
  14#include <linux/module.h>
  15#include <linux/slab.h>
  16#include <linux/cpu.h>
  17#include <linux/vmstat.h>
  18#include <linux/sched.h>
  19#include <linux/math64.h>
  20#include <linux/writeback.h>
  21#include <linux/compaction.h>
  22#include <linux/mm_inline.h>
  23
  24#include "internal.h"
  25
  26#ifdef CONFIG_VM_EVENT_COUNTERS
  27DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
  28EXPORT_PER_CPU_SYMBOL(vm_event_states);
  29
  30static void sum_vm_events(unsigned long *ret)
  31{
  32	int cpu;
  33	int i;
  34
  35	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
  36
  37	for_each_online_cpu(cpu) {
  38		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
  39
  40		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
  41			ret[i] += this->event[i];
  42	}
  43}
  44
  45/*
  46 * Accumulate the vm event counters across all CPUs.
  47 * The result is unavoidably approximate - it can change
  48 * during and after execution of this function.
  49*/
  50void all_vm_events(unsigned long *ret)
  51{
  52	get_online_cpus();
  53	sum_vm_events(ret);
  54	put_online_cpus();
  55}
  56EXPORT_SYMBOL_GPL(all_vm_events);
  57
  58/*
  59 * Fold the foreign cpu events into our own.
  60 *
  61 * This is adding to the events on one processor
  62 * but keeps the global counts constant.
  63 */
  64void vm_events_fold_cpu(int cpu)
  65{
  66	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
  67	int i;
  68
  69	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
  70		count_vm_events(i, fold_state->event[i]);
  71		fold_state->event[i] = 0;
  72	}
  73}
  74
  75#endif /* CONFIG_VM_EVENT_COUNTERS */
  76
  77/*
  78 * Manage combined zone based / global counters
  79 *
  80 * vm_stat contains the global counters
  81 */
  82atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
  83EXPORT_SYMBOL(vm_stat);
  84
  85#ifdef CONFIG_SMP
  86
  87int calculate_pressure_threshold(struct zone *zone)
  88{
  89	int threshold;
  90	int watermark_distance;
  91
  92	/*
  93	 * As vmstats are not up to date, there is drift between the estimated
  94	 * and real values. For high thresholds and a high number of CPUs, it
  95	 * is possible for the min watermark to be breached while the estimated
  96	 * value looks fine. The pressure threshold is a reduced value such
  97	 * that even the maximum amount of drift will not accidentally breach
  98	 * the min watermark
  99	 */
 100	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
 101	threshold = max(1, (int)(watermark_distance / num_online_cpus()));
 102
 103	/*
 104	 * Maximum threshold is 125
 105	 */
 106	threshold = min(125, threshold);
 107
 108	return threshold;
 109}
 110
 111int calculate_normal_threshold(struct zone *zone)
 112{
 113	int threshold;
 114	int mem;	/* memory in 128 MB units */
 115
 116	/*
 117	 * The threshold scales with the number of processors and the amount
 118	 * of memory per zone. More memory means that we can defer updates for
  119	 * longer; more processors could lead to more contention.
 120 	 * fls() is used to have a cheap way of logarithmic scaling.
 121	 *
 122	 * Some sample thresholds:
 123	 *
 124	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
 125	 * ------------------------------------------------------------------
 126	 * 8		1		1	0.9-1 GB	4
 127	 * 16		2		2	0.9-1 GB	4
 128	 * 20 		2		2	1-2 GB		5
 129	 * 24		2		2	2-4 GB		6
 130	 * 28		2		2	4-8 GB		7
 131	 * 32		2		2	8-16 GB		8
 132	 * 4		2		2	<128M		1
 133	 * 30		4		3	2-4 GB		5
 134	 * 48		4		3	8-16 GB		8
 135	 * 32		8		4	1-2 GB		4
 136	 * 32		8		4	0.9-1GB		4
 137	 * 10		16		5	<128M		1
 138	 * 40		16		5	900M		4
 139	 * 70		64		7	2-4 GB		5
 140	 * 84		64		7	4-8 GB		6
 141	 * 108		512		9	4-8 GB		6
 142	 * 125		1024		10	8-16 GB		8
 143	 * 125		1024		10	16-32 GB	9
 144	 */
 145
 146	mem = zone->managed_pages >> (27 - PAGE_SHIFT);
 147
 148	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
 149
 150	/*
 151	 * Maximum threshold is 125
 152	 */
 153	threshold = min(125, threshold);
 154
 155	return threshold;
 156}
 157
 158/*
 159 * Refresh the thresholds for each zone.
 160 */
 161void refresh_zone_stat_thresholds(void)
 162{
 163	struct zone *zone;
 164	int cpu;
 165	int threshold;
 166
 167	for_each_populated_zone(zone) {
 168		unsigned long max_drift, tolerate_drift;
 169
 170		threshold = calculate_normal_threshold(zone);
 171
 172		for_each_online_cpu(cpu)
 173			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
 174							= threshold;
 175
 176		/*
 177		 * Only set percpu_drift_mark if there is a danger that
 178		 * NR_FREE_PAGES reports the low watermark is ok when in fact
 179		 * the min watermark could be breached by an allocation
 180		 */
 181		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
 182		max_drift = num_online_cpus() * threshold;
 183		if (max_drift > tolerate_drift)
 184			zone->percpu_drift_mark = high_wmark_pages(zone) +
 185					max_drift;
 186	}
 187}
 188
 189void set_pgdat_percpu_threshold(pg_data_t *pgdat,
 190				int (*calculate_pressure)(struct zone *))
 191{
 192	struct zone *zone;
 193	int cpu;
 194	int threshold;
 195	int i;
 196
 197	for (i = 0; i < pgdat->nr_zones; i++) {
 198		zone = &pgdat->node_zones[i];
 199		if (!zone->percpu_drift_mark)
 200			continue;
 201
 202		threshold = (*calculate_pressure)(zone);
 203		for_each_possible_cpu(cpu)
 204			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
 205							= threshold;
 206	}
 207}
 208
 209/*
 210 * For use when we know that interrupts are disabled.
 211 */
 212void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 213				int delta)
 214{
 215	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 216	s8 __percpu *p = pcp->vm_stat_diff + item;
 217	long x;
 218	long t;
 219
 220	x = delta + __this_cpu_read(*p);
 221
 222	t = __this_cpu_read(pcp->stat_threshold);
 223
 224	if (unlikely(x > t || x < -t)) {
 225		zone_page_state_add(x, zone, item);
 226		x = 0;
 227	}
 228	__this_cpu_write(*p, x);
 229}
 230EXPORT_SYMBOL(__mod_zone_page_state);
 231
 232/*
 233 * Optimized increment and decrement functions.
 234 *
 235 * These are only for a single page and therefore can take a struct page *
 236 * argument instead of struct zone *. This allows the inclusion of the code
 237 * generated for page_zone(page) into the optimized functions.
 238 *
 239 * No overflow check is necessary and therefore the differential can be
 240 * incremented or decremented in place which may allow the compilers to
 241 * generate better code.
 242 * The increment or decrement is known and therefore one boundary check can
 243 * be omitted.
 244 *
 245 * NOTE: These functions are very performance sensitive. Change only
 246 * with care.
 247 *
 248 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 249 * However, the code must first determine the differential location in a zone
 250 * based on the processor number and then inc/dec the counter. There is no
 251 * guarantee without disabling preemption that the processor will not change
 252 * in between and therefore the atomicity vs. interrupt cannot be exploited
 253 * in a useful way here.
 254 */
 255void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 256{
 257	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 258	s8 __percpu *p = pcp->vm_stat_diff + item;
 259	s8 v, t;
 260
 261	v = __this_cpu_inc_return(*p);
 262	t = __this_cpu_read(pcp->stat_threshold);
 263	if (unlikely(v > t)) {
 264		s8 overstep = t >> 1;
 265
 266		zone_page_state_add(v + overstep, zone, item);
 267		__this_cpu_write(*p, -overstep);
 268	}
 269}
 270
 271void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
 272{
 273	__inc_zone_state(page_zone(page), item);
 274}
 275EXPORT_SYMBOL(__inc_zone_page_state);
 276
 277void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 278{
 279	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 280	s8 __percpu *p = pcp->vm_stat_diff + item;
 281	s8 v, t;
 282
 283	v = __this_cpu_dec_return(*p);
 284	t = __this_cpu_read(pcp->stat_threshold);
  285	if (unlikely(v < -t)) {
 286		s8 overstep = t >> 1;
 287
 288		zone_page_state_add(v - overstep, zone, item);
 289		__this_cpu_write(*p, overstep);
 290	}
 291}
 292
 293void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
 294{
 295	__dec_zone_state(page_zone(page), item);
 296}
 297EXPORT_SYMBOL(__dec_zone_page_state);
 298
 299#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
 300/*
 301 * If we have cmpxchg_local support then we do not need to incur the overhead
 302 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
 303 *
 304 * mod_state() modifies the zone counter state through atomic per cpu
 305 * operations.
 306 *
  307 * Overstep mode specifies how overstep should be handled:
 308 *     0       No overstepping
 309 *     1       Overstepping half of threshold
 310 *     -1      Overstepping minus half of threshold
 311*/
 312static inline void mod_state(struct zone *zone,
 313       enum zone_stat_item item, int delta, int overstep_mode)
 314{
 315	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 316	s8 __percpu *p = pcp->vm_stat_diff + item;
 317	long o, n, t, z;
 318
 319	do {
 320		z = 0;  /* overflow to zone counters */
 321
 322		/*
 323		 * The fetching of the stat_threshold is racy. We may apply
  324		 * a counter threshold to the wrong cpu if we get
 325		 * rescheduled while executing here. However, the next
 326		 * counter update will apply the threshold again and
 327		 * therefore bring the counter under the threshold again.
 328		 *
 329		 * Most of the time the thresholds are the same anyways
 330		 * for all cpus in a zone.
 331		 */
 332		t = this_cpu_read(pcp->stat_threshold);
 333
 334		o = this_cpu_read(*p);
 335		n = delta + o;
 336
 337		if (n > t || n < -t) {
  338			int os = overstep_mode * (t >> 1);
 339
 340			/* Overflow must be added to zone counters */
 341			z = n + os;
 342			n = -os;
 343		}
 344	} while (this_cpu_cmpxchg(*p, o, n) != o);
 345
 346	if (z)
 347		zone_page_state_add(z, zone, item);
 348}
 349
 350void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 351					int delta)
 352{
 353	mod_state(zone, item, delta, 0);
 354}
 355EXPORT_SYMBOL(mod_zone_page_state);
 356
 357void inc_zone_state(struct zone *zone, enum zone_stat_item item)
 358{
 359	mod_state(zone, item, 1, 1);
 360}
 361
 362void inc_zone_page_state(struct page *page, enum zone_stat_item item)
 363{
 364	mod_state(page_zone(page), item, 1, 1);
 365}
 366EXPORT_SYMBOL(inc_zone_page_state);
 367
 368void dec_zone_page_state(struct page *page, enum zone_stat_item item)
 369{
 370	mod_state(page_zone(page), item, -1, -1);
 371}
 372EXPORT_SYMBOL(dec_zone_page_state);
 373#else
 374/*
 375 * Use interrupt disable to serialize counter updates
 376 */
 377void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 378					int delta)
 379{
 380	unsigned long flags;
 381
 382	local_irq_save(flags);
 383	__mod_zone_page_state(zone, item, delta);
 384	local_irq_restore(flags);
 385}
 386EXPORT_SYMBOL(mod_zone_page_state);
 387
 388void inc_zone_state(struct zone *zone, enum zone_stat_item item)
 389{
 390	unsigned long flags;
 391
 392	local_irq_save(flags);
 393	__inc_zone_state(zone, item);
 394	local_irq_restore(flags);
 395}
 396
 397void inc_zone_page_state(struct page *page, enum zone_stat_item item)
 398{
 399	unsigned long flags;
 400	struct zone *zone;
 401
 402	zone = page_zone(page);
 403	local_irq_save(flags);
 404	__inc_zone_state(zone, item);
 405	local_irq_restore(flags);
 406}
 407EXPORT_SYMBOL(inc_zone_page_state);
 408
 409void dec_zone_page_state(struct page *page, enum zone_stat_item item)
 410{
 411	unsigned long flags;
 412
 413	local_irq_save(flags);
 414	__dec_zone_page_state(page, item);
 415	local_irq_restore(flags);
 416}
 417EXPORT_SYMBOL(dec_zone_page_state);
 418#endif
 419
 420static inline void fold_diff(int *diff)
 421{
 422	int i;
 423
 424	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 425		if (diff[i])
 426			atomic_long_add(diff[i], &vm_stat[i]);
 427}
 428
 429/*
 430 * Update the zone counters for the current cpu.
 431 *
 432 * Note that refresh_cpu_vm_stats strives to only access
 433 * node local memory. The per cpu pagesets on remote zones are placed
 434 * in the memory local to the processor using that pageset. So the
 435 * loop over all zones will access a series of cachelines local to
 436 * the processor.
 437 *
 438 * The call to zone_page_state_add updates the cachelines with the
 439 * statistics in the remote zone struct as well as the global cachelines
 440 * with the global counters. These could cause remote node cache line
  441 * bouncing and so should only be done when necessary.
 442 */
 443static void refresh_cpu_vm_stats(void)
 444{
 445	struct zone *zone;
 446	int i;
 447	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 448
 449	for_each_populated_zone(zone) {
 450		struct per_cpu_pageset __percpu *p = zone->pageset;
 451
 452		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
 453			int v;
 454
 455			v = this_cpu_xchg(p->vm_stat_diff[i], 0);
 456			if (v) {
 457
 458				atomic_long_add(v, &zone->vm_stat[i]);
 459				global_diff[i] += v;
 460#ifdef CONFIG_NUMA
 461				/* 3 seconds idle till flush */
 462				__this_cpu_write(p->expire, 3);
 463#endif
 464			}
 465		}
 466		cond_resched();
 467#ifdef CONFIG_NUMA
 468		/*
 469		 * Deal with draining the remote pageset of this
 470		 * processor
 471		 *
 472		 * Check if there are pages remaining in this pageset
 473		 * if not then there is nothing to expire.
 474		 */
 475		if (!__this_cpu_read(p->expire) ||
 476			       !__this_cpu_read(p->pcp.count))
 477			continue;
 478
 479		/*
 480		 * We never drain zones local to this processor.
 481		 */
 482		if (zone_to_nid(zone) == numa_node_id()) {
 483			__this_cpu_write(p->expire, 0);
 484			continue;
 485		}
 486
 487
 488		if (__this_cpu_dec_return(p->expire))
 489			continue;
 490
 491		if (__this_cpu_read(p->pcp.count))
 492			drain_zone_pages(zone, __this_cpu_ptr(&p->pcp));
 493#endif
 494	}
 495	fold_diff(global_diff);
 496}
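/*
 * Editor's note (comparison, not part of the original source): unlike
 * the v3.1 variant earlier in this listing, which disables interrupts
 * around each read-and-clear of vm_stat_diff[], this version uses
 * this_cpu_xchg() to atomically fetch and zero the per-cpu
 * differential, then folds all accumulated deltas into vm_stat[] in
 * one pass via fold_diff().
 */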
 497
 498/*
 499 * Fold the data for an offline cpu into the global array.
 500 * There cannot be any access by the offline cpu and therefore
 501 * synchronization is simplified.
 502 */
 503void cpu_vm_stats_fold(int cpu)
 504{
 505	struct zone *zone;
 506	int i;
 507	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 508
 509	for_each_populated_zone(zone) {
 510		struct per_cpu_pageset *p;
 511
 512		p = per_cpu_ptr(zone->pageset, cpu);
 513
 514		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 515			if (p->vm_stat_diff[i]) {
 516				int v;
 517
 518				v = p->vm_stat_diff[i];
 519				p->vm_stat_diff[i] = 0;
 520				atomic_long_add(v, &zone->vm_stat[i]);
 521				global_diff[i] += v;
 522			}
 523	}
 524
 525	fold_diff(global_diff);
 526}
 527
 528/*
  529 * This is only called if !populated_zone(zone), which implies no other users of
  530 * pset->vm_stat_diff[] exist.
 531 */
 532void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
 533{
 534	int i;
 535
 536	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 537		if (pset->vm_stat_diff[i]) {
 538			int v = pset->vm_stat_diff[i];
 539			pset->vm_stat_diff[i] = 0;
 540			atomic_long_add(v, &zone->vm_stat[i]);
 541			atomic_long_add(v, &vm_stat[i]);
 542		}
 543}
 544#endif
 545
 546#ifdef CONFIG_NUMA
 547/*
 548 * zonelist = the list of zones passed to the allocator
 549 * z 	    = the zone from which the allocation occurred.
 550 *
 551 * Must be called with interrupts disabled.
 552 *
 553 * When __GFP_OTHER_NODE is set assume the node of the preferred
 554 * zone is the local node. This is useful for daemons who allocate
 555 * memory on behalf of other processes.
 556 */
 557void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
 558{
 559	if (z->zone_pgdat == preferred_zone->zone_pgdat) {
 560		__inc_zone_state(z, NUMA_HIT);
 561	} else {
 562		__inc_zone_state(z, NUMA_MISS);
 563		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
 564	}
 565	if (z->node == ((flags & __GFP_OTHER_NODE) ?
 566			preferred_zone->node : numa_node_id()))
 567		__inc_zone_state(z, NUMA_LOCAL);
 568	else
 569		__inc_zone_state(z, NUMA_OTHER);
 570}
 571#endif
 572
 573#ifdef CONFIG_COMPACTION
 574
 575struct contig_page_info {
 576	unsigned long free_pages;
 577	unsigned long free_blocks_total;
 578	unsigned long free_blocks_suitable;
 579};
 580
 581/*
 582 * Calculate the number of free pages in a zone, how many contiguous
 583 * pages are free and how many are large enough to satisfy an allocation of
 584 * the target size. Note that this function makes no attempt to estimate
 585 * how many suitable free blocks there *might* be if MOVABLE pages were
 586 * migrated. Calculating that is possible, but expensive and can be
 587 * figured out from userspace.
 588 */
 589static void fill_contig_page_info(struct zone *zone,
 590				unsigned int suitable_order,
 591				struct contig_page_info *info)
 592{
 593	unsigned int order;
 594
 595	info->free_pages = 0;
 596	info->free_blocks_total = 0;
 597	info->free_blocks_suitable = 0;
 598
 599	for (order = 0; order < MAX_ORDER; order++) {
 600		unsigned long blocks;
 601
 602		/* Count number of free blocks */
 603		blocks = zone->free_area[order].nr_free;
 604		info->free_blocks_total += blocks;
 605
 606		/* Count free base pages */
 607		info->free_pages += blocks << order;
 608
 609		/* Count the suitable free blocks */
 610		if (order >= suitable_order)
 611			info->free_blocks_suitable += blocks <<
 612						(order - suitable_order);
 613	}
 614}
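/*
 * For example (values purely illustrative): with suitable_order = 2 and
 * free lists holding 8 order-0, 4 order-1 and 2 order-3 blocks, the
 * function reports free_pages = 8*1 + 4*2 + 2*8 = 32, free_blocks_total
 * = 14 and free_blocks_suitable = 2 << (3 - 2) = 4, since each order-3
 * block could satisfy two order-2 requests.
 */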
 615
 616/*
 617 * A fragmentation index only makes sense if an allocation of a requested
 618 * size would fail. If that is true, the fragmentation index indicates
 619 * whether external fragmentation or a lack of memory was the problem.
 620 * The value can be used to determine whether page reclaim or compaction
 621 * is the more appropriate response.
 622 */
 623static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
 624{
 625	unsigned long requested = 1UL << order;
 626
 627	if (!info->free_blocks_total)
 628		return 0;
 629
 630	/* Fragmentation index only makes sense when a request would fail */
 631	if (info->free_blocks_suitable)
 632		return -1000;
 633
 634	/*
 635	 * Index is between 0 and 1, so return it to 3 decimal places (scaled by 1000)
 636	 *
 637	 * 0 => allocation would fail due to lack of memory
 638	 * 1 => allocation would fail due to fragmentation
 639	 */
 640	return 1000 - div_u64(1000 + div_u64(info->free_pages * 1000ULL, requested), info->free_blocks_total);
 641}
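/*
 * For illustration (hypothetical values): requested = 4 (order 2),
 * free_pages = 100 split into 100 order-0 blocks gives
 * 1000 - (1000 + 100*1000/4) / 100 = 1000 - 260 = 740, i.e. an index of
 * 0.740: plenty of memory but badly fragmented, so compaction is the
 * better response.  With the same 100 pages in 50 larger blocks the index
 * is 1000 - 26000/50 = 480, meaning fragmentation is less to blame.
 */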
 642
 643/* Same as __fragmentation_index() but allocates contig_page_info on the stack */
 644int fragmentation_index(struct zone *zone, unsigned int order)
 645{
 646	struct contig_page_info info;
 647
 648	fill_contig_page_info(zone, order, &info);
 649	return __fragmentation_index(order, &info);
 650}
 651#endif
 652
 653#if defined(CONFIG_PROC_FS) || defined(CONFIG_COMPACTION)
 654#include <linux/proc_fs.h>
 655#include <linux/seq_file.h>
 656
 657static char * const migratetype_names[MIGRATE_TYPES] = {
 658	"Unmovable",
 659	"Reclaimable",
 660	"Movable",
 661	"Reserve",
 662#ifdef CONFIG_CMA
 663	"CMA",
 664#endif
 665#ifdef CONFIG_MEMORY_ISOLATION
 666	"Isolate",
 667#endif
 668};
 669
 670static void *frag_start(struct seq_file *m, loff_t *pos)
 671{
 672	pg_data_t *pgdat;
 673	loff_t node = *pos;
 674	for (pgdat = first_online_pgdat();
 675	     pgdat && node;
 676	     pgdat = next_online_pgdat(pgdat))
 677		--node;
 678
 679	return pgdat;
 680}
 681
 682static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
 683{
 684	pg_data_t *pgdat = (pg_data_t *)arg;
 685
 686	(*pos)++;
 687	return next_online_pgdat(pgdat);
 688}
 689
 690static void frag_stop(struct seq_file *m, void *arg)
 691{
 692}
 693
 694/* Walk all the zones in a node and print using a callback */
 695static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
 696		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
 697{
 698	struct zone *zone;
 699	struct zone *node_zones = pgdat->node_zones;
 700	unsigned long flags;
 701
 702	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
 703		if (!populated_zone(zone))
 704			continue;
 705
 706		spin_lock_irqsave(&zone->lock, flags);
 707		print(m, pgdat, zone);
 708		spin_unlock_irqrestore(&zone->lock, flags);
 709	}
 710}
 711#endif
 712
 713#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
 714#ifdef CONFIG_ZONE_DMA
 715#define TEXT_FOR_DMA(xx) xx "_dma",
 716#else
 717#define TEXT_FOR_DMA(xx)
 718#endif
 719
 720#ifdef CONFIG_ZONE_DMA32
 721#define TEXT_FOR_DMA32(xx) xx "_dma32",
 722#else
 723#define TEXT_FOR_DMA32(xx)
 724#endif
 725
 726#ifdef CONFIG_HIGHMEM
 727#define TEXT_FOR_HIGHMEM(xx) xx "_high",
 728#else
 729#define TEXT_FOR_HIGHMEM(xx)
 730#endif
 731
 732#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
 733					TEXT_FOR_HIGHMEM(xx) xx "_movable",
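/*
 * On a typical x86_64 configuration (CONFIG_ZONE_DMA and CONFIG_ZONE_DMA32
 * enabled, no CONFIG_HIGHMEM), TEXTS_FOR_ZONES("pgalloc") expands to
 * "pgalloc_dma", "pgalloc_dma32", "pgalloc_normal", "pgalloc_movable",
 * matching the per-zone allocation event counters.
 */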
 734
 735const char * const vmstat_text[] = {
 736	/* Zoned VM counters */
 737	"nr_free_pages",
 738	"nr_alloc_batch",
 739	"nr_inactive_anon",
 740	"nr_active_anon",
 741	"nr_inactive_file",
 742	"nr_active_file",
 743	"nr_unevictable",
 744	"nr_mlock",
 745	"nr_anon_pages",
 746	"nr_mapped",
 747	"nr_file_pages",
 748	"nr_dirty",
 749	"nr_writeback",
 750	"nr_slab_reclaimable",
 751	"nr_slab_unreclaimable",
 752	"nr_page_table_pages",
 753	"nr_kernel_stack",
 754	"nr_unstable",
 755	"nr_bounce",
 756	"nr_vmscan_write",
 757	"nr_vmscan_immediate_reclaim",
 758	"nr_writeback_temp",
 759	"nr_isolated_anon",
 760	"nr_isolated_file",
 761	"nr_shmem",
 762	"nr_dirtied",
 763	"nr_written",
 764
 765#ifdef CONFIG_NUMA
 766	"numa_hit",
 767	"numa_miss",
 768	"numa_foreign",
 769	"numa_interleave",
 770	"numa_local",
 771	"numa_other",
 772#endif
 773	"workingset_refault",
 774	"workingset_activate",
 775	"workingset_nodereclaim",
 776	"nr_anon_transparent_hugepages",
 777	"nr_free_cma",
 778	"nr_dirty_threshold",
 779	"nr_dirty_background_threshold",
 780
 781#ifdef CONFIG_VM_EVENT_COUNTERS
 782	"pgpgin",
 783	"pgpgout",
 784	"pswpin",
 785	"pswpout",
 786
 787	TEXTS_FOR_ZONES("pgalloc")
 788
 789	"pgfree",
 790	"pgactivate",
 791	"pgdeactivate",
 792
 793	"pgfault",
 794	"pgmajfault",
 795
 796	TEXTS_FOR_ZONES("pgrefill")
 797	TEXTS_FOR_ZONES("pgsteal_kswapd")
 798	TEXTS_FOR_ZONES("pgsteal_direct")
 799	TEXTS_FOR_ZONES("pgscan_kswapd")
 800	TEXTS_FOR_ZONES("pgscan_direct")
 801	"pgscan_direct_throttle",
 802
 803#ifdef CONFIG_NUMA
 804	"zone_reclaim_failed",
 805#endif
 806	"pginodesteal",
 807	"slabs_scanned",
 808	"kswapd_inodesteal",
 809	"kswapd_low_wmark_hit_quickly",
 810	"kswapd_high_wmark_hit_quickly",
 811	"pageoutrun",
 812	"allocstall",
 813
 814	"pgrotated",
 815
 816	"drop_pagecache",
 817	"drop_slab",
 818
 819#ifdef CONFIG_NUMA_BALANCING
 820	"numa_pte_updates",
 821	"numa_huge_pte_updates",
 822	"numa_hint_faults",
 823	"numa_hint_faults_local",
 824	"numa_pages_migrated",
 825#endif
 826#ifdef CONFIG_MIGRATION
 827	"pgmigrate_success",
 828	"pgmigrate_fail",
 829#endif
 830#ifdef CONFIG_COMPACTION
 831	"compact_migrate_scanned",
 832	"compact_free_scanned",
 833	"compact_isolated",
 834	"compact_stall",
 835	"compact_fail",
 836	"compact_success",
 837#endif
 838
 839#ifdef CONFIG_HUGETLB_PAGE
 840	"htlb_buddy_alloc_success",
 841	"htlb_buddy_alloc_fail",
 842#endif
 843	"unevictable_pgs_culled",
 844	"unevictable_pgs_scanned",
 845	"unevictable_pgs_rescued",
 846	"unevictable_pgs_mlocked",
 847	"unevictable_pgs_munlocked",
 848	"unevictable_pgs_cleared",
 849	"unevictable_pgs_stranded",
 850
 851#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 852	"thp_fault_alloc",
 853	"thp_fault_fallback",
 854	"thp_collapse_alloc",
 855	"thp_collapse_alloc_failed",
 856	"thp_split",
 857	"thp_zero_page_alloc",
 858	"thp_zero_page_alloc_failed",
 859#endif
 860#ifdef CONFIG_DEBUG_TLBFLUSH
 861#ifdef CONFIG_SMP
 862	"nr_tlb_remote_flush",
 863	"nr_tlb_remote_flush_received",
 864#endif /* CONFIG_SMP */
 865	"nr_tlb_local_flush_all",
 866	"nr_tlb_local_flush_one",
 867#endif /* CONFIG_DEBUG_TLBFLUSH */
 868
 869#endif /* CONFIG_VM_EVENT_COUNTERS */
 870};
 871#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
 872
 873
 874#ifdef CONFIG_PROC_FS
 875static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
 876						struct zone *zone)
 877{
 878	int order;
 879
 880	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
 881	for (order = 0; order < MAX_ORDER; ++order)
 882		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
 883	seq_putc(m, '\n');
 884}
 885
 886/*
 887 * This walks the free areas for each zone.
 888 */
 889static int frag_show(struct seq_file *m, void *arg)
 890{
 891	pg_data_t *pgdat = (pg_data_t *)arg;
 892	walk_zones_in_node(m, pgdat, frag_show_print);
 893	return 0;
 894}
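/*
 * This is what backs /proc/buddyinfo.  A line of output looks something
 * like (values illustrative only):
 *
 *   Node 0, zone   Normal   1046    521    210     95     35     12 ...
 *
 * i.e. the number of free blocks at each order from 0 to MAX_ORDER-1.
 */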
 895
 896static void pagetypeinfo_showfree_print(struct seq_file *m,
 897					pg_data_t *pgdat, struct zone *zone)
 898{
 899	int order, mtype;
 900
 901	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
 902		seq_printf(m, "Node %4d, zone %8s, type %12s ",
 903					pgdat->node_id,
 904					zone->name,
 905					migratetype_names[mtype]);
 906		for (order = 0; order < MAX_ORDER; ++order) {
 907			unsigned long freecount = 0;
 908			struct free_area *area;
 909			struct list_head *curr;
 910
 911			area = &(zone->free_area[order]);
 912
 913			list_for_each(curr, &area->free_list[mtype])
 914				freecount++;
 915			seq_printf(m, "%6lu ", freecount);
 916		}
 917		seq_putc(m, '\n');
 918	}
 919}
 920
 921/* Print out the free pages at each order for each migratetype */
 922static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
 923{
 924	int order;
 925	pg_data_t *pgdat = (pg_data_t *)arg;
 926
 927	/* Print header */
 928	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
 929	for (order = 0; order < MAX_ORDER; ++order)
 930		seq_printf(m, "%6d ", order);
 931	seq_putc(m, '\n');
 932
 933	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);
 934
 935	return 0;
 936}
 937
 938static void pagetypeinfo_showblockcount_print(struct seq_file *m,
 939					pg_data_t *pgdat, struct zone *zone)
 940{
 941	int mtype;
 942	unsigned long pfn;
 943	unsigned long start_pfn = zone->zone_start_pfn;
 944	unsigned long end_pfn = zone_end_pfn(zone);
 945	unsigned long count[MIGRATE_TYPES] = { 0, };
 946
 947	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
 948		struct page *page;
 949
 950		if (!pfn_valid(pfn))
 951			continue;
 952
 953		page = pfn_to_page(pfn);
 954
 955		/* Watch for unexpected holes punched in the memmap */
 956		if (!memmap_valid_within(pfn, page, zone))
 957			continue;
 958
 959		mtype = get_pageblock_migratetype(page);
 960
 961		if (mtype < MIGRATE_TYPES)
 962			count[mtype]++;
 963	}
 964
 965	/* Print counts */
 966	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
 967	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
 968		seq_printf(m, "%12lu ", count[mtype]);
 969	seq_putc(m, '\n');
 970}
 971
 972/* Print out the number of pageblocks of each migratetype in each zone */
 973static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
 974{
 975	int mtype;
 976	pg_data_t *pgdat = (pg_data_t *)arg;
 977
 978	seq_printf(m, "\n%-23s", "Number of blocks type ");
 979	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
 980		seq_printf(m, "%12s ", migratetype_names[mtype]);
 981	seq_putc(m, '\n');
 982	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);
 983
 984	return 0;
 985}
 986
 987/*
 988 * This prints out statistics in relation to grouping pages by mobility.
 989 * It is expensive to collect so do not constantly read the file.
 990 */
 991static int pagetypeinfo_show(struct seq_file *m, void *arg)
 992{
 993	pg_data_t *pgdat = (pg_data_t *)arg;
 994
 995	/* check memoryless node */
 996	if (!node_state(pgdat->node_id, N_MEMORY))
 997		return 0;
 998
 999	seq_printf(m, "Page block order: %d\n", pageblock_order);
1000	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
1001	seq_putc(m, '\n');
1002	pagetypeinfo_showfree(m, pgdat);
1003	pagetypeinfo_showblockcount(m, pgdat);
1004
1005	return 0;
1006}
1007
1008static const struct seq_operations fragmentation_op = {
1009	.start	= frag_start,
1010	.next	= frag_next,
1011	.stop	= frag_stop,
1012	.show	= frag_show,
1013};
1014
1015static int fragmentation_open(struct inode *inode, struct file *file)
1016{
1017	return seq_open(file, &fragmentation_op);
1018}
1019
1020static const struct file_operations fragmentation_file_operations = {
1021	.open		= fragmentation_open,
1022	.read		= seq_read,
1023	.llseek		= seq_lseek,
1024	.release	= seq_release,
1025};
1026
1027static const struct seq_operations pagetypeinfo_op = {
1028	.start	= frag_start,
1029	.next	= frag_next,
1030	.stop	= frag_stop,
1031	.show	= pagetypeinfo_show,
1032};
1033
1034static int pagetypeinfo_open(struct inode *inode, struct file *file)
1035{
1036	return seq_open(file, &pagetypeinfo_op);
1037}
1038
1039static const struct file_operations pagetypeinfo_file_ops = {
1040	.open		= pagetypeinfo_open,
1041	.read		= seq_read,
1042	.llseek		= seq_lseek,
1043	.release	= seq_release,
1044};
1045
1046static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1047							struct zone *zone)
1048{
1049	int i;
1050	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1051	seq_printf(m,
1052		   "\n  pages free     %lu"
1053		   "\n        min      %lu"
1054		   "\n        low      %lu"
1055		   "\n        high     %lu"
1056		   "\n        scanned  %lu"
1057		   "\n        spanned  %lu"
1058		   "\n        present  %lu"
1059		   "\n        managed  %lu",
1060		   zone_page_state(zone, NR_FREE_PAGES),
1061		   min_wmark_pages(zone),
1062		   low_wmark_pages(zone),
1063		   high_wmark_pages(zone),
1064		   zone->pages_scanned,
1065		   zone->spanned_pages,
1066		   zone->present_pages,
1067		   zone->managed_pages);
1068
1069	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1070		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
1071				zone_page_state(zone, i));
1072
1073	seq_printf(m,
1074		   "\n        protection: (%lu",
1075		   zone->lowmem_reserve[0]);
1076	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
1077		seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
1078	seq_printf(m,
1079		   ")"
1080		   "\n  pagesets");
1081	for_each_online_cpu(i) {
1082		struct per_cpu_pageset *pageset;
1083
1084		pageset = per_cpu_ptr(zone->pageset, i);
1085		seq_printf(m,
1086			   "\n    cpu: %i"
1087			   "\n              count: %i"
1088			   "\n              high:  %i"
1089			   "\n              batch: %i",
1090			   i,
1091			   pageset->pcp.count,
1092			   pageset->pcp.high,
1093			   pageset->pcp.batch);
1094#ifdef CONFIG_SMP
1095		seq_printf(m, "\n  vm stats threshold: %d",
1096				pageset->stat_threshold);
1097#endif
1098	}
1099	seq_printf(m,
1100		   "\n  all_unreclaimable: %u"
1101		   "\n  start_pfn:         %lu"
1102		   "\n  inactive_ratio:    %u",
1103		   !zone_reclaimable(zone),
1104		   zone->zone_start_pfn,
1105		   zone->inactive_ratio);
1106	seq_putc(m, '\n');
1107}
1108
1109/*
1110 * Output information about zones in @pgdat.
1111 */
1112static int zoneinfo_show(struct seq_file *m, void *arg)
1113{
1114	pg_data_t *pgdat = (pg_data_t *)arg;
1115	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
1116	return 0;
1117}
1118
1119static const struct seq_operations zoneinfo_op = {
1120	.start	= frag_start, /* iterate over all zones. The same as in
1121			       * fragmentation. */
1122	.next	= frag_next,
1123	.stop	= frag_stop,
1124	.show	= zoneinfo_show,
1125};
1126
1127static int zoneinfo_open(struct inode *inode, struct file *file)
1128{
1129	return seq_open(file, &zoneinfo_op);
1130}
1131
1132static const struct file_operations proc_zoneinfo_file_operations = {
1133	.open		= zoneinfo_open,
1134	.read		= seq_read,
1135	.llseek		= seq_lseek,
1136	.release	= seq_release,
1137};
1138
1139enum writeback_stat_item {
1140	NR_DIRTY_THRESHOLD,
1141	NR_DIRTY_BG_THRESHOLD,
1142	NR_VM_WRITEBACK_STAT_ITEMS,
1143};
1144
1145static void *vmstat_start(struct seq_file *m, loff_t *pos)
1146{
1147	unsigned long *v;
1148	int i, stat_items_size;
1149
1150	if (*pos >= ARRAY_SIZE(vmstat_text))
1151		return NULL;
1152	stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
1153			  NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
1154
1155#ifdef CONFIG_VM_EVENT_COUNTERS
1156	stat_items_size += sizeof(struct vm_event_state);
1157#endif
1158
1159	v = kmalloc(stat_items_size, GFP_KERNEL);
1160	m->private = v;
1161	if (!v)
1162		return ERR_PTR(-ENOMEM);
1163	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1164		v[i] = global_page_state(i);
1165	v += NR_VM_ZONE_STAT_ITEMS;
1166
1167	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1168			    v + NR_DIRTY_THRESHOLD);
1169	v += NR_VM_WRITEBACK_STAT_ITEMS;
1170
1171#ifdef CONFIG_VM_EVENT_COUNTERS
1172	all_vm_events(v);
1173	v[PGPGIN] /= 2;		/* sectors -> kbytes */
1174	v[PGPGOUT] /= 2;
1175#endif
1176	return (unsigned long *)m->private + *pos;
1177}
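/*
 * The buffer assembled above mirrors the layout of vmstat_text[]: first
 * the NR_VM_ZONE_STAT_ITEMS global zone counters, then the two writeback
 * thresholds (nr_dirty_threshold, nr_dirty_background_threshold), then,
 * if configured, the vm event counters.  vmstat_show() simply pairs each
 * slot with the name at the same index.
 */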
1178
1179static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1180{
1181	(*pos)++;
1182	if (*pos >= ARRAY_SIZE(vmstat_text))
1183		return NULL;
1184	return (unsigned long *)m->private + *pos;
1185}
1186
1187static int vmstat_show(struct seq_file *m, void *arg)
1188{
1189	unsigned long *l = arg;
1190	unsigned long off = l - (unsigned long *)m->private;
1191
1192	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
1193	return 0;
1194}
1195
1196static void vmstat_stop(struct seq_file *m, void *arg)
1197{
1198	kfree(m->private);
1199	m->private = NULL;
1200}
1201
1202static const struct seq_operations vmstat_op = {
1203	.start	= vmstat_start,
1204	.next	= vmstat_next,
1205	.stop	= vmstat_stop,
1206	.show	= vmstat_show,
1207};
1208
1209static int vmstat_open(struct inode *inode, struct file *file)
1210{
1211	return seq_open(file, &vmstat_op);
1212}
1213
1214static const struct file_operations proc_vmstat_file_operations = {
1215	.open		= vmstat_open,
1216	.read		= seq_read,
1217	.llseek		= seq_lseek,
1218	.release	= seq_release,
1219};
1220#endif /* CONFIG_PROC_FS */
1221
1222#ifdef CONFIG_SMP
1223static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1224int sysctl_stat_interval __read_mostly = HZ;
1225
1226static void vmstat_update(struct work_struct *w)
1227{
1228	refresh_cpu_vm_stats();
1229	schedule_delayed_work(&__get_cpu_var(vmstat_work),
1230		round_jiffies_relative(sysctl_stat_interval));
1231}
1232
1233static void start_cpu_timer(int cpu)
1234{
1235	struct delayed_work *work = &per_cpu(vmstat_work, cpu);
1236
1237	INIT_DEFERRABLE_WORK(work, vmstat_update);
1238	schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
1239}
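/*
 * Each online CPU gets its own deferrable delayed work which re-arms
 * itself from vmstat_update(), so per-cpu counter diffs are folded back
 * into the global counters about once per sysctl_stat_interval (one
 * second by default, tunable via /proc/sys/vm/stat_interval).
 */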
1240
1241static void vmstat_cpu_dead(int node)
1242{
1243	int cpu;
1244
1245	get_online_cpus();
1246	for_each_online_cpu(cpu)
1247		if (cpu_to_node(cpu) == node)
1248			goto end;
1249
1250	node_clear_state(node, N_CPU);
1251end:
1252	put_online_cpus();
1253}
1254
1255/*
1256 * Use the cpu notifier to ensure that the thresholds are recalculated
1257 * when necessary.
1258 */
1259static int vmstat_cpuup_callback(struct notifier_block *nfb,
1260		unsigned long action,
1261		void *hcpu)
1262{
1263	long cpu = (long)hcpu;
1264
1265	switch (action) {
1266	case CPU_ONLINE:
1267	case CPU_ONLINE_FROZEN:
1268		refresh_zone_stat_thresholds();
1269		start_cpu_timer(cpu);
1270		node_set_state(cpu_to_node(cpu), N_CPU);
1271		break;
1272	case CPU_DOWN_PREPARE:
1273	case CPU_DOWN_PREPARE_FROZEN:
1274		cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
1275		per_cpu(vmstat_work, cpu).work.func = NULL;
1276		break;
1277	case CPU_DOWN_FAILED:
1278	case CPU_DOWN_FAILED_FROZEN:
1279		start_cpu_timer(cpu);
1280		break;
1281	case CPU_DEAD:
1282	case CPU_DEAD_FROZEN:
1283		refresh_zone_stat_thresholds();
1284		vmstat_cpu_dead(cpu_to_node(cpu));
1285		break;
1286	default:
1287		break;
1288	}
1289	return NOTIFY_OK;
1290}
1291
1292static struct notifier_block vmstat_notifier =
1293	{ &vmstat_cpuup_callback, NULL, 0 };
1294#endif
1295
1296static int __init setup_vmstat(void)
1297{
1298#ifdef CONFIG_SMP
1299	int cpu;
1300
1301	cpu_notifier_register_begin();
1302	__register_cpu_notifier(&vmstat_notifier);
1303
1304	for_each_online_cpu(cpu) {
1305		start_cpu_timer(cpu);
1306		node_set_state(cpu_to_node(cpu), N_CPU);
1307	}
1308	cpu_notifier_register_done();
1309#endif
1310#ifdef CONFIG_PROC_FS
1311	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
1312	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
1313	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
1314	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
1315#endif
1316	return 0;
1317}
1318module_init(setup_vmstat)
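/*
 * After this initcall runs, the statistics exported by this file can be
 * read from userspace, e.g.:
 *
 *   cat /proc/vmstat       - global counters, one "name value" per line
 *   cat /proc/zoneinfo     - per-zone breakdown including watermarks
 *   cat /proc/buddyinfo    - free blocks per order per zone
 *   cat /proc/pagetypeinfo - free pages and pageblocks per migratetype
 */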
1319
1320#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
1321#include <linux/debugfs.h>
1322
 
1323
1324/*
1325 * Return an index indicating how much of the available free memory is
1326 * unusable for an allocation of the requested size.
1327 */
1328static int unusable_free_index(unsigned int order,
1329				struct contig_page_info *info)
1330{
1331	/* No free memory is interpreted as all free memory is unusable */
1332	if (info->free_pages == 0)
1333		return 1000;
1334
1335	/*
1336	 * Index should be a value between 0 and 1. Return a value to 3
1337	 * decimal places.
1338	 *
1339	 * 0 => no fragmentation
1340	 * 1 => high fragmentation
1341	 */
1342	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
1343
1344}
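/*
 * For example (hypothetical values): with order = 3, free_pages = 64 and
 * free_blocks_suitable = 4, the suitable blocks cover 4 << 3 = 32 pages,
 * so the index is (64 - 32) * 1000 / 64 = 500, i.e. half of the free
 * memory is unusable for an order-3 request.
 */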
1345
1346static void unusable_show_print(struct seq_file *m,
1347					pg_data_t *pgdat, struct zone *zone)
1348{
1349	unsigned int order;
1350	int index;
1351	struct contig_page_info info;
1352
1353	seq_printf(m, "Node %d, zone %8s ",
1354				pgdat->node_id,
1355				zone->name);
1356	for (order = 0; order < MAX_ORDER; ++order) {
1357		fill_contig_page_info(zone, order, &info);
1358		index = unusable_free_index(order, &info);
1359		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1360	}
1361
1362	seq_putc(m, '\n');
1363}
1364
1365/*
1366 * Display unusable free space index
1367 *
1368 * The unusable free space index measures how much of the available free
1369 * memory cannot be used to satisfy an allocation of a given size and is a
1370 * value between 0 and 1. The higher the value, the more of free memory is
1371 * unusable and by implication, the worse the external fragmentation is. This
1372 * can be expressed as a percentage by multiplying by 100.
1373 */
1374static int unusable_show(struct seq_file *m, void *arg)
1375{
1376	pg_data_t *pgdat = (pg_data_t *)arg;
1377
1378	/* check memoryless node */
1379	if (!node_state(pgdat->node_id, N_MEMORY))
1380		return 0;
1381
1382	walk_zones_in_node(m, pgdat, unusable_show_print);
1383
1384	return 0;
1385}
1386
1387static const struct seq_operations unusable_op = {
1388	.start	= frag_start,
1389	.next	= frag_next,
1390	.stop	= frag_stop,
1391	.show	= unusable_show,
1392};
1393
1394static int unusable_open(struct inode *inode, struct file *file)
1395{
1396	return seq_open(file, &unusable_op);
1397}
1398
1399static const struct file_operations unusable_file_ops = {
1400	.open		= unusable_open,
1401	.read		= seq_read,
1402	.llseek		= seq_lseek,
1403	.release	= seq_release,
1404};
1405
1406static void extfrag_show_print(struct seq_file *m,
1407					pg_data_t *pgdat, struct zone *zone)
1408{
1409	unsigned int order;
1410	int index;
1411
1412	/* Alloc on stack as interrupts are disabled for zone walk */
1413	struct contig_page_info info;
1414
1415	seq_printf(m, "Node %d, zone %8s ",
1416				pgdat->node_id,
1417				zone->name);
1418	for (order = 0; order < MAX_ORDER; ++order) {
1419		fill_contig_page_info(zone, order, &info);
1420		index = __fragmentation_index(order, &info);
1421		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1422	}
1423
1424	seq_putc(m, '\n');
1425}
1426
1427/*
1428 * Display the fragmentation index for each order at which an allocation would fail
1429 */
1430static int extfrag_show(struct seq_file *m, void *arg)
1431{
1432	pg_data_t *pgdat = (pg_data_t *)arg;
1433
1434	walk_zones_in_node(m, pgdat, extfrag_show_print);
1435
1436	return 0;
1437}
1438
1439static const struct seq_operations extfrag_op = {
1440	.start	= frag_start,
1441	.next	= frag_next,
1442	.stop	= frag_stop,
1443	.show	= extfrag_show,
1444};
1445
1446static int extfrag_open(struct inode *inode, struct file *file)
1447{
1448	return seq_open(file, &extfrag_op);
1449}
1450
1451static const struct file_operations extfrag_file_ops = {
1452	.open		= extfrag_open,
1453	.read		= seq_read,
1454	.llseek		= seq_lseek,
1455	.release	= seq_release,
1456};
1457
1458static int __init extfrag_debug_init(void)
1459{
1460	struct dentry *extfrag_debug_root;
1461
1462	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
1463	if (!extfrag_debug_root)
1464		return -ENOMEM;
1465
1466	if (!debugfs_create_file("unusable_index", 0444,
1467			extfrag_debug_root, NULL, &unusable_file_ops))
1468		goto fail;
1469
1470	if (!debugfs_create_file("extfrag_index", 0444,
1471			extfrag_debug_root, NULL, &extfrag_file_ops))
1472		goto fail;
1473
1474	return 0;
1475fail:
1476	debugfs_remove_recursive(extfrag_debug_root);
1477	return -ENOMEM;
1478}
1479
1480module_init(extfrag_debug_init);
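/*
 * With debugfs mounted (typically at /sys/kernel/debug), the two files
 * created above can be read directly:
 *
 *   cat /sys/kernel/debug/extfrag/unusable_index
 *   cat /sys/kernel/debug/extfrag/extfrag_index
 *
 * Each prints one line per zone with one value per order, to three
 * decimal places.
 */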
1481#endif