// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/mm/vmstat.c
 *
 * Manages VM statistics
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 * zoned VM statistics
 * Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 * Copyright (C) 2008-2014 Christoph Lameter
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmstat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/writeback.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/page_ext.h>
#include <linux/page_owner.h>

#include "internal.h"

#define NUMA_STATS_THRESHOLD (U16_MAX - 2)

#ifdef CONFIG_NUMA
int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;

/* zero numa counters within a zone */
static void zero_zone_numa_counters(struct zone *zone)
{
	int item, cpu;

	for (item = 0; item < NR_VM_NUMA_STAT_ITEMS; item++) {
		atomic_long_set(&zone->vm_numa_stat[item], 0);
		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item]
								= 0;
	}
}

/* zero numa counters of all the populated zones */
static void zero_zones_numa_counters(void)
{
	struct zone *zone;

	for_each_populated_zone(zone)
		zero_zone_numa_counters(zone);
}

/* zero global numa counters */
static void zero_global_numa_counters(void)
{
	int item;

	for (item = 0; item < NR_VM_NUMA_STAT_ITEMS; item++)
		atomic_long_set(&vm_numa_stat[item], 0);
}

static void invalid_numa_statistics(void)
{
	zero_zones_numa_counters();
	zero_global_numa_counters();
}

static DEFINE_MUTEX(vm_numa_stat_lock);

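/*
 * The handler below backs the vm.numa_stat sysctl, exposed to userspace
 * as /proc/sys/vm/numa_stat. An illustrative session (shell commands,
 * not part of this file):
 *
 *	echo 0 > /proc/sys/vm/numa_stat		disable and zero NUMA counters
 *	echo 1 > /proc/sys/vm/numa_stat		re-enable NUMA counting
 */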
int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int ret, oldval;

	mutex_lock(&vm_numa_stat_lock);
	if (write)
		oldval = sysctl_vm_numa_stat;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (ret || !write)
		goto out;

	if (oldval == sysctl_vm_numa_stat)
		goto out;
	else if (sysctl_vm_numa_stat == ENABLE_NUMA_STAT) {
		static_branch_enable(&vm_numa_stat_key);
		pr_info("enable numa statistics\n");
	} else {
		static_branch_disable(&vm_numa_stat_key);
		invalid_numa_statistics();
		pr_info("disable numa statistics, and clear numa counters\n");
	}

out:
	mutex_unlock(&vm_numa_stat_lock);
	return ret;
}
#endif

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret)
{
	int cpu;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	for_each_online_cpu(cpu) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	get_online_cpus();
	sum_vm_events(ret);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(all_vm_events);

/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS] __cacheline_aligned_in_smp;
atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
EXPORT_SYMBOL(vm_zone_stat);
EXPORT_SYMBOL(vm_numa_stat);
EXPORT_SYMBOL(vm_node_stat);

#ifdef CONFIG_SMP

int calculate_pressure_threshold(struct zone *zone)
{
	int threshold;
	int watermark_distance;

	/*
	 * As vmstats are not up to date, there is drift between the estimated
	 * and real values. For high thresholds and a high number of CPUs, it
	 * is possible for the min watermark to be breached while the estimated
	 * value looks fine. The pressure threshold is a reduced value such
	 * that even the maximum amount of drift will not accidentally breach
	 * the min watermark
	 */
	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
	threshold = max(1, (int)(watermark_distance / num_online_cpus()));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
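
/*
 * Worked example for the above (illustrative numbers): with
 * low - min == 512 pages and 8 online CPUs, the pressure threshold is
 * 512 / 8 == 64, well under the 125 cap. Even if every CPU sits just
 * below its per-cpu threshold, the total drift (8 * 64 == 512) cannot
 * push the zone below the min watermark undetected.
 */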

int calculate_normal_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates for
	 * longer, more processors could lead to more contention.
	 * fls() is used to have a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
	 * ------------------------------------------------------------------
	 * 8		1		1	0.9-1 GB	4
	 * 16		2		2	0.9-1 GB	4
	 * 20		2		2	1-2 GB		5
	 * 24		2		2	2-4 GB		6
	 * 28		2		2	4-8 GB		7
	 * 32		2		2	8-16 GB		8
	 * 4		2		2	<128M		1
	 * 30		4		3	2-4 GB		5
	 * 48		4		3	8-16 GB		8
	 * 32		8		4	1-2 GB		4
	 * 32		8		4	0.9-1GB		4
	 * 10		16		5	<128M		1
	 * 40		16		5	900M		4
	 * 70		64		7	2-4 GB		5
	 * 84		64		7	4-8 GB		6
	 * 108		512		9	4-8 GB		6
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */

	mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}

/*
 * Refresh the thresholds for each zone.
 */
void refresh_zone_stat_thresholds(void)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int cpu;
	int threshold;

	/* Zero current pgdat thresholds */
	for_each_online_pgdat(pgdat) {
		for_each_online_cpu(cpu) {
			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
		}
	}

	for_each_populated_zone(zone) {
		struct pglist_data *pgdat = zone->zone_pgdat;
		unsigned long max_drift, tolerate_drift;

		threshold = calculate_normal_threshold(zone);

		for_each_online_cpu(cpu) {
			int pgdat_threshold;

			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;

			/* Base nodestat threshold on the largest populated zone. */
			pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
				= max(threshold, pgdat_threshold);
		}

		/*
		 * Only set percpu_drift_mark if there is a danger that
		 * NR_FREE_PAGES reports the low watermark is ok when in fact
		 * the min watermark could be breached by an allocation
		 */
		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
		max_drift = num_online_cpus() * threshold;
		if (max_drift > tolerate_drift)
			zone->percpu_drift_mark = high_wmark_pages(zone) +
					max_drift;
	}
}

void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *))
{
	struct zone *zone;
	int cpu;
	int threshold;
	int i;

	for (i = 0; i < pgdat->nr_zones; i++) {
		zone = &pgdat->node_zones[i];
		if (!zone->percpu_drift_mark)
			continue;

		threshold = (*calculate_pressure)(zone);
		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;
	}
}

/*
 * For use when we know that interrupts are disabled,
 * or when we know that preemption is disabled and that
 * particular counter cannot be updated from interrupt context.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			   long delta)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long x;
	long t;

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(x > t || x < -t)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	__this_cpu_write(*p, x);
}
EXPORT_SYMBOL(__mod_zone_page_state);
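
/*
 * Worked example (illustrative): with stat_threshold == 32 and a
 * per-cpu diff currently at 30, __mod_zone_page_state(zone, item, 5)
 * computes x == 35, which exceeds the threshold, so the full 35 is
 * folded into the zone and global counters and the per-cpu diff is
 * reset to 0.
 */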

void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
			   long delta)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	long x;
	long t;

	if (vmstat_item_in_bytes(item)) {
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(x > t || x < -t)) {
		node_page_state_add(x, pgdat, item);
		x = 0;
	}
	__this_cpu_write(*p, x);
}
EXPORT_SYMBOL(__mod_node_page_state);

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v + overstep, zone, item);
		__this_cpu_write(*p, -overstep);
	}
}
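
/*
 * Overstep example (illustrative): with t == 8, the increment that
 * takes the per-cpu diff to 9 folds 9 + 4 == 13 into the zone counter
 * and leaves the diff at -4, so another 13 increments can be absorbed
 * locally before the threshold is crossed again.
 */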

void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	s8 v, t;

	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		node_page_state_add(v + overstep, pgdat, item);
		__this_cpu_write(*p, -overstep);
	}
}

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __inc_node_page_state(struct page *page, enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}
EXPORT_SYMBOL(__inc_node_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < -t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v - overstep, zone, item);
		__this_cpu_write(*p, overstep);
	}
}

void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	s8 v, t;

	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < -t)) {
		s8 overstep = t >> 1;

		node_page_state_add(v - overstep, pgdat, item);
		__this_cpu_write(*p, overstep);
	}
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

void __dec_node_page_state(struct page *page, enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}
EXPORT_SYMBOL(__dec_node_page_state);

#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
/*
 * If we have cmpxchg_local support then we do not need to incur the overhead
 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
 *
 * mod_state() modifies the zone counter state through atomic per cpu
 * operations.
 *
 * Overstep mode specifies how overstep should be handled:
 *	0	No overstepping
 *	1	Overstepping half of threshold
 *	-1	Overstepping minus half of threshold
 */
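/*
 * Example (illustrative): with t == 8 and overstep_mode == 1, a delta
 * that takes the diff to 9 folds 9 + 4 into the zone counter and
 * leaves -4 behind; mode -1 mirrors this for decrements; mode 0 folds
 * the whole excess and leaves the per-cpu diff at 0.
 */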
static inline void mod_zone_state(struct zone *zone,
       enum zone_stat_item item, long delta, int overstep_mode)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long o, n, t, z;

	do {
		z = 0;  /* overflow to zone counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong cpu if we get
		 * rescheduled while executing here. However, the next
		 * counter update will apply the threshold again and
		 * therefore bring the counter under the threshold again.
		 *
		 * Most of the time the thresholds are the same anyway
		 * for all cpus in a zone.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		o = this_cpu_read(*p);
		n = delta + o;

		if (n > t || n < -t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to zone counters */
			z = n + os;
			n = -os;
		}
	} while (this_cpu_cmpxchg(*p, o, n) != o);

	if (z)
		zone_page_state_add(z, zone, item);
}

void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			 long delta)
{
	mod_zone_state(zone, item, delta, 0);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_zone_state(page_zone(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_zone_state(page_zone(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_zone_page_state);

static inline void mod_node_state(struct pglist_data *pgdat,
       enum node_stat_item item, int delta, int overstep_mode)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	long o, n, t, z;

	if (vmstat_item_in_bytes(item)) {
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	do {
		z = 0;  /* overflow to node counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong cpu if we get
		 * rescheduled while executing here. However, the next
		 * counter update will apply the threshold again and
		 * therefore bring the counter under the threshold again.
		 *
		 * Most of the time the thresholds are the same anyway
		 * for all cpus in a node.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		o = this_cpu_read(*p);
		n = delta + o;

		if (n > t || n < -t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to node counters */
			z = n + os;
			n = -os;
		}
	} while (this_cpu_cmpxchg(*p, o, n) != o);

	if (z)
		node_page_state_add(z, pgdat, item);
}

void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
			 long delta)
{
	mod_node_state(pgdat, item, delta, 0);
}
EXPORT_SYMBOL(mod_node_page_state);

void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	mod_node_state(pgdat, item, 1, 1);
}

void inc_node_page_state(struct page *page, enum node_stat_item item)
{
	mod_node_state(page_pgdat(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_node_page_state);

void dec_node_page_state(struct page *page, enum node_stat_item item)
{
	mod_node_state(page_pgdat(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_node_page_state);
#else
/*
 * Use interrupt disable to serialize counter updates
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			 long delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_node_state(pgdat, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_node_state);

void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
			 long delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_node_page_state(pgdat, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_node_page_state);

void inc_node_page_state(struct page *page, enum node_stat_item item)
{
	unsigned long flags;
	struct pglist_data *pgdat;

	pgdat = page_pgdat(page);
	local_irq_save(flags);
	__inc_node_state(pgdat, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_node_page_state);

void dec_node_page_state(struct page *page, enum node_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_node_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_node_page_state);
#endif

/*
 * Fold a differential into the global counters.
 * Returns the number of counters updated.
 */
#ifdef CONFIG_NUMA
static int fold_diff(int *zone_diff, int *numa_diff, int *node_diff)
{
	int i;
	int changes = 0;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (zone_diff[i]) {
			atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
			changes++;
		}

	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
		if (numa_diff[i]) {
			atomic_long_add(numa_diff[i], &vm_numa_stat[i]);
			changes++;
		}

	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
		if (node_diff[i]) {
			atomic_long_add(node_diff[i], &vm_node_stat[i]);
			changes++;
		}
	return changes;
}
#else
static int fold_diff(int *zone_diff, int *node_diff)
{
	int i;
	int changes = 0;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (zone_diff[i]) {
			atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
			changes++;
		}

	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
		if (node_diff[i]) {
			atomic_long_add(node_diff[i], &vm_node_stat[i]);
			changes++;
		}
	return changes;
}
#endif /* CONFIG_NUMA */

/*
 * Update the zone counters for the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These could cause remote node cache line
 * bouncing and will have to be only done when necessary.
 *
 * The function returns the number of global counters updated.
 */
static int refresh_cpu_vm_stats(bool do_pagesets)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int i;
	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
#ifdef CONFIG_NUMA
	int global_numa_diff[NR_VM_NUMA_STAT_ITEMS] = { 0, };
#endif
	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
	int changes = 0;

	for_each_populated_zone(zone) {
		struct per_cpu_pageset __percpu *p = zone->pageset;

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
			int v;

			v = this_cpu_xchg(p->vm_stat_diff[i], 0);
			if (v) {

				atomic_long_add(v, &zone->vm_stat[i]);
				global_zone_diff[i] += v;
#ifdef CONFIG_NUMA
				/* 3 seconds idle till flush */
				__this_cpu_write(p->expire, 3);
#endif
			}
		}
#ifdef CONFIG_NUMA
		for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) {
			int v;

			v = this_cpu_xchg(p->vm_numa_stat_diff[i], 0);
			if (v) {

				atomic_long_add(v, &zone->vm_numa_stat[i]);
				global_numa_diff[i] += v;
				__this_cpu_write(p->expire, 3);
			}
		}

		if (do_pagesets) {
			cond_resched();
			/*
			 * Deal with draining the remote pageset of this
			 * processor
			 *
			 * Check if there are pages remaining in this pageset
			 * if not then there is nothing to expire.
			 */
			if (!__this_cpu_read(p->expire) ||
			       !__this_cpu_read(p->pcp.count))
				continue;

			/*
			 * We never drain zones local to this processor.
			 */
			if (zone_to_nid(zone) == numa_node_id()) {
				__this_cpu_write(p->expire, 0);
				continue;
			}

			if (__this_cpu_dec_return(p->expire))
				continue;

			if (__this_cpu_read(p->pcp.count)) {
				drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
				changes++;
			}
		}
#endif
	}

	for_each_online_pgdat(pgdat) {
		struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;

		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
			int v;

			v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
			if (v) {
				atomic_long_add(v, &pgdat->vm_stat[i]);
				global_node_diff[i] += v;
			}
		}
	}

#ifdef CONFIG_NUMA
	changes += fold_diff(global_zone_diff, global_numa_diff,
			     global_node_diff);
#else
	changes += fold_diff(global_zone_diff, global_node_diff);
#endif
	return changes;
}
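
/*
 * Note on p->expire above: it is re-armed to 3 whenever this CPU folds
 * counters for a zone and counted down once per refresh pass. With the
 * default sysctl_stat_interval of HZ that gives roughly three seconds
 * of idleness before a remote pageset's leftover pcp pages are drained
 * back to the buddy allocator.
 */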

/*
 * Fold the data for an offline cpu into the global array.
 * There cannot be any access by the offline cpu and therefore
 * synchronization is simplified.
 */
void cpu_vm_stats_fold(int cpu)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int i;
	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
#ifdef CONFIG_NUMA
	int global_numa_diff[NR_VM_NUMA_STAT_ITEMS] = { 0, };
#endif
	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *p;

		p = per_cpu_ptr(zone->pageset, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (p->vm_stat_diff[i]) {
				int v;

				v = p->vm_stat_diff[i];
				p->vm_stat_diff[i] = 0;
				atomic_long_add(v, &zone->vm_stat[i]);
				global_zone_diff[i] += v;
			}

#ifdef CONFIG_NUMA
		for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
			if (p->vm_numa_stat_diff[i]) {
				int v;

				v = p->vm_numa_stat_diff[i];
				p->vm_numa_stat_diff[i] = 0;
				atomic_long_add(v, &zone->vm_numa_stat[i]);
				global_numa_diff[i] += v;
			}
#endif
	}

	for_each_online_pgdat(pgdat) {
		struct per_cpu_nodestat *p;

		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);

		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
			if (p->vm_node_stat_diff[i]) {
				int v;

				v = p->vm_node_stat_diff[i];
				p->vm_node_stat_diff[i] = 0;
				atomic_long_add(v, &pgdat->vm_stat[i]);
				global_node_diff[i] += v;
			}
	}

#ifdef CONFIG_NUMA
	fold_diff(global_zone_diff, global_numa_diff, global_node_diff);
#else
	fold_diff(global_zone_diff, global_node_diff);
#endif
}

/*
 * This is only called if !populated_zone(zone), which implies no other
 * users of pset->vm_stat_diff[] exist.
 */
void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
{
	int i;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (pset->vm_stat_diff[i]) {
			int v = pset->vm_stat_diff[i];

			pset->vm_stat_diff[i] = 0;
			atomic_long_add(v, &zone->vm_stat[i]);
			atomic_long_add(v, &vm_zone_stat[i]);
		}

#ifdef CONFIG_NUMA
	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
		if (pset->vm_numa_stat_diff[i]) {
			int v = pset->vm_numa_stat_diff[i];

			pset->vm_numa_stat_diff[i] = 0;
			atomic_long_add(v, &zone->vm_numa_stat[i]);
			atomic_long_add(v, &vm_numa_stat[i]);
		}
#endif
}
#endif

#ifdef CONFIG_NUMA
void __inc_numa_state(struct zone *zone,
				 enum numa_stat_item item)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	u16 __percpu *p = pcp->vm_numa_stat_diff + item;
	u16 v;

	v = __this_cpu_inc_return(*p);

	if (unlikely(v > NUMA_STATS_THRESHOLD)) {
		zone_numa_state_add(v, zone, item);
		__this_cpu_write(*p, 0);
	}
}
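
/*
 * NUMA_STATS_THRESHOLD (U16_MAX - 2) appears chosen so the unsigned
 * 16-bit per-cpu diff is folded just before it could wrap: the value
 * returned by __this_cpu_inc_return() reaches at most U16_MAX - 1
 * before the fold above resets it to 0.
 */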

/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
unsigned long sum_zone_node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;
	int i;
	unsigned long count = 0;

	for (i = 0; i < MAX_NR_ZONES; i++)
		count += zone_page_state(zones + i, item);

	return count;
}

/*
 * Determine the per node value of a numa stat item. To avoid deviation,
 * the per cpu stat number in vm_numa_stat_diff[] is also included.
 */
unsigned long sum_zone_numa_state(int node,
				 enum numa_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;
	int i;
	unsigned long count = 0;

	for (i = 0; i < MAX_NR_ZONES; i++)
		count += zone_numa_state_snapshot(zones + i, item);

	return count;
}

/*
 * Determine the per node value of a stat item.
 */
unsigned long node_page_state_pages(struct pglist_data *pgdat,
				    enum node_stat_item item)
{
	long x = atomic_long_read(&pgdat->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

unsigned long node_page_state(struct pglist_data *pgdat,
			      enum node_stat_item item)
{
	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	return node_page_state_pages(pgdat, item);
}
#endif

#ifdef CONFIG_COMPACTION

struct contig_page_info {
	unsigned long free_pages;
	unsigned long free_blocks_total;
	unsigned long free_blocks_suitable;
};

/*
 * Calculate the number of free pages in a zone, how many contiguous
 * pages are free and how many are large enough to satisfy an allocation of
 * the target size. Note that this function makes no attempt to estimate
 * how many suitable free blocks there *might* be if MOVABLE pages were
 * migrated. Calculating that is possible, but expensive and can be
 * figured out from userspace
 */
static void fill_contig_page_info(struct zone *zone,
				unsigned int suitable_order,
				struct contig_page_info *info)
{
	unsigned int order;

	info->free_pages = 0;
	info->free_blocks_total = 0;
	info->free_blocks_suitable = 0;

	for (order = 0; order < MAX_ORDER; order++) {
		unsigned long blocks;

		/* Count number of free blocks */
		blocks = zone->free_area[order].nr_free;
		info->free_blocks_total += blocks;

		/* Count free base pages */
		info->free_pages += blocks << order;

		/* Count the suitable free blocks */
		if (order >= suitable_order)
			info->free_blocks_suitable += blocks <<
						(order - suitable_order);
	}
}

/*
 * A fragmentation index only makes sense if an allocation of a requested
 * size would fail. If that is true, the fragmentation index indicates
 * whether external fragmentation or a lack of memory was the problem.
 * The value can be used to determine if page reclaim or compaction
 * should be used
 */
static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
{
	unsigned long requested = 1UL << order;

	if (WARN_ON_ONCE(order >= MAX_ORDER))
		return 0;

	if (!info->free_blocks_total)
		return 0;

	/* Fragmentation index only makes sense when a request would fail */
	if (info->free_blocks_suitable)
		return -1000;

	/*
	 * Index is between 0 and 1 so return within 3 decimal places
	 *
	 * 0 => allocation would fail due to lack of memory
	 * 1 => allocation would fail due to fragmentation
	 */
	return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL,
					requested)), info->free_blocks_total);
}
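
/*
 * Worked example (illustrative): an order-4 request (16 pages) against
 * 1000 free pages spread over 250 order-2 blocks, none of them order 4
 * or larger, gives 1000 - (1000 + 1000 * 1000 / 16) / 250 == 746;
 * a value that close to 1000 points at external fragmentation rather
 * than a lack of memory.
 */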

/*
 * Calculates external fragmentation within a zone wrt the given order.
 * It is defined as the percentage of pages found in blocks of size
 * less than 1 << order. It returns values in range [0, 100].
 */
unsigned int extfrag_for_order(struct zone *zone, unsigned int order)
{
	struct contig_page_info info;

	fill_contig_page_info(zone, order, &info);
	if (info.free_pages == 0)
		return 0;

	return div_u64((info.free_pages -
			(info.free_blocks_suitable << order)) * 100,
			info.free_pages);
}
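
/*
 * Example (illustrative): with 1000 free pages of which 8 order-3
 * blocks are large enough, extfrag for order 3 is
 * (1000 - (8 << 3)) * 100 / 1000 == 93 (percent, rounded down).
 */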

/* Same as __fragmentation index but allocs contig_page_info on stack */
int fragmentation_index(struct zone *zone, unsigned int order)
{
	struct contig_page_info info;

	fill_contig_page_info(zone, order, &info);
	return __fragmentation_index(order, &info);
}
#endif

#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || \
    defined(CONFIG_NUMA) || defined(CONFIG_MEMCG)
#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
					TEXT_FOR_HIGHMEM(xx) xx "_movable",
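
/*
 * For example, TEXTS_FOR_ZONES("pgalloc") expands (with all zone types
 * configured in) to:
 *	"pgalloc_dma", "pgalloc_dma32", "pgalloc_normal",
 *	"pgalloc_high", "pgalloc_movable",
 */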

const char * const vmstat_text[] = {
	/* enum zone_stat_item counters */
	"nr_free_pages",
	"nr_zone_inactive_anon",
	"nr_zone_active_anon",
	"nr_zone_inactive_file",
	"nr_zone_active_file",
	"nr_zone_unevictable",
	"nr_zone_write_pending",
	"nr_mlock",
	"nr_page_table_pages",
	"nr_bounce",
#if IS_ENABLED(CONFIG_ZSMALLOC)
	"nr_zspages",
#endif
	"nr_free_cma",

	/* enum numa_stat_item counters */
#ifdef CONFIG_NUMA
	"numa_hit",
	"numa_miss",
	"numa_foreign",
	"numa_interleave",
	"numa_local",
	"numa_other",
#endif

	/* enum node_stat_item counters */
	"nr_inactive_anon",
	"nr_active_anon",
	"nr_inactive_file",
	"nr_active_file",
	"nr_unevictable",
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"nr_isolated_anon",
	"nr_isolated_file",
	"workingset_nodes",
	"workingset_refault_anon",
	"workingset_refault_file",
	"workingset_activate_anon",
	"workingset_activate_file",
	"workingset_restore_anon",
	"workingset_restore_file",
	"workingset_nodereclaim",
	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",
	"nr_dirty",
	"nr_writeback",
	"nr_writeback_temp",
	"nr_shmem",
	"nr_shmem_hugepages",
	"nr_shmem_pmdmapped",
	"nr_file_hugepages",
	"nr_file_pmdmapped",
	"nr_anon_transparent_hugepages",
	"nr_vmscan_write",
	"nr_vmscan_immediate_reclaim",
	"nr_dirtied",
	"nr_written",
	"nr_kernel_misc_reclaimable",
	"nr_foll_pin_acquired",
	"nr_foll_pin_released",
	"nr_kernel_stack",
#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
	"nr_shadow_call_stack",
#endif

	/* enum writeback_stat_item counters */
	"nr_dirty_threshold",
	"nr_dirty_background_threshold",

#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
	/* enum vm_event_item counters */
	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	TEXTS_FOR_ZONES("pgalloc")
	TEXTS_FOR_ZONES("allocstall")
	TEXTS_FOR_ZONES("pgskip")

	"pgfree",
	"pgactivate",
	"pgdeactivate",
	"pglazyfree",

	"pgfault",
	"pgmajfault",
	"pglazyfreed",

	"pgrefill",
	"pgreuse",
	"pgsteal_kswapd",
	"pgsteal_direct",
	"pgscan_kswapd",
	"pgscan_direct",
	"pgscan_direct_throttle",
	"pgscan_anon",
	"pgscan_file",
	"pgsteal_anon",
	"pgsteal_file",

#ifdef CONFIG_NUMA
	"zone_reclaim_failed",
#endif
	"pginodesteal",
	"slabs_scanned",
	"kswapd_inodesteal",
	"kswapd_low_wmark_hit_quickly",
	"kswapd_high_wmark_hit_quickly",
	"pageoutrun",

	"pgrotated",

	"drop_pagecache",
	"drop_slab",
	"oom_kill",

#ifdef CONFIG_NUMA_BALANCING
	"numa_pte_updates",
	"numa_huge_pte_updates",
	"numa_hint_faults",
	"numa_hint_faults_local",
	"numa_pages_migrated",
#endif
#ifdef CONFIG_MIGRATION
	"pgmigrate_success",
	"pgmigrate_fail",
	"thp_migration_success",
	"thp_migration_fail",
	"thp_migration_split",
#endif
#ifdef CONFIG_COMPACTION
	"compact_migrate_scanned",
	"compact_free_scanned",
	"compact_isolated",
	"compact_stall",
	"compact_fail",
	"compact_success",
	"compact_daemon_wake",
	"compact_daemon_migrate_scanned",
	"compact_daemon_free_scanned",
#endif

#ifdef CONFIG_HUGETLB_PAGE
	"htlb_buddy_alloc_success",
	"htlb_buddy_alloc_fail",
#endif
	"unevictable_pgs_culled",
	"unevictable_pgs_scanned",
	"unevictable_pgs_rescued",
	"unevictable_pgs_mlocked",
	"unevictable_pgs_munlocked",
	"unevictable_pgs_cleared",
	"unevictable_pgs_stranded",

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	"thp_fault_alloc",
	"thp_fault_fallback",
	"thp_fault_fallback_charge",
	"thp_collapse_alloc",
	"thp_collapse_alloc_failed",
	"thp_file_alloc",
	"thp_file_fallback",
	"thp_file_fallback_charge",
	"thp_file_mapped",
	"thp_split_page",
	"thp_split_page_failed",
	"thp_deferred_split_page",
	"thp_split_pmd",
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
	"thp_split_pud",
#endif
	"thp_zero_page_alloc",
	"thp_zero_page_alloc_failed",
	"thp_swpout",
	"thp_swpout_fallback",
#endif
#ifdef CONFIG_MEMORY_BALLOON
	"balloon_inflate",
	"balloon_deflate",
#ifdef CONFIG_BALLOON_COMPACTION
	"balloon_migrate",
#endif
#endif /* CONFIG_MEMORY_BALLOON */
#ifdef CONFIG_DEBUG_TLBFLUSH
	"nr_tlb_remote_flush",
	"nr_tlb_remote_flush_received",
	"nr_tlb_local_flush_all",
	"nr_tlb_local_flush_one",
#endif /* CONFIG_DEBUG_TLBFLUSH */

#ifdef CONFIG_DEBUG_VM_VMACACHE
	"vmacache_find_calls",
	"vmacache_find_hits",
#endif
#ifdef CONFIG_SWAP
	"swap_ra",
	"swap_ra_hit",
#endif
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
};
#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */

#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
     defined(CONFIG_PROC_FS)
static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;

	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/*
 * Walk zones in a node and print using a callback.
 * If @assert_populated is true, only use callback for zones that are populated.
 */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
		bool assert_populated, bool nolock,
		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (assert_populated && !populated_zone(zone))
			continue;

		if (!nolock)
			spin_lock_irqsave(&zone->lock, flags);
		print(m, pgdat, zone);
		if (!nolock)
			spin_unlock_irqrestore(&zone->lock, flags);
	}
}
#endif

#ifdef CONFIG_PROC_FS
static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
						struct zone *zone)
{
	int order;

	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
	seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, true, false, frag_show_print);
	return 0;
}

static void pagetypeinfo_showfree_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int order, mtype;

	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
		seq_printf(m, "Node %4d, zone %8s, type %12s ",
					pgdat->node_id,
					zone->name,
					migratetype_names[mtype]);
		for (order = 0; order < MAX_ORDER; ++order) {
			unsigned long freecount = 0;
			struct free_area *area;
			struct list_head *curr;
			bool overflow = false;

			area = &(zone->free_area[order]);

			list_for_each(curr, &area->free_list[mtype]) {
				/*
				 * Cap the free_list iteration because it might
				 * be really large and we are under a spinlock
				 * so a long time spent here could trigger a
				 * hard lockup detector. Anyway this is a
				 * debugging tool so knowing there is a handful
				 * of pages of this order should be more than
				 * sufficient.
				 */
				if (++freecount >= 100000) {
					overflow = true;
					break;
				}
			}
			seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
			spin_unlock_irq(&zone->lock);
			cond_resched();
			spin_lock_irq(&zone->lock);
		}
		seq_putc(m, '\n');
	}
}

/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
	int order;
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* Print header */
	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6d ", order);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, true, false, pagetypeinfo_showfree_print);

	return 0;
}

static void pagetypeinfo_showblockcount_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int mtype;
	unsigned long pfn;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count[MIGRATE_TYPES] = { 0, };

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		page = pfn_to_online_page(pfn);
		if (!page)
			continue;

		/* Watch for unexpected holes punched in the memmap */
		if (!memmap_valid_within(pfn, page, zone))
			continue;

		if (page_zone(page) != zone)
			continue;

		mtype = get_pageblock_migratetype(page);

		if (mtype < MIGRATE_TYPES)
			count[mtype]++;
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12lu ", count[mtype]);
	seq_putc(m, '\n');
}

/* Print out the number of pageblocks for each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
	int mtype;
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "\n%-23s", "Number of blocks type ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');
	walk_zones_in_node(m, pgdat, true, false,
		pagetypeinfo_showblockcount_print);

	return 0;
}

/*
 * Print out the number of pageblocks for each migratetype that contain pages
 * of other types. This gives an indication of how well fallbacks are being
 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
 * to determine what is going on
 */
static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
{
#ifdef CONFIG_PAGE_OWNER
	int mtype;

	if (!static_branch_unlikely(&page_owner_inited))
		return;

	drain_all_pages(NULL);

	seq_printf(m, "\n%-23s", "Number of mixed blocks ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, true, true,
		pagetypeinfo_showmixedcount_print);
#endif /* CONFIG_PAGE_OWNER */
}

/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_MEMORY))
		return 0;

	seq_printf(m, "Page block order: %d\n", pageblock_order);
	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
	seq_putc(m, '\n');
	pagetypeinfo_showfree(m, pgdat);
	pagetypeinfo_showblockcount(m, pgdat);
	pagetypeinfo_showmixedcount(m, pgdat);

	return 0;
}

static const struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};

static const struct seq_operations pagetypeinfo_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= pagetypeinfo_show,
};

static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
{
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *compare = &pgdat->node_zones[zid];

		if (populated_zone(compare))
			return zone == compare;
	}

	return false;
}

static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
							struct zone *zone)
{
	int i;

	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
	if (is_zone_first_populated(pgdat, zone)) {
		seq_printf(m, "\n  per-node stats");
		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
			seq_printf(m, "\n      %-12s %lu", node_stat_name(i),
				   node_page_state_pages(pgdat, i));
		}
	}
	seq_printf(m,
		   "\n  pages free     %lu"
		   "\n        min      %lu"
		   "\n        low      %lu"
		   "\n        high     %lu"
		   "\n        spanned  %lu"
		   "\n        present  %lu"
		   "\n        managed  %lu",
		   zone_page_state(zone, NR_FREE_PAGES),
		   min_wmark_pages(zone),
		   low_wmark_pages(zone),
		   high_wmark_pages(zone),
		   zone->spanned_pages,
		   zone->present_pages,
		   zone_managed_pages(zone));

	seq_printf(m,
		   "\n        protection: (%ld",
		   zone->lowmem_reserve[0]);
	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
	seq_putc(m, ')');

	/* If unpopulated, no other information is useful */
	if (!populated_zone(zone)) {
		seq_putc(m, '\n');
		return;
	}

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		seq_printf(m, "\n      %-12s %lu", zone_stat_name(i),
			   zone_page_state(zone, i));

#ifdef CONFIG_NUMA
	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
		seq_printf(m, "\n      %-12s %lu", numa_stat_name(i),
			   zone_numa_state_snapshot(zone, i));
#endif

	seq_printf(m, "\n  pagesets");
	for_each_online_cpu(i) {
		struct per_cpu_pageset *pageset;

		pageset = per_cpu_ptr(zone->pageset, i);
		seq_printf(m,
			   "\n    cpu: %i"
			   "\n              count: %i"
			   "\n              high:  %i"
			   "\n              batch: %i",
			   i,
			   pageset->pcp.count,
			   pageset->pcp.high,
			   pageset->pcp.batch);
#ifdef CONFIG_SMP
		seq_printf(m, "\n  vm stats threshold: %d",
				pageset->stat_threshold);
#endif
	}
	seq_printf(m,
		   "\n  node_unreclaimable:  %u"
		   "\n  start_pfn:           %lu",
		   pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
		   zone->zone_start_pfn);
	seq_putc(m, '\n');
}

/*
 * Output information about zones in @pgdat. All zones are printed regardless
 * of whether they are populated or not: lowmem_reserve_ratio operates on the
 * set of all zones and userspace would not be aware of such zones if they are
 * suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio).
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, false, false, zoneinfo_show_print);
	return 0;
}

static const struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};

#define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \
			 NR_VM_NUMA_STAT_ITEMS + \
			 NR_VM_NODE_STAT_ITEMS + \
			 NR_VM_WRITEBACK_STAT_ITEMS + \
			 (IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \
			  NR_VM_EVENT_ITEMS : 0))

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
	int i;

	if (*pos >= NR_VMSTAT_ITEMS)
		return NULL;

	BUILD_BUG_ON(ARRAY_SIZE(vmstat_text) < NR_VMSTAT_ITEMS);
	v = kmalloc_array(NR_VMSTAT_ITEMS, sizeof(unsigned long), GFP_KERNEL);
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_zone_page_state(i);
	v += NR_VM_ZONE_STAT_ITEMS;

#ifdef CONFIG_NUMA
	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
		v[i] = global_numa_state(i);
	v += NR_VM_NUMA_STAT_ITEMS;
#endif

	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
		v[i] = global_node_page_state_pages(i);
	v += NR_VM_NODE_STAT_ITEMS;

	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
			    v + NR_DIRTY_THRESHOLD);
	v += NR_VM_WRITEBACK_STAT_ITEMS;

#ifdef CONFIG_VM_EVENT_COUNTERS
	all_vm_events(v);
	v[PGPGIN] /= 2;		/* sectors -> kbytes */
	v[PGPGOUT] /= 2;
#endif
	return (unsigned long *)m->private + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= NR_VMSTAT_ITEMS)
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_puts(m, vmstat_text[off]);
	seq_put_decimal_ull(m, " ", *l);
	seq_putc(m, '\n');

	if (off == NR_VMSTAT_ITEMS - 1) {
		/*
		 * We've come to the end - add any deprecated counters to avoid
		 * breaking userspace which might depend on them being present.
		 */
		seq_puts(m, "nr_unstable 0\n");
	}
	return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

static const struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;

#ifdef CONFIG_PROC_FS
static void refresh_vm_stats(struct work_struct *work)
{
	refresh_cpu_vm_stats(true);
}

int vmstat_refresh(struct ctl_table *table, int write,
		   void *buffer, size_t *lenp, loff_t *ppos)
{
	long val;
	int err;
	int i;

	/*
	 * The regular update, every sysctl_stat_interval, may come later
	 * than expected: leaving a significant amount in per_cpu buckets.
	 * This is particularly misleading when checking a quantity of HUGE
	 * pages, immediately after running a test. /proc/sys/vm/stat_refresh,
	 * which can equally be echo'ed to or cat'ted from (by root),
	 * can be used to update the stats just before reading them.
	 *
	 * Oh, and since global_zone_page_state() etc. are so careful to hide
	 * transiently negative values, report an error here if any of
	 * the stats is negative, so we know to go looking for imbalance.
	 */
	err = schedule_on_each_cpu(refresh_vm_stats);
	if (err)
		return err;
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
		val = atomic_long_read(&vm_zone_stat[i]);
		if (val < 0) {
			pr_warn("%s: %s %ld\n",
				__func__, zone_stat_name(i), val);
			err = -EINVAL;
		}
	}
#ifdef CONFIG_NUMA
	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) {
		val = atomic_long_read(&vm_numa_stat[i]);
		if (val < 0) {
			pr_warn("%s: %s %ld\n",
				__func__, numa_stat_name(i), val);
			err = -EINVAL;
		}
	}
#endif
	if (err)
		return err;
	if (write)
		*ppos += *lenp;
	else
		*lenp = 0;
	return 0;
}
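
/*
 * Illustrative use from userspace (shell commands, not part of this
 * file):
 *
 *	echo 1 > /proc/sys/vm/stat_refresh	# or: cat /proc/sys/vm/stat_refresh
 *	cat /proc/vmstat
 */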
#endif /* CONFIG_PROC_FS */

static void vmstat_update(struct work_struct *w)
{
	if (refresh_cpu_vm_stats(true)) {
		/*
		 * Counters were updated so we expect more updates
		 * to occur in the future. Keep on running the
		 * update worker thread.
		 */
		queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
				this_cpu_ptr(&vmstat_work),
				round_jiffies_relative(sysctl_stat_interval));
	}
}

/*
 * Check if the diffs for a certain cpu indicate that
 * an update is needed.
 */
static bool need_update(int cpu)
{
	struct zone *zone;

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);

		BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1);
#ifdef CONFIG_NUMA
		BUILD_BUG_ON(sizeof(p->vm_numa_stat_diff[0]) != 2);
#endif

		/*
		 * The fast way of checking if there are any vmstat diffs.
		 */
		if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS *
			       sizeof(p->vm_stat_diff[0])))
			return true;
#ifdef CONFIG_NUMA
		if (memchr_inv(p->vm_numa_stat_diff, 0, NR_VM_NUMA_STAT_ITEMS *
			       sizeof(p->vm_numa_stat_diff[0])))
			return true;
#endif
	}
	return false;
}

/*
 * Switch off vmstat processing and then fold all the remaining differentials
 * until the diffs stay at zero. The function is used by NOHZ and can only be
 * invoked when tick processing is not active.
 */
void quiet_vmstat(void)
{
	if (system_state != SYSTEM_RUNNING)
		return;

	if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
		return;

	if (!need_update(smp_processor_id()))
		return;

	/*
	 * Just refresh counters and do not care about the pending delayed
	 * vmstat_update. It doesn't fire that often to matter and canceling
	 * it would be too expensive from this path.
	 * vmstat_shepherd will take care about that for us.
	 */
	refresh_cpu_vm_stats(false);
}

/*
 * Shepherd worker thread that checks the
 * differentials of processors that have their worker
 * threads for vm statistics updates disabled because of
 * inactivity.
 */
static void vmstat_shepherd(struct work_struct *w);

static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);

static void vmstat_shepherd(struct work_struct *w)
{
	int cpu;

	get_online_cpus();
	/* Check processors whose vmstat worker threads have been disabled */
	for_each_online_cpu(cpu) {
		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);

		if (!delayed_work_pending(dw) && need_update(cpu))
			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
	}
	put_online_cpus();

	schedule_delayed_work(&shepherd,
		round_jiffies_relative(sysctl_stat_interval));
}
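
/*
 * Net effect: the shepherd re-arms itself every sysctl_stat_interval
 * and only queues a CPU's vmstat_work when need_update() sees pending
 * diffs there, so completely idle CPUs are spared the periodic vmstat
 * work entirely.
 */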
1966
1967static void __init start_shepherd_timer(void)
1968{
1969 int cpu;
1970
1971 for_each_possible_cpu(cpu)
1972 INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
1973 vmstat_update);
1974
1975 schedule_delayed_work(&shepherd,
1976 round_jiffies_relative(sysctl_stat_interval));
1977}
1978
1979static void __init init_cpu_node_state(void)
1980{
1981 int node;
1982
1983 for_each_online_node(node) {
1984 if (cpumask_weight(cpumask_of_node(node)) > 0)
1985 node_set_state(node, N_CPU);
1986 }
1987}
1988
1989static int vmstat_cpu_online(unsigned int cpu)
1990{
1991 refresh_zone_stat_thresholds();
1992 node_set_state(cpu_to_node(cpu), N_CPU);
1993 return 0;
1994}
1995
1996static int vmstat_cpu_down_prep(unsigned int cpu)
1997{
1998 cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
1999 return 0;
2000}
2001
2002static int vmstat_cpu_dead(unsigned int cpu)
2003{
2004 const struct cpumask *node_cpus;
2005 int node;
2006
2007 node = cpu_to_node(cpu);
2008
2009 refresh_zone_stat_thresholds();
2010 node_cpus = cpumask_of_node(node);
2011 if (cpumask_weight(node_cpus) > 0)
2012 return 0;
2013
2014 node_clear_state(node, N_CPU);
2015 return 0;
2016}
2017
2018#endif
2019
2020struct workqueue_struct *mm_percpu_wq;
2021
2022void __init init_mm_internals(void)
2023{
2024 int ret __maybe_unused;
2025
2026 mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);
2027
2028#ifdef CONFIG_SMP
2029 ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
2030 NULL, vmstat_cpu_dead);
2031 if (ret < 0)
2032 pr_err("vmstat: failed to register 'dead' hotplug state\n");
2033
2034 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
2035 vmstat_cpu_online,
2036 vmstat_cpu_down_prep);
2037 if (ret < 0)
2038 pr_err("vmstat: failed to register 'online' hotplug state\n");
2039
2040 get_online_cpus();
2041 init_cpu_node_state();
2042 put_online_cpus();
2043
2044 start_shepherd_timer();
2045#endif
#ifdef CONFIG_PROC_FS
	proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
	proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
	proc_create_seq("vmstat", 0444, NULL, &vmstat_op);
	proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op);
#endif
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)

/*
 * Return an index indicating how much of the available free memory is
 * unusable for an allocation of the requested size.
 */
static int unusable_free_index(unsigned int order,
				struct contig_page_info *info)
{
	/* No free memory is interpreted as all free memory is unusable */
	if (info->free_pages == 0)
		return 1000;

	/*
	 * Index should be a value between 0 and 1. Return a value to 3
	 * decimal places.
	 *
	 * 0 => no fragmentation
	 * 1 => high fragmentation
	 */
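	/*
	 * Worked example with hypothetical numbers: for an order-4 request,
	 * free_pages = 1000 of which free_blocks_suitable = 16 order-4-or-
	 * larger units (16 << 4 = 256 base pages) gives
	 * (1000 - 256) * 1000 / 1000 = 744, displayed as 0.744.
	 */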
	return div_u64((info->free_pages -
			(info->free_blocks_suitable << order)) * 1000ULL,
		       info->free_pages);
}

static void unusable_show_print(struct seq_file *m,
				pg_data_t *pgdat, struct zone *zone)
{
	unsigned int order;
	int index;
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
				pgdat->node_id,
				zone->name);
	for (order = 0; order < MAX_ORDER; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = unusable_free_index(order, &info);
		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
	}

	seq_putc(m, '\n');
}

/*
 * Display the unusable free space index
 *
 * The unusable free space index measures how much of the available free
 * memory cannot be used to satisfy an allocation of a given size and is a
 * value between 0 and 1. The higher the value, the more of the free memory
 * is unusable and, by implication, the worse the external fragmentation is.
 * It can be expressed as a percentage by multiplying by 100.
 */
static int unusable_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_MEMORY))
		return 0;

	walk_zones_in_node(m, pgdat, true, false, unusable_show_print);

	return 0;
}

static const struct seq_operations unusable_sops = {
	.start = frag_start,
	.next = frag_next,
	.stop = frag_stop,
	.show = unusable_show,
};

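/*
 * DEFINE_SEQ_ATTRIBUTE(name) generates a name##_open() helper plus a
 * name##_fops file_operations wrapping name##_sops; the unusable_fops and
 * extfrag_fops produced this way are registered with debugfs in
 * extfrag_debug_init() below.
 */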
DEFINE_SEQ_ATTRIBUTE(unusable);

static void extfrag_show_print(struct seq_file *m,
				pg_data_t *pgdat, struct zone *zone)
{
	unsigned int order;
	int index;

	/* Alloc on stack as interrupts are disabled for zone walk */
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
				pgdat->node_id,
				zone->name);
	for (order = 0; order < MAX_ORDER; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = __fragmentation_index(order, &info);
		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
	}

	seq_putc(m, '\n');
}

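/*
 * Worked example with hypothetical numbers: for an order-4 request
 * (requested = 16 pages), a zone with free_pages = 1000 spread over
 * free_blocks_total = 100 blocks, none of order 4 or larger, gives
 * __fragmentation_index() = 1000 - (1000 + 1000 * 1000 / 16) / 100 = 365,
 * displayed as 0.365. Zones where the request would succeed print -1.000.
 */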
/*
 * Display the fragmentation index for orders at which allocations would fail
 */
static int extfrag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	walk_zones_in_node(m, pgdat, true, false, extfrag_show_print);

	return 0;
}

static const struct seq_operations extfrag_sops = {
	.start = frag_start,
	.next = frag_next,
	.stop = frag_stop,
	.show = extfrag_show,
};

DEFINE_SEQ_ATTRIBUTE(extfrag);

static int __init extfrag_debug_init(void)
{
	struct dentry *extfrag_debug_root;

	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);

	debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL,
			    &unusable_fops);

	debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL,
			    &extfrag_fops);

	return 0;
}

module_init(extfrag_debug_init);
#endif