1/*
2 * linux/mm/vmstat.c
3 *
4 * Manages VM statistics
5 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
6 *
7 * zoned VM statistics
8 * Copyright (C) 2006 Silicon Graphics, Inc.,
9 * Christoph Lameter <christoph@lameter.com>
10 * Copyright (C) 2008-2014 Christoph Lameter
11 */
12#include <linux/fs.h>
13#include <linux/mm.h>
14#include <linux/err.h>
15#include <linux/module.h>
16#include <linux/slab.h>
17#include <linux/cpu.h>
18#include <linux/cpumask.h>
19#include <linux/vmstat.h>
20#include <linux/proc_fs.h>
21#include <linux/seq_file.h>
22#include <linux/debugfs.h>
23#include <linux/sched.h>
24#include <linux/math64.h>
25#include <linux/writeback.h>
26#include <linux/compaction.h>
27#include <linux/mm_inline.h>
28#include <linux/page_ext.h>
29#include <linux/page_owner.h>
30
31#include "internal.h"
32
33#ifdef CONFIG_VM_EVENT_COUNTERS
34DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
35EXPORT_PER_CPU_SYMBOL(vm_event_states);
36
37static void sum_vm_events(unsigned long *ret)
38{
39 int cpu;
40 int i;
41
42 memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
43
44 for_each_online_cpu(cpu) {
45 struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
46
47 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
48 ret[i] += this->event[i];
49 }
50}
51
52/*
53 * Accumulate the vm event counters across all CPUs.
54 * The result is unavoidably approximate - it can change
55 * during and after execution of this function.
56*/
57void all_vm_events(unsigned long *ret)
58{
59 get_online_cpus();
60 sum_vm_events(ret);
61 put_online_cpus();
62}
63EXPORT_SYMBOL_GPL(all_vm_events);
64
65/*
66 * Fold the foreign cpu events into our own.
67 *
68 * This is adding to the events on one processor
69 * but keeps the global counts constant.
70 */
71void vm_events_fold_cpu(int cpu)
72{
73 struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
74 int i;
75
76 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
77 count_vm_events(i, fold_state->event[i]);
78 fold_state->event[i] = 0;
79 }
80}
81
82#endif /* CONFIG_VM_EVENT_COUNTERS */
83
84/*
85 * Manage combined zone based / global counters
86 *
87 * vm_stat contains the global counters
88 */
89atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
90EXPORT_SYMBOL(vm_stat);
91
92#ifdef CONFIG_SMP
93
94int calculate_pressure_threshold(struct zone *zone)
95{
96 int threshold;
97 int watermark_distance;
98
99 /*
100 * As vmstats are not up to date, there is drift between the estimated
101 * and real values. For high thresholds and a high number of CPUs, it
102 * is possible for the min watermark to be breached while the estimated
103 * value looks fine. The pressure threshold is a reduced value such
104 * that even the maximum amount of drift will not accidentally breach
105 * the min watermark
106 */
107 watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
108 threshold = max(1, (int)(watermark_distance / num_online_cpus()));
109
110 /*
111 * Maximum threshold is 125
112 */
113 threshold = min(125, threshold);
114
115 return threshold;
116}
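/*
 * Worked example (illustrative numbers, not from the source): for a zone
 * with low = 12288 and min = 8192 pages the watermark distance is 4096
 * pages. On a 16-CPU machine each CPU may then hold at most
 * 4096 / 16 = 256 pages of drift, which the 125 cap reduces further, so
 * even if every CPU sits just below its threshold the total hidden
 * drift (16 * 125 = 2000 pages) stays inside the low-to-min gap.
 */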
117
118int calculate_normal_threshold(struct zone *zone)
119{
120 int threshold;
121 int mem; /* memory in 128 MB units */
122
123 /*
124 * The threshold scales with the number of processors and the amount
125 * of memory per zone. More memory means that we can defer updates for
126 * longer, more processors could lead to more contention.
127 * fls() is used to have a cheap way of logarithmic scaling.
128 *
129 * Some sample thresholds:
130 *
131 * Threshold Processors (fls) Zonesize fls(mem)+1
132 * ------------------------------------------------------------------
133 * 8 1 1 0.9-1 GB 4
134 * 16 2 2 0.9-1 GB 4
135 * 20 2 2 1-2 GB 5
136 * 24 2 2 2-4 GB 6
137 * 28 2 2 4-8 GB 7
138 * 32 2 2 8-16 GB 8
139 * 4 2 2 <128M 1
140 * 30 4 3 2-4 GB 5
141 * 48 4 3 8-16 GB 8
142 * 32 8 4 1-2 GB 4
143 * 32 8 4 0.9-1GB 4
144 * 10 16 5 <128M 1
145 * 40 16 5 900M 4
146 * 70 64 7 2-4 GB 5
147 * 84 64 7 4-8 GB 6
148 * 108 512 9 4-8 GB 6
149 * 125 1024 10 8-16 GB 8
150 * 125 1024 10 16-32 GB 9
151 */
152
153 mem = zone->managed_pages >> (27 - PAGE_SHIFT);
154
155 threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
156
157 /*
158 * Maximum threshold is 125
159 */
160 threshold = min(125, threshold);
161
162 return threshold;
163}
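/*
 * Illustrative sketch (not part of vmstat.c): how one row of the table
 * above comes about. For a 1.5 GB zone with 4 KB pages,
 * mem = managed_pages >> (27 - PAGE_SHIFT) = 12 "128 MB units", and with
 * 2 online CPUs the result is 2 * fls(2) * (1 + fls(12)) = 2 * 2 * 5 = 20,
 * matching the "1-2 GB / 2 CPUs" row. The helpers below are plain-C
 * stand-ins (names made up for the example) for the kernel's fls().
 */
#if 0	/* illustrative example only, never compiled */
static int example_fls(unsigned int x)
{
	int bit = 0;

	while (x) {		/* index of the highest set bit, fls(0) == 0 */
		x >>= 1;
		bit++;
	}
	return bit;
}

static int example_normal_threshold(unsigned long managed_pages,
				    int online_cpus, int page_shift)
{
	int mem = managed_pages >> (27 - page_shift);	/* 128 MB units */
	int threshold = 2 * example_fls(online_cpus) * (1 + example_fls(mem));

	return threshold < 125 ? threshold : 125;	/* cap as above */
}
#endif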
164
165/*
166 * Refresh the thresholds for each zone.
167 */
168void refresh_zone_stat_thresholds(void)
169{
170 struct zone *zone;
171 int cpu;
172 int threshold;
173
174 for_each_populated_zone(zone) {
175 unsigned long max_drift, tolerate_drift;
176
177 threshold = calculate_normal_threshold(zone);
178
179 for_each_online_cpu(cpu)
180 per_cpu_ptr(zone->pageset, cpu)->stat_threshold
181 = threshold;
182
183 /*
184 * Only set percpu_drift_mark if there is a danger that
185 * NR_FREE_PAGES reports the low watermark is ok when in fact
186 * the min watermark could be breached by an allocation
187 */
188 tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
189 max_drift = num_online_cpus() * threshold;
190 if (max_drift > tolerate_drift)
191 zone->percpu_drift_mark = high_wmark_pages(zone) +
192 max_drift;
193 }
194}
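/*
 * Worked example (illustrative numbers, not from the source): with 64
 * online CPUs and a per-cpu threshold of 70, up to 64 * 70 = 4480 free
 * pages can be "hidden" in per-cpu diffs. If the zone's low-to-min
 * watermark gap is only 4096 pages that drift could mask a breach of
 * the min watermark, so percpu_drift_mark is set to high_wmark + 4480
 * and watermark checks below that mark fall back to an exact per-cpu
 * sum instead of trusting the cached NR_FREE_PAGES value.
 */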
195
196void set_pgdat_percpu_threshold(pg_data_t *pgdat,
197 int (*calculate_pressure)(struct zone *))
198{
199 struct zone *zone;
200 int cpu;
201 int threshold;
202 int i;
203
204 for (i = 0; i < pgdat->nr_zones; i++) {
205 zone = &pgdat->node_zones[i];
206 if (!zone->percpu_drift_mark)
207 continue;
208
209 threshold = (*calculate_pressure)(zone);
210 for_each_online_cpu(cpu)
211 per_cpu_ptr(zone->pageset, cpu)->stat_threshold
212 = threshold;
213 }
214}
215
216/*
217 * For use when we know that interrupts are disabled,
218 * or when we know that preemption is disabled and that
219 * particular counter cannot be updated from interrupt context.
220 */
221void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
222 long delta)
223{
224 struct per_cpu_pageset __percpu *pcp = zone->pageset;
225 s8 __percpu *p = pcp->vm_stat_diff + item;
226 long x;
227 long t;
228
229 x = delta + __this_cpu_read(*p);
230
231 t = __this_cpu_read(pcp->stat_threshold);
232
233 if (unlikely(x > t || x < -t)) {
234 zone_page_state_add(x, zone, item);
235 x = 0;
236 }
237 __this_cpu_write(*p, x);
238}
239EXPORT_SYMBOL(__mod_zone_page_state);
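/*
 * Illustrative sketch (not part of vmstat.c): the per-cpu differential
 * scheme above in miniature. Deltas accumulate in a small CPU-local
 * counter and are folded into the shared global counter only once they
 * exceed the threshold, so the common case never touches the shared
 * cacheline. All names below are made up for the example.
 */
#if 0	/* illustrative example only, never compiled */
struct example_counter {
	long global;		/* shared, expensive to update */
	long cpu_diff;		/* one of these per CPU in the real code */
	long threshold;
};

static void example_mod(struct example_counter *c, long delta)
{
	long x = c->cpu_diff + delta;

	if (x > c->threshold || x < -c->threshold) {
		c->global += x;	/* fold into the shared counter */
		x = 0;
	}
	c->cpu_diff = x;
}
#endif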
240
241/*
242 * Optimized increment and decrement functions.
243 *
244 * These are only for a single page and therefore can take a struct page *
245 * argument instead of struct zone *. This allows the inclusion of the code
246 * generated for page_zone(page) into the optimized functions.
247 *
248 * No overflow check is necessary and therefore the differential can be
249 * incremented or decremented in place which may allow the compilers to
250 * generate better code.
251 * The increment or decrement is known and therefore one boundary check can
252 * be omitted.
253 *
254 * NOTE: These functions are very performance sensitive. Change only
255 * with care.
256 *
257 * Some processors have inc/dec instructions that are atomic vs an interrupt.
258 * However, the code must first determine the differential location in a zone
259 * based on the processor number and then inc/dec the counter. There is no
260 * guarantee without disabling preemption that the processor will not change
261 * in between and therefore the atomicity vs. interrupt cannot be exploited
262 * in a useful way here.
263 */
264void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
265{
266 struct per_cpu_pageset __percpu *pcp = zone->pageset;
267 s8 __percpu *p = pcp->vm_stat_diff + item;
268 s8 v, t;
269
270 v = __this_cpu_inc_return(*p);
271 t = __this_cpu_read(pcp->stat_threshold);
272 if (unlikely(v > t)) {
273 s8 overstep = t >> 1;
274
275 zone_page_state_add(v + overstep, zone, item);
276 __this_cpu_write(*p, -overstep);
277 }
278}
279
280void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
281{
282 __inc_zone_state(page_zone(page), item);
283}
284EXPORT_SYMBOL(__inc_zone_page_state);
285
286void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
287{
288 struct per_cpu_pageset __percpu *pcp = zone->pageset;
289 s8 __percpu *p = pcp->vm_stat_diff + item;
290 s8 v, t;
291
292 v = __this_cpu_dec_return(*p);
293 t = __this_cpu_read(pcp->stat_threshold);
294 if (unlikely(v < - t)) {
295 s8 overstep = t >> 1;
296
297 zone_page_state_add(v - overstep, zone, item);
298 __this_cpu_write(*p, overstep);
299 }
300}
301
302void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
303{
304 __dec_zone_state(page_zone(page), item);
305}
306EXPORT_SYMBOL(__dec_zone_page_state);
307
308#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
309/*
310 * If we have cmpxchg_local support then we do not need to incur the overhead
311 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
312 *
313 * mod_state() modifies the zone counter state through atomic per cpu
314 * operations.
315 *
316 * Overstep mode specifies how overstep should be handled:
317 * 0 No overstepping
318 * 1 Overstepping half of threshold
319 * -1 Overstepping minus half of threshold
320*/
321static inline void mod_state(struct zone *zone, enum zone_stat_item item,
322 long delta, int overstep_mode)
323{
324 struct per_cpu_pageset __percpu *pcp = zone->pageset;
325 s8 __percpu *p = pcp->vm_stat_diff + item;
326 long o, n, t, z;
327
328 do {
329 z = 0; /* overflow to zone counters */
330
331 /*
332 * The fetching of the stat_threshold is racy. We may apply
333 * a counter threshold to the wrong cpu if we get
334 * rescheduled while executing here. However, the next
335 * counter update will apply the threshold again and
336 * therefore bring the counter under the threshold again.
337 *
338 * Most of the time the thresholds are the same anyway
339 * for all cpus in a zone.
340 */
341 t = this_cpu_read(pcp->stat_threshold);
342
343 o = this_cpu_read(*p);
344 n = delta + o;
345
346 if (n > t || n < -t) {
347 int os = overstep_mode * (t >> 1) ;
348
349 /* Overflow must be added to zone counters */
350 z = n + os;
351 n = -os;
352 }
353 } while (this_cpu_cmpxchg(*p, o, n) != o);
354
355 if (z)
356 zone_page_state_add(z, zone, item);
357}
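/*
 * Illustrative sketch (not part of vmstat.c): the retry loop above
 * expressed with C11 atomics. A new value is computed from a snapshot
 * and installed only if nothing raced in between; on failure the
 * snapshot is refreshed and the calculation repeats. Overstep handling
 * is omitted for brevity and all names are made up for the example.
 */
#if 0	/* illustrative example only, never compiled */
#include <stdatomic.h>

static void example_mod_state(_Atomic long *diff, _Atomic long *global,
			      long delta, long threshold)
{
	long o, n, z;

	o = atomic_load(diff);
	do {
		z = 0;
		n = o + delta;
		if (n > threshold || n < -threshold) {
			z = n;	/* whole diff spills to the global counter */
			n = 0;
		}
		/* on failure, o is reloaded with the current value */
	} while (!atomic_compare_exchange_weak(diff, &o, n));

	if (z)
		atomic_fetch_add(global, z);
}
#endif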
358
359void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
360 long delta)
361{
362 mod_state(zone, item, delta, 0);
363}
364EXPORT_SYMBOL(mod_zone_page_state);
365
366void inc_zone_state(struct zone *zone, enum zone_stat_item item)
367{
368 mod_state(zone, item, 1, 1);
369}
370
371void inc_zone_page_state(struct page *page, enum zone_stat_item item)
372{
373 mod_state(page_zone(page), item, 1, 1);
374}
375EXPORT_SYMBOL(inc_zone_page_state);
376
377void dec_zone_page_state(struct page *page, enum zone_stat_item item)
378{
379 mod_state(page_zone(page), item, -1, -1);
380}
381EXPORT_SYMBOL(dec_zone_page_state);
382#else
383/*
384 * Use interrupt disable to serialize counter updates
385 */
386void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
387 long delta)
388{
389 unsigned long flags;
390
391 local_irq_save(flags);
392 __mod_zone_page_state(zone, item, delta);
393 local_irq_restore(flags);
394}
395EXPORT_SYMBOL(mod_zone_page_state);
396
397void inc_zone_state(struct zone *zone, enum zone_stat_item item)
398{
399 unsigned long flags;
400
401 local_irq_save(flags);
402 __inc_zone_state(zone, item);
403 local_irq_restore(flags);
404}
405
406void inc_zone_page_state(struct page *page, enum zone_stat_item item)
407{
408 unsigned long flags;
409 struct zone *zone;
410
411 zone = page_zone(page);
412 local_irq_save(flags);
413 __inc_zone_state(zone, item);
414 local_irq_restore(flags);
415}
416EXPORT_SYMBOL(inc_zone_page_state);
417
418void dec_zone_page_state(struct page *page, enum zone_stat_item item)
419{
420 unsigned long flags;
421
422 local_irq_save(flags);
423 __dec_zone_page_state(page, item);
424 local_irq_restore(flags);
425}
426EXPORT_SYMBOL(dec_zone_page_state);
427#endif
428
429
430/*
431 * Fold a differential into the global counters.
432 * Returns the number of counters updated.
433 */
434static int fold_diff(int *diff)
435{
436 int i;
437 int changes = 0;
438
439 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
440 if (diff[i]) {
441 atomic_long_add(diff[i], &vm_stat[i]);
442 changes++;
443 }
444 return changes;
445}
446
447/*
448 * Update the zone counters for the current cpu.
449 *
450 * Note that refresh_cpu_vm_stats strives to only access
451 * node local memory. The per cpu pagesets on remote zones are placed
452 * in the memory local to the processor using that pageset. So the
453 * loop over all zones will access a series of cachelines local to
454 * the processor.
455 *
456 * The call to zone_page_state_add updates the cachelines with the
457 * statistics in the remote zone struct as well as the global cachelines
458 * with the global counters. These could cause remote node cache line
459 * bouncing and will have to be only done when necessary.
460 *
461 * The function returns the number of global counters updated.
462 */
463static int refresh_cpu_vm_stats(bool do_pagesets)
464{
465 struct zone *zone;
466 int i;
467 int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
468 int changes = 0;
469
470 for_each_populated_zone(zone) {
471 struct per_cpu_pageset __percpu *p = zone->pageset;
472
473 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
474 int v;
475
476 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
477 if (v) {
478
479 atomic_long_add(v, &zone->vm_stat[i]);
480 global_diff[i] += v;
481#ifdef CONFIG_NUMA
482 /* 3 seconds idle till flush */
483 __this_cpu_write(p->expire, 3);
484#endif
485 }
486 }
487#ifdef CONFIG_NUMA
488 if (do_pagesets) {
489 cond_resched();
490 /*
491 * Deal with draining the remote pageset of this
492 * processor
493 *
494 * Check if there are pages remaining in this pageset
495 * if not then there is nothing to expire.
496 */
497 if (!__this_cpu_read(p->expire) ||
498 !__this_cpu_read(p->pcp.count))
499 continue;
500
501 /*
502 * We never drain zones local to this processor.
503 */
504 if (zone_to_nid(zone) == numa_node_id()) {
505 __this_cpu_write(p->expire, 0);
506 continue;
507 }
508
509 if (__this_cpu_dec_return(p->expire))
510 continue;
511
512 if (__this_cpu_read(p->pcp.count)) {
513 drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
514 changes++;
515 }
516 }
517#endif
518 }
519 changes += fold_diff(global_diff);
520 return changes;
521}
522
523/*
524 * Fold the data for an offline cpu into the global array.
525 * There cannot be any access by the offline cpu and therefore
526 * synchronization is simplified.
527 */
528void cpu_vm_stats_fold(int cpu)
529{
530 struct zone *zone;
531 int i;
532 int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
533
534 for_each_populated_zone(zone) {
535 struct per_cpu_pageset *p;
536
537 p = per_cpu_ptr(zone->pageset, cpu);
538
539 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
540 if (p->vm_stat_diff[i]) {
541 int v;
542
543 v = p->vm_stat_diff[i];
544 p->vm_stat_diff[i] = 0;
545 atomic_long_add(v, &zone->vm_stat[i]);
546 global_diff[i] += v;
547 }
548 }
549
550 fold_diff(global_diff);
551}
552
553/*
554 * This is only called if !populated_zone(zone), which implies no other users of
555 * pset->vm_stat_diff[] exist.
556 */
557void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
558{
559 int i;
560
561 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
562 if (pset->vm_stat_diff[i]) {
563 int v = pset->vm_stat_diff[i];
564 pset->vm_stat_diff[i] = 0;
565 atomic_long_add(v, &zone->vm_stat[i]);
566 atomic_long_add(v, &vm_stat[i]);
567 }
568}
569#endif
570
571#ifdef CONFIG_NUMA
572/*
573 * zonelist = the list of zones passed to the allocator
574 * z = the zone from which the allocation occurred.
575 *
576 * Must be called with interrupts disabled.
577 *
578 * When __GFP_OTHER_NODE is set assume the node of the preferred
579 * zone is the local node. This is useful for daemons who allocate
580 * memory on behalf of other processes.
581 */
582void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
583{
584 if (z->zone_pgdat == preferred_zone->zone_pgdat) {
585 __inc_zone_state(z, NUMA_HIT);
586 } else {
587 __inc_zone_state(z, NUMA_MISS);
588 __inc_zone_state(preferred_zone, NUMA_FOREIGN);
589 }
590 if (z->node == ((flags & __GFP_OTHER_NODE) ?
591 preferred_zone->node : numa_node_id()))
592 __inc_zone_state(z, NUMA_LOCAL);
593 else
594 __inc_zone_state(z, NUMA_OTHER);
595}
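/*
 * Illustrative example: a page taken from the preferred zone's node is a
 * NUMA_HIT on that zone; falling back to another node is a NUMA_MISS
 * there plus a NUMA_FOREIGN on the preferred zone. With __GFP_OTHER_NODE
 * "local" is judged against the preferred node rather than the node of
 * the allocating CPU, so a daemon running on node 0 that allocates on
 * behalf of a task preferring node 1 still records NUMA_LOCAL when the
 * page comes from node 1.
 */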
596
597/*
598 * Determine the per node value of a stat item.
599 */
600unsigned long node_page_state(int node, enum zone_stat_item item)
601{
602 struct zone *zones = NODE_DATA(node)->node_zones;
603
604 return
605#ifdef CONFIG_ZONE_DMA
606 zone_page_state(&zones[ZONE_DMA], item) +
607#endif
608#ifdef CONFIG_ZONE_DMA32
609 zone_page_state(&zones[ZONE_DMA32], item) +
610#endif
611#ifdef CONFIG_HIGHMEM
612 zone_page_state(&zones[ZONE_HIGHMEM], item) +
613#endif
614 zone_page_state(&zones[ZONE_NORMAL], item) +
615 zone_page_state(&zones[ZONE_MOVABLE], item);
616}
617
618#endif
619
620#ifdef CONFIG_COMPACTION
621
622struct contig_page_info {
623 unsigned long free_pages;
624 unsigned long free_blocks_total;
625 unsigned long free_blocks_suitable;
626};
627
628/*
629 * Calculate the number of free pages in a zone, how many contiguous
630 * pages are free and how many are large enough to satisfy an allocation of
631 * the target size. Note that this function makes no attempt to estimate
632 * how many suitable free blocks there *might* be if MOVABLE pages were
633 * migrated. Calculating that is possible, but expensive and can be
634 * figured out from userspace
635 */
636static void fill_contig_page_info(struct zone *zone,
637 unsigned int suitable_order,
638 struct contig_page_info *info)
639{
640 unsigned int order;
641
642 info->free_pages = 0;
643 info->free_blocks_total = 0;
644 info->free_blocks_suitable = 0;
645
646 for (order = 0; order < MAX_ORDER; order++) {
647 unsigned long blocks;
648
649 /* Count number of free blocks */
650 blocks = zone->free_area[order].nr_free;
651 info->free_blocks_total += blocks;
652
653 /* Count free base pages */
654 info->free_pages += blocks << order;
655
656 /* Count the suitable free blocks */
657 if (order >= suitable_order)
658 info->free_blocks_suitable += blocks <<
659 (order - suitable_order);
660 }
661}
662
663/*
664 * A fragmentation index only makes sense if an allocation of a requested
665 * size would fail. If that is true, the fragmentation index indicates
666 * whether external fragmentation or a lack of memory was the problem.
667 * The value can be used to determine if page reclaim or compaction
668 * should be used
669 */
670static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
671{
672 unsigned long requested = 1UL << order;
673
674 if (!info->free_blocks_total)
675 return 0;
676
677 /* Fragmentation index only makes sense when a request would fail */
678 if (info->free_blocks_suitable)
679 return -1000;
680
681 /*
682 * Index is between 0 and 1 so return within 3 decimal places
683 *
684 * 0 => allocation would fail due to lack of memory
685 * 1 => allocation would fail due to fragmentation
686 */
687 return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total);
688}
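/*
 * Worked example (illustrative numbers, not from the source): for an
 * order-4 request (16 pages) against 1000 free pages split into 250 free
 * blocks, none of them order 4 or larger, the index is
 *
 *	1000 - (1000 + 1000 * 1000 / 16) / 250 = 1000 - 254 = 746
 *
 * i.e. 0.746: plenty of memory is free but it is fragmented into small
 * blocks, so compaction is the better remedy. As the amount of free
 * memory shrinks the index drops towards 0 and reclaim becomes the
 * better choice.
 */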
689
690/* Same as __fragmentation_index() but allocates contig_page_info on the stack */
691int fragmentation_index(struct zone *zone, unsigned int order)
692{
693 struct contig_page_info info;
694
695 fill_contig_page_info(zone, order, &info);
696 return __fragmentation_index(order, &info);
697}
698#endif
699
700#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
701#ifdef CONFIG_ZONE_DMA
702#define TEXT_FOR_DMA(xx) xx "_dma",
703#else
704#define TEXT_FOR_DMA(xx)
705#endif
706
707#ifdef CONFIG_ZONE_DMA32
708#define TEXT_FOR_DMA32(xx) xx "_dma32",
709#else
710#define TEXT_FOR_DMA32(xx)
711#endif
712
713#ifdef CONFIG_HIGHMEM
714#define TEXT_FOR_HIGHMEM(xx) xx "_high",
715#else
716#define TEXT_FOR_HIGHMEM(xx)
717#endif
718
719#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
720 TEXT_FOR_HIGHMEM(xx) xx "_movable",
721
722const char * const vmstat_text[] = {
723 /* enum zone_stat_item counters */
724 "nr_free_pages",
725 "nr_alloc_batch",
726 "nr_inactive_anon",
727 "nr_active_anon",
728 "nr_inactive_file",
729 "nr_active_file",
730 "nr_unevictable",
731 "nr_mlock",
732 "nr_anon_pages",
733 "nr_mapped",
734 "nr_file_pages",
735 "nr_dirty",
736 "nr_writeback",
737 "nr_slab_reclaimable",
738 "nr_slab_unreclaimable",
739 "nr_page_table_pages",
740 "nr_kernel_stack",
741 "nr_unstable",
742 "nr_bounce",
743 "nr_vmscan_write",
744 "nr_vmscan_immediate_reclaim",
745 "nr_writeback_temp",
746 "nr_isolated_anon",
747 "nr_isolated_file",
748 "nr_shmem",
749 "nr_dirtied",
750 "nr_written",
751 "nr_pages_scanned",
752
753#ifdef CONFIG_NUMA
754 "numa_hit",
755 "numa_miss",
756 "numa_foreign",
757 "numa_interleave",
758 "numa_local",
759 "numa_other",
760#endif
761 "workingset_refault",
762 "workingset_activate",
763 "workingset_nodereclaim",
764 "nr_anon_transparent_hugepages",
765 "nr_free_cma",
766
767 /* enum writeback_stat_item counters */
768 "nr_dirty_threshold",
769 "nr_dirty_background_threshold",
770
771#ifdef CONFIG_VM_EVENT_COUNTERS
772 /* enum vm_event_item counters */
773 "pgpgin",
774 "pgpgout",
775 "pswpin",
776 "pswpout",
777
778 TEXTS_FOR_ZONES("pgalloc")
779
780 "pgfree",
781 "pgactivate",
782 "pgdeactivate",
783
784 "pgfault",
785 "pgmajfault",
786 "pglazyfreed",
787
788 TEXTS_FOR_ZONES("pgrefill")
789 TEXTS_FOR_ZONES("pgsteal_kswapd")
790 TEXTS_FOR_ZONES("pgsteal_direct")
791 TEXTS_FOR_ZONES("pgscan_kswapd")
792 TEXTS_FOR_ZONES("pgscan_direct")
793 "pgscan_direct_throttle",
794
795#ifdef CONFIG_NUMA
796 "zone_reclaim_failed",
797#endif
798 "pginodesteal",
799 "slabs_scanned",
800 "kswapd_inodesteal",
801 "kswapd_low_wmark_hit_quickly",
802 "kswapd_high_wmark_hit_quickly",
803 "pageoutrun",
804 "allocstall",
805
806 "pgrotated",
807
808 "drop_pagecache",
809 "drop_slab",
810
811#ifdef CONFIG_NUMA_BALANCING
812 "numa_pte_updates",
813 "numa_huge_pte_updates",
814 "numa_hint_faults",
815 "numa_hint_faults_local",
816 "numa_pages_migrated",
817#endif
818#ifdef CONFIG_MIGRATION
819 "pgmigrate_success",
820 "pgmigrate_fail",
821#endif
822#ifdef CONFIG_COMPACTION
823 "compact_migrate_scanned",
824 "compact_free_scanned",
825 "compact_isolated",
826 "compact_stall",
827 "compact_fail",
828 "compact_success",
829 "compact_daemon_wake",
830#endif
831
832#ifdef CONFIG_HUGETLB_PAGE
833 "htlb_buddy_alloc_success",
834 "htlb_buddy_alloc_fail",
835#endif
836 "unevictable_pgs_culled",
837 "unevictable_pgs_scanned",
838 "unevictable_pgs_rescued",
839 "unevictable_pgs_mlocked",
840 "unevictable_pgs_munlocked",
841 "unevictable_pgs_cleared",
842 "unevictable_pgs_stranded",
843
844#ifdef CONFIG_TRANSPARENT_HUGEPAGE
845 "thp_fault_alloc",
846 "thp_fault_fallback",
847 "thp_collapse_alloc",
848 "thp_collapse_alloc_failed",
849 "thp_split_page",
850 "thp_split_page_failed",
851 "thp_deferred_split_page",
852 "thp_split_pmd",
853 "thp_zero_page_alloc",
854 "thp_zero_page_alloc_failed",
855#endif
856#ifdef CONFIG_MEMORY_BALLOON
857 "balloon_inflate",
858 "balloon_deflate",
859#ifdef CONFIG_BALLOON_COMPACTION
860 "balloon_migrate",
861#endif
862#endif /* CONFIG_MEMORY_BALLOON */
863#ifdef CONFIG_DEBUG_TLBFLUSH
864#ifdef CONFIG_SMP
865 "nr_tlb_remote_flush",
866 "nr_tlb_remote_flush_received",
867#endif /* CONFIG_SMP */
868 "nr_tlb_local_flush_all",
869 "nr_tlb_local_flush_one",
870#endif /* CONFIG_DEBUG_TLBFLUSH */
871
872#ifdef CONFIG_DEBUG_VM_VMACACHE
873 "vmacache_find_calls",
874 "vmacache_find_hits",
875 "vmacache_full_flushes",
876#endif
877#endif /* CONFIG_VM_EVENT_COUNTERS */
878};
879#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
880
881
882#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
883 defined(CONFIG_PROC_FS)
884static void *frag_start(struct seq_file *m, loff_t *pos)
885{
886 pg_data_t *pgdat;
887 loff_t node = *pos;
888
889 for (pgdat = first_online_pgdat();
890 pgdat && node;
891 pgdat = next_online_pgdat(pgdat))
892 --node;
893
894 return pgdat;
895}
896
897static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
898{
899 pg_data_t *pgdat = (pg_data_t *)arg;
900
901 (*pos)++;
902 return next_online_pgdat(pgdat);
903}
904
905static void frag_stop(struct seq_file *m, void *arg)
906{
907}
908
909/* Walk all the zones in a node and print using a callback */
910static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
911 void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
912{
913 struct zone *zone;
914 struct zone *node_zones = pgdat->node_zones;
915 unsigned long flags;
916
917 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
918 if (!populated_zone(zone))
919 continue;
920
921 spin_lock_irqsave(&zone->lock, flags);
922 print(m, pgdat, zone);
923 spin_unlock_irqrestore(&zone->lock, flags);
924 }
925}
926#endif
927
928#ifdef CONFIG_PROC_FS
929static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
930 struct zone *zone)
931{
932 int order;
933
934 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
935 for (order = 0; order < MAX_ORDER; ++order)
936 seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
937 seq_putc(m, '\n');
938}
939
940/*
941 * This walks the free areas for each zone.
942 */
943static int frag_show(struct seq_file *m, void *arg)
944{
945 pg_data_t *pgdat = (pg_data_t *)arg;
946 walk_zones_in_node(m, pgdat, frag_show_print);
947 return 0;
948}
949
950static void pagetypeinfo_showfree_print(struct seq_file *m,
951 pg_data_t *pgdat, struct zone *zone)
952{
953 int order, mtype;
954
955 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
956 seq_printf(m, "Node %4d, zone %8s, type %12s ",
957 pgdat->node_id,
958 zone->name,
959 migratetype_names[mtype]);
960 for (order = 0; order < MAX_ORDER; ++order) {
961 unsigned long freecount = 0;
962 struct free_area *area;
963 struct list_head *curr;
964
965 area = &(zone->free_area[order]);
966
967 list_for_each(curr, &area->free_list[mtype])
968 freecount++;
969 seq_printf(m, "%6lu ", freecount);
970 }
971 seq_putc(m, '\n');
972 }
973}
974
975/* Print out the free pages at each order for each migratetype */
976static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
977{
978 int order;
979 pg_data_t *pgdat = (pg_data_t *)arg;
980
981 /* Print header */
982 seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
983 for (order = 0; order < MAX_ORDER; ++order)
984 seq_printf(m, "%6d ", order);
985 seq_putc(m, '\n');
986
987 walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);
988
989 return 0;
990}
991
992static void pagetypeinfo_showblockcount_print(struct seq_file *m,
993 pg_data_t *pgdat, struct zone *zone)
994{
995 int mtype;
996 unsigned long pfn;
997 unsigned long start_pfn = zone->zone_start_pfn;
998 unsigned long end_pfn = zone_end_pfn(zone);
999 unsigned long count[MIGRATE_TYPES] = { 0, };
1000
1001 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
1002 struct page *page;
1003
1004 if (!pfn_valid(pfn))
1005 continue;
1006
1007 page = pfn_to_page(pfn);
1008
1009 /* Watch for unexpected holes punched in the memmap */
1010 if (!memmap_valid_within(pfn, page, zone))
1011 continue;
1012
1013 mtype = get_pageblock_migratetype(page);
1014
1015 if (mtype < MIGRATE_TYPES)
1016 count[mtype]++;
1017 }
1018
1019 /* Print counts */
1020 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1021 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1022 seq_printf(m, "%12lu ", count[mtype]);
1023 seq_putc(m, '\n');
1024}
1025
1026/* Print out the number of pageblocks for each migratetype */
1027static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
1028{
1029 int mtype;
1030 pg_data_t *pgdat = (pg_data_t *)arg;
1031
1032 seq_printf(m, "\n%-23s", "Number of blocks type ");
1033 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1034 seq_printf(m, "%12s ", migratetype_names[mtype]);
1035 seq_putc(m, '\n');
1036 walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);
1037
1038 return 0;
1039}
1040
1041#ifdef CONFIG_PAGE_OWNER
1042static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
1043 pg_data_t *pgdat,
1044 struct zone *zone)
1045{
1046 struct page *page;
1047 struct page_ext *page_ext;
1048 unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
1049 unsigned long end_pfn = pfn + zone->spanned_pages;
1050 unsigned long count[MIGRATE_TYPES] = { 0, };
1051 int pageblock_mt, page_mt;
1052 int i;
1053
1054 /* Scan block by block. First and last block may be incomplete */
1055 pfn = zone->zone_start_pfn;
1056
1057 /*
1058 * Walk the zone in pageblock_nr_pages steps. If a page block spans
1059 * a zone boundary, it will be double counted between zones. This does
1060 * not matter as the mixed block count will still be correct
1061 */
1062 for (; pfn < end_pfn; ) {
1063 if (!pfn_valid(pfn)) {
1064 pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
1065 continue;
1066 }
1067
1068 block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
1069 block_end_pfn = min(block_end_pfn, end_pfn);
1070
1071 page = pfn_to_page(pfn);
1072 pageblock_mt = get_pfnblock_migratetype(page, pfn);
1073
1074 for (; pfn < block_end_pfn; pfn++) {
1075 if (!pfn_valid_within(pfn))
1076 continue;
1077
1078 page = pfn_to_page(pfn);
1079 if (PageBuddy(page)) {
1080 pfn += (1UL << page_order(page)) - 1;
1081 continue;
1082 }
1083
1084 if (PageReserved(page))
1085 continue;
1086
1087 page_ext = lookup_page_ext(page);
1088
1089 if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
1090 continue;
1091
1092 page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
1093 if (pageblock_mt != page_mt) {
1094 if (is_migrate_cma(pageblock_mt))
1095 count[MIGRATE_MOVABLE]++;
1096 else
1097 count[pageblock_mt]++;
1098
1099 pfn = block_end_pfn;
1100 break;
1101 }
1102 pfn += (1UL << page_ext->order) - 1;
1103 }
1104 }
1105
1106 /* Print counts */
1107 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1108 for (i = 0; i < MIGRATE_TYPES; i++)
1109 seq_printf(m, "%12lu ", count[i]);
1110 seq_putc(m, '\n');
1111}
1112#endif /* CONFIG_PAGE_OWNER */
1113
1114/*
1115 * Print out the number of pageblocks for each migratetype that contain pages
1116 * of other types. This gives an indication of how well fallbacks are being
1117 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
1118 * to determine what is going on
1119 */
1120static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
1121{
1122#ifdef CONFIG_PAGE_OWNER
1123 int mtype;
1124
1125 if (!static_branch_unlikely(&page_owner_inited))
1126 return;
1127
1128 drain_all_pages(NULL);
1129
1130 seq_printf(m, "\n%-23s", "Number of mixed blocks ");
1131 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1132 seq_printf(m, "%12s ", migratetype_names[mtype]);
1133 seq_putc(m, '\n');
1134
1135 walk_zones_in_node(m, pgdat, pagetypeinfo_showmixedcount_print);
1136#endif /* CONFIG_PAGE_OWNER */
1137}
1138
1139/*
1140 * This prints out statistics in relation to grouping pages by mobility.
1141 * It is expensive to collect so do not constantly read the file.
1142 */
1143static int pagetypeinfo_show(struct seq_file *m, void *arg)
1144{
1145 pg_data_t *pgdat = (pg_data_t *)arg;
1146
1147 /* check memoryless node */
1148 if (!node_state(pgdat->node_id, N_MEMORY))
1149 return 0;
1150
1151 seq_printf(m, "Page block order: %d\n", pageblock_order);
1152 seq_printf(m, "Pages per block: %lu\n", pageblock_nr_pages);
1153 seq_putc(m, '\n');
1154 pagetypeinfo_showfree(m, pgdat);
1155 pagetypeinfo_showblockcount(m, pgdat);
1156 pagetypeinfo_showmixedcount(m, pgdat);
1157
1158 return 0;
1159}
1160
1161static const struct seq_operations fragmentation_op = {
1162 .start = frag_start,
1163 .next = frag_next,
1164 .stop = frag_stop,
1165 .show = frag_show,
1166};
1167
1168static int fragmentation_open(struct inode *inode, struct file *file)
1169{
1170 return seq_open(file, &fragmentation_op);
1171}
1172
1173static const struct file_operations fragmentation_file_operations = {
1174 .open = fragmentation_open,
1175 .read = seq_read,
1176 .llseek = seq_lseek,
1177 .release = seq_release,
1178};
1179
1180static const struct seq_operations pagetypeinfo_op = {
1181 .start = frag_start,
1182 .next = frag_next,
1183 .stop = frag_stop,
1184 .show = pagetypeinfo_show,
1185};
1186
1187static int pagetypeinfo_open(struct inode *inode, struct file *file)
1188{
1189 return seq_open(file, &pagetypeinfo_op);
1190}
1191
1192static const struct file_operations pagetypeinfo_file_ops = {
1193 .open = pagetypeinfo_open,
1194 .read = seq_read,
1195 .llseek = seq_lseek,
1196 .release = seq_release,
1197};
1198
1199static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1200 struct zone *zone)
1201{
1202 int i;
1203 seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1204 seq_printf(m,
1205 "\n pages free %lu"
1206 "\n min %lu"
1207 "\n low %lu"
1208 "\n high %lu"
1209 "\n scanned %lu"
1210 "\n spanned %lu"
1211 "\n present %lu"
1212 "\n managed %lu",
1213 zone_page_state(zone, NR_FREE_PAGES),
1214 min_wmark_pages(zone),
1215 low_wmark_pages(zone),
1216 high_wmark_pages(zone),
1217 zone_page_state(zone, NR_PAGES_SCANNED),
1218 zone->spanned_pages,
1219 zone->present_pages,
1220 zone->managed_pages);
1221
1222 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1223 seq_printf(m, "\n %-12s %lu", vmstat_text[i],
1224 zone_page_state(zone, i));
1225
1226 seq_printf(m,
1227 "\n protection: (%ld",
1228 zone->lowmem_reserve[0]);
1229 for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
1230 seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
1231 seq_printf(m,
1232 ")"
1233 "\n pagesets");
1234 for_each_online_cpu(i) {
1235 struct per_cpu_pageset *pageset;
1236
1237 pageset = per_cpu_ptr(zone->pageset, i);
1238 seq_printf(m,
1239 "\n cpu: %i"
1240 "\n count: %i"
1241 "\n high: %i"
1242 "\n batch: %i",
1243 i,
1244 pageset->pcp.count,
1245 pageset->pcp.high,
1246 pageset->pcp.batch);
1247#ifdef CONFIG_SMP
1248 seq_printf(m, "\n vm stats threshold: %d",
1249 pageset->stat_threshold);
1250#endif
1251 }
1252 seq_printf(m,
1253 "\n all_unreclaimable: %u"
1254 "\n start_pfn: %lu"
1255 "\n inactive_ratio: %u",
1256 !zone_reclaimable(zone),
1257 zone->zone_start_pfn,
1258 zone->inactive_ratio);
1259 seq_putc(m, '\n');
1260}
1261
1262/*
1263 * Output information about zones in @pgdat.
1264 */
1265static int zoneinfo_show(struct seq_file *m, void *arg)
1266{
1267 pg_data_t *pgdat = (pg_data_t *)arg;
1268 walk_zones_in_node(m, pgdat, zoneinfo_show_print);
1269 return 0;
1270}
1271
1272static const struct seq_operations zoneinfo_op = {
1273 .start = frag_start, /* iterate over all zones. The same as in
1274 * fragmentation. */
1275 .next = frag_next,
1276 .stop = frag_stop,
1277 .show = zoneinfo_show,
1278};
1279
1280static int zoneinfo_open(struct inode *inode, struct file *file)
1281{
1282 return seq_open(file, &zoneinfo_op);
1283}
1284
1285static const struct file_operations proc_zoneinfo_file_operations = {
1286 .open = zoneinfo_open,
1287 .read = seq_read,
1288 .llseek = seq_lseek,
1289 .release = seq_release,
1290};
1291
1292enum writeback_stat_item {
1293 NR_DIRTY_THRESHOLD,
1294 NR_DIRTY_BG_THRESHOLD,
1295 NR_VM_WRITEBACK_STAT_ITEMS,
1296};
1297
1298static void *vmstat_start(struct seq_file *m, loff_t *pos)
1299{
1300 unsigned long *v;
1301 int i, stat_items_size;
1302
1303 if (*pos >= ARRAY_SIZE(vmstat_text))
1304 return NULL;
1305 stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
1306 NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
1307
1308#ifdef CONFIG_VM_EVENT_COUNTERS
1309 stat_items_size += sizeof(struct vm_event_state);
1310#endif
1311
1312 v = kmalloc(stat_items_size, GFP_KERNEL);
1313 m->private = v;
1314 if (!v)
1315 return ERR_PTR(-ENOMEM);
1316 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1317 v[i] = global_page_state(i);
1318 v += NR_VM_ZONE_STAT_ITEMS;
1319
1320 global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1321 v + NR_DIRTY_THRESHOLD);
1322 v += NR_VM_WRITEBACK_STAT_ITEMS;
1323
1324#ifdef CONFIG_VM_EVENT_COUNTERS
1325 all_vm_events(v);
1326 v[PGPGIN] /= 2; /* sectors -> kbytes */
1327 v[PGPGOUT] /= 2;
1328#endif
1329 return (unsigned long *)m->private + *pos;
1330}
1331
1332static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1333{
1334 (*pos)++;
1335 if (*pos >= ARRAY_SIZE(vmstat_text))
1336 return NULL;
1337 return (unsigned long *)m->private + *pos;
1338}
1339
1340static int vmstat_show(struct seq_file *m, void *arg)
1341{
1342 unsigned long *l = arg;
1343 unsigned long off = l - (unsigned long *)m->private;
1344
1345 seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
1346 return 0;
1347}
1348
1349static void vmstat_stop(struct seq_file *m, void *arg)
1350{
1351 kfree(m->private);
1352 m->private = NULL;
1353}
1354
1355static const struct seq_operations vmstat_op = {
1356 .start = vmstat_start,
1357 .next = vmstat_next,
1358 .stop = vmstat_stop,
1359 .show = vmstat_show,
1360};
1361
1362static int vmstat_open(struct inode *inode, struct file *file)
1363{
1364 return seq_open(file, &vmstat_op);
1365}
1366
1367static const struct file_operations proc_vmstat_file_operations = {
1368 .open = vmstat_open,
1369 .read = seq_read,
1370 .llseek = seq_lseek,
1371 .release = seq_release,
1372};
1373#endif /* CONFIG_PROC_FS */
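/*
 * Illustrative sketch (not part of vmstat.c): the seq_file operations
 * above are what back an ordinary read of /proc/vmstat, which yields one
 * "name value" pair per line. A minimal userspace consumer might look
 * like this.
 */
#if 0	/* illustrative example only, never compiled */
#include <stdio.h>

int main(void)
{
	char name[64];
	unsigned long val;
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f)
		return 1;
	while (fscanf(f, "%63s %lu", name, &val) == 2)
		printf("%-35s %lu\n", name, val);	/* e.g. "nr_free_pages 123456" */
	fclose(f);
	return 0;
}
#endif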
1374
1375#ifdef CONFIG_SMP
1376static struct workqueue_struct *vmstat_wq;
1377static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1378int sysctl_stat_interval __read_mostly = HZ;
1379static cpumask_var_t cpu_stat_off;
1380
1381static void vmstat_update(struct work_struct *w)
1382{
1383 if (refresh_cpu_vm_stats(true)) {
1384 /*
1385 * Counters were updated so we expect more updates
1386 * to occur in the future. Keep on running the
1387 * update worker thread.
1388 * If we were marked on cpu_stat_off clear the flag
1389 * so that vmstat_shepherd doesn't schedule us again.
1390 */
1391 if (!cpumask_test_and_clear_cpu(smp_processor_id(),
1392 cpu_stat_off)) {
1393 queue_delayed_work_on(smp_processor_id(), vmstat_wq,
1394 this_cpu_ptr(&vmstat_work),
1395 round_jiffies_relative(sysctl_stat_interval));
1396 }
1397 } else {
1398 /*
1399 * We did not update any counters so the app may be in
1400 * a mode where it does not cause counter updates.
1401 * We may be uselessly running vmstat_update.
1402 * Defer the checking for differentials to the
1403 * shepherd thread on a different processor.
1404 */
1405 cpumask_set_cpu(smp_processor_id(), cpu_stat_off);
1406 }
1407}
1408
1409/*
1410 * Switch off vmstat processing and then fold all the remaining differentials
1411 * until the diffs stay at zero: this is what quiet_vmstat() below does. It is
1412 * used by NOHZ and can only be invoked when tick processing is not active.
1413 */
1414/*
1415 * Check if the diffs for a certain cpu indicate that
1416 * an update is needed.
1417 */
1418static bool need_update(int cpu)
1419{
1420 struct zone *zone;
1421
1422 for_each_populated_zone(zone) {
1423 struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);
1424
1425 BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1);
1426 /*
1427 * The fast way of checking if there are any vmstat diffs.
1428 * This works because the diffs are byte sized items.
1429 */
1430 if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS))
1431 return true;
1432
1433 }
1434 return false;
1435}
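/*
 * Illustrative note: memchr_inv(buf, 0, len) returns a pointer to the
 * first byte that is not zero, or NULL when the whole buffer is zero, so
 * the single call above answers "is any byte-sized diff pending?". A
 * plain-C equivalent of that check (names made up for the example):
 */
#if 0	/* illustrative example only, never compiled */
static int example_any_nonzero(const signed char *diff, int nitems)
{
	int i;

	for (i = 0; i < nitems; i++)
		if (diff[i])
			return 1;
	return 0;
}
#endif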
1436
1437void quiet_vmstat(void)
1438{
1439 if (system_state != SYSTEM_RUNNING)
1440 return;
1441
1442 /*
1443 * If we are already in the hands of the shepherd then there
1444 * is nothing for us to do here.
1445 */
1446 if (cpumask_test_and_set_cpu(smp_processor_id(), cpu_stat_off))
1447 return;
1448
1449 if (!need_update(smp_processor_id()))
1450 return;
1451
1452 /*
1453 * Just refresh counters and do not care about the pending delayed
1454 * vmstat_update. It doesn't fire often enough to matter and canceling
1455 * it would be too expensive from this path.
1456 * vmstat_shepherd will take care of that for us.
1457 */
1458 refresh_cpu_vm_stats(false);
1459}
1460
1461
1462/*
1463 * Shepherd worker thread that checks the
1464 * differentials of processors that have their worker
1465 * threads for vm statistics updates disabled because of
1466 * inactivity.
1467 */
1468static void vmstat_shepherd(struct work_struct *w);
1469
1470static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
1471
1472static void vmstat_shepherd(struct work_struct *w)
1473{
1474 int cpu;
1475
1476 get_online_cpus();
1477 /* Check processors whose vmstat worker threads have been disabled */
1478 for_each_cpu(cpu, cpu_stat_off) {
1479 struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
1480
1481 if (need_update(cpu)) {
1482 if (cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
1483 queue_delayed_work_on(cpu, vmstat_wq, dw, 0);
1484 } else {
1485 /*
1486 * Cancel the work if quiet_vmstat has put this
1487 * cpu on cpu_stat_off because the work item might
1488 * still be scheduled
1489 */
1490 cancel_delayed_work(dw);
1491 }
1492 }
1493 put_online_cpus();
1494
1495 schedule_delayed_work(&shepherd,
1496 round_jiffies_relative(sysctl_stat_interval));
1497}
1498
1499static void __init start_shepherd_timer(void)
1500{
1501 int cpu;
1502
1503 for_each_possible_cpu(cpu)
1504 INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
1505 vmstat_update);
1506
1507 if (!alloc_cpumask_var(&cpu_stat_off, GFP_KERNEL))
1508 BUG();
1509 cpumask_copy(cpu_stat_off, cpu_online_mask);
1510
1511 vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1512 schedule_delayed_work(&shepherd,
1513 round_jiffies_relative(sysctl_stat_interval));
1514}
1515
1516static void vmstat_cpu_dead(int node)
1517{
1518 int cpu;
1519
1520 get_online_cpus();
1521 for_each_online_cpu(cpu)
1522 if (cpu_to_node(cpu) == node)
1523 goto end;
1524
1525 node_clear_state(node, N_CPU);
1526end:
1527 put_online_cpus();
1528}
1529
1530/*
1531 * Use the cpu notifier to ensure that the thresholds are recalculated
1532 * when necessary.
1533 */
1534static int vmstat_cpuup_callback(struct notifier_block *nfb,
1535 unsigned long action,
1536 void *hcpu)
1537{
1538 long cpu = (long)hcpu;
1539
1540 switch (action) {
1541 case CPU_ONLINE:
1542 case CPU_ONLINE_FROZEN:
1543 refresh_zone_stat_thresholds();
1544 node_set_state(cpu_to_node(cpu), N_CPU);
1545 cpumask_set_cpu(cpu, cpu_stat_off);
1546 break;
1547 case CPU_DOWN_PREPARE:
1548 case CPU_DOWN_PREPARE_FROZEN:
1549 cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
1550 cpumask_clear_cpu(cpu, cpu_stat_off);
1551 break;
1552 case CPU_DOWN_FAILED:
1553 case CPU_DOWN_FAILED_FROZEN:
1554 cpumask_set_cpu(cpu, cpu_stat_off);
1555 break;
1556 case CPU_DEAD:
1557 case CPU_DEAD_FROZEN:
1558 refresh_zone_stat_thresholds();
1559 vmstat_cpu_dead(cpu_to_node(cpu));
1560 break;
1561 default:
1562 break;
1563 }
1564 return NOTIFY_OK;
1565}
1566
1567static struct notifier_block vmstat_notifier =
1568 { &vmstat_cpuup_callback, NULL, 0 };
1569#endif
1570
1571static int __init setup_vmstat(void)
1572{
1573#ifdef CONFIG_SMP
1574 cpu_notifier_register_begin();
1575 __register_cpu_notifier(&vmstat_notifier);
1576
1577 start_shepherd_timer();
1578 cpu_notifier_register_done();
1579#endif
1580#ifdef CONFIG_PROC_FS
1581 proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
1582 proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
1583 proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
1584 proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
1585#endif
1586 return 0;
1587}
1588module_init(setup_vmstat)
1589
1590#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
1591
1592/*
1593 * Return an index indicating how much of the available free memory is
1594 * unusable for an allocation of the requested size.
1595 */
1596static int unusable_free_index(unsigned int order,
1597 struct contig_page_info *info)
1598{
1599 /* No free memory is interpreted as all free memory is unusable */
1600 if (info->free_pages == 0)
1601 return 1000;
1602
1603 /*
1604 * Index should be a value between 0 and 1. Return a value to 3
1605 * decimal places.
1606 *
1607 * 0 => no fragmentation
1608 * 1 => high fragmentation
1609 */
1610 return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
1611
1612}
1613
1614static void unusable_show_print(struct seq_file *m,
1615 pg_data_t *pgdat, struct zone *zone)
1616{
1617 unsigned int order;
1618 int index;
1619 struct contig_page_info info;
1620
1621 seq_printf(m, "Node %d, zone %8s ",
1622 pgdat->node_id,
1623 zone->name);
1624 for (order = 0; order < MAX_ORDER; ++order) {
1625 fill_contig_page_info(zone, order, &info);
1626 index = unusable_free_index(order, &info);
1627 seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1628 }
1629
1630 seq_putc(m, '\n');
1631}
1632
1633/*
1634 * Display unusable free space index
1635 *
1636 * The unusable free space index measures how much of the available free
1637 * memory cannot be used to satisfy an allocation of a given size and is a
1638 * value between 0 and 1. The higher the value, the more of the free memory is
1639 * unusable and by implication, the worse the external fragmentation is. This
1640 * can be expressed as a percentage by multiplying by 100.
1641 */
1642static int unusable_show(struct seq_file *m, void *arg)
1643{
1644 pg_data_t *pgdat = (pg_data_t *)arg;
1645
1646 /* check memoryless node */
1647 if (!node_state(pgdat->node_id, N_MEMORY))
1648 return 0;
1649
1650 walk_zones_in_node(m, pgdat, unusable_show_print);
1651
1652 return 0;
1653}
1654
1655static const struct seq_operations unusable_op = {
1656 .start = frag_start,
1657 .next = frag_next,
1658 .stop = frag_stop,
1659 .show = unusable_show,
1660};
1661
1662static int unusable_open(struct inode *inode, struct file *file)
1663{
1664 return seq_open(file, &unusable_op);
1665}
1666
1667static const struct file_operations unusable_file_ops = {
1668 .open = unusable_open,
1669 .read = seq_read,
1670 .llseek = seq_lseek,
1671 .release = seq_release,
1672};
1673
1674static void extfrag_show_print(struct seq_file *m,
1675 pg_data_t *pgdat, struct zone *zone)
1676{
1677 unsigned int order;
1678 int index;
1679
1680 /* Alloc on stack as interrupts are disabled for zone walk */
1681 struct contig_page_info info;
1682
1683 seq_printf(m, "Node %d, zone %8s ",
1684 pgdat->node_id,
1685 zone->name);
1686 for (order = 0; order < MAX_ORDER; ++order) {
1687 fill_contig_page_info(zone, order, &info);
1688 index = __fragmentation_index(order, &info);
1689 seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1690 }
1691
1692 seq_putc(m, '\n');
1693}
1694
1695/*
1696 * Display the fragmentation index for orders at which an allocation would fail
1697 */
1698static int extfrag_show(struct seq_file *m, void *arg)
1699{
1700 pg_data_t *pgdat = (pg_data_t *)arg;
1701
1702 walk_zones_in_node(m, pgdat, extfrag_show_print);
1703
1704 return 0;
1705}
1706
1707static const struct seq_operations extfrag_op = {
1708 .start = frag_start,
1709 .next = frag_next,
1710 .stop = frag_stop,
1711 .show = extfrag_show,
1712};
1713
1714static int extfrag_open(struct inode *inode, struct file *file)
1715{
1716 return seq_open(file, &extfrag_op);
1717}
1718
1719static const struct file_operations extfrag_file_ops = {
1720 .open = extfrag_open,
1721 .read = seq_read,
1722 .llseek = seq_lseek,
1723 .release = seq_release,
1724};
1725
1726static int __init extfrag_debug_init(void)
1727{
1728 struct dentry *extfrag_debug_root;
1729
1730 extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
1731 if (!extfrag_debug_root)
1732 return -ENOMEM;
1733
1734 if (!debugfs_create_file("unusable_index", 0444,
1735 extfrag_debug_root, NULL, &unusable_file_ops))
1736 goto fail;
1737
1738 if (!debugfs_create_file("extfrag_index", 0444,
1739 extfrag_debug_root, NULL, &extfrag_file_ops))
1740 goto fail;
1741
1742 return 0;
1743fail:
1744 debugfs_remove_recursive(extfrag_debug_root);
1745 return -ENOMEM;
1746}
1747
1748module_init(extfrag_debug_init);
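/*
 * Illustrative note: with debugfs mounted at its conventional location
 * the two files created above appear as
 * /sys/kernel/debug/extfrag/unusable_index and
 * /sys/kernel/debug/extfrag/extfrag_index, each printing one line per
 * node/zone with one value per order.
 */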
1749#endif
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/mm/vmstat.c
4 *
5 * Manages VM statistics
6 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
7 *
8 * zoned VM statistics
9 * Copyright (C) 2006 Silicon Graphics, Inc.,
10 * Christoph Lameter <christoph@lameter.com>
11 * Copyright (C) 2008-2014 Christoph Lameter
12 */
13#include <linux/fs.h>
14#include <linux/mm.h>
15#include <linux/err.h>
16#include <linux/module.h>
17#include <linux/slab.h>
18#include <linux/cpu.h>
19#include <linux/cpumask.h>
20#include <linux/vmstat.h>
21#include <linux/proc_fs.h>
22#include <linux/seq_file.h>
23#include <linux/debugfs.h>
24#include <linux/sched.h>
25#include <linux/math64.h>
26#include <linux/writeback.h>
27#include <linux/compaction.h>
28#include <linux/mm_inline.h>
29#include <linux/page_owner.h>
30#include <linux/sched/isolation.h>
31
32#include "internal.h"
33
34#ifdef CONFIG_NUMA
35int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;
36
37/* zero numa counters within a zone */
38static void zero_zone_numa_counters(struct zone *zone)
39{
40 int item, cpu;
41
42 for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++) {
43 atomic_long_set(&zone->vm_numa_event[item], 0);
44 for_each_online_cpu(cpu) {
45 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item]
46 = 0;
47 }
48 }
49}
50
51/* zero numa counters of all the populated zones */
52static void zero_zones_numa_counters(void)
53{
54 struct zone *zone;
55
56 for_each_populated_zone(zone)
57 zero_zone_numa_counters(zone);
58}
59
60/* zero global numa counters */
61static void zero_global_numa_counters(void)
62{
63 int item;
64
65 for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
66 atomic_long_set(&vm_numa_event[item], 0);
67}
68
69static void invalid_numa_statistics(void)
70{
71 zero_zones_numa_counters();
72 zero_global_numa_counters();
73}
74
75static DEFINE_MUTEX(vm_numa_stat_lock);
76
77int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
78 void *buffer, size_t *length, loff_t *ppos)
79{
80 int ret, oldval;
81
82 mutex_lock(&vm_numa_stat_lock);
83 if (write)
84 oldval = sysctl_vm_numa_stat;
85 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
86 if (ret || !write)
87 goto out;
88
89 if (oldval == sysctl_vm_numa_stat)
90 goto out;
91 else if (sysctl_vm_numa_stat == ENABLE_NUMA_STAT) {
92 static_branch_enable(&vm_numa_stat_key);
93 pr_info("enable numa statistics\n");
94 } else {
95 static_branch_disable(&vm_numa_stat_key);
96 invalid_numa_statistics();
97 pr_info("disable numa statistics, and clear numa counters\n");
98 }
99
100out:
101 mutex_unlock(&vm_numa_stat_lock);
102 return ret;
103}
104#endif
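/*
 * Illustrative sketch (not part of vmstat.c): the handler above runs
 * when the vm.numa_stat sysctl is written, normally exposed as
 * /proc/sys/vm/numa_stat. Writing 0 disables and zeroes the NUMA
 * counters, writing 1 re-enables them. A minimal userspace toggle
 * (path and names assumed for the example):
 */
#if 0	/* illustrative example only, never compiled */
#include <stdio.h>

static int example_set_numa_stat(int enable)
{
	FILE *f = fopen("/proc/sys/vm/numa_stat", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", enable ? 1 : 0);
	return fclose(f);
}
#endif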
105
106#ifdef CONFIG_VM_EVENT_COUNTERS
107DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
108EXPORT_PER_CPU_SYMBOL(vm_event_states);
109
110static void sum_vm_events(unsigned long *ret)
111{
112 int cpu;
113 int i;
114
115 memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
116
117 for_each_online_cpu(cpu) {
118 struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
119
120 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
121 ret[i] += this->event[i];
122 }
123}
124
125/*
126 * Accumulate the vm event counters across all CPUs.
127 * The result is unavoidably approximate - it can change
128 * during and after execution of this function.
129*/
130void all_vm_events(unsigned long *ret)
131{
132 cpus_read_lock();
133 sum_vm_events(ret);
134 cpus_read_unlock();
135}
136EXPORT_SYMBOL_GPL(all_vm_events);
137
138/*
139 * Fold the foreign cpu events into our own.
140 *
141 * This is adding to the events on one processor
142 * but keeps the global counts constant.
143 */
144void vm_events_fold_cpu(int cpu)
145{
146 struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
147 int i;
148
149 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
150 count_vm_events(i, fold_state->event[i]);
151 fold_state->event[i] = 0;
152 }
153}
154
155#endif /* CONFIG_VM_EVENT_COUNTERS */
156
157/*
158 * Manage combined zone based / global counters
159 *
160 * vm_stat contains the global counters
161 */
162atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
163atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
164atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS] __cacheline_aligned_in_smp;
165EXPORT_SYMBOL(vm_zone_stat);
166EXPORT_SYMBOL(vm_node_stat);
167
168#ifdef CONFIG_NUMA
169static void fold_vm_zone_numa_events(struct zone *zone)
170{
171 unsigned long zone_numa_events[NR_VM_NUMA_EVENT_ITEMS] = { 0, };
172 int cpu;
173 enum numa_stat_item item;
174
175 for_each_online_cpu(cpu) {
176 struct per_cpu_zonestat *pzstats;
177
178 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
179 for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
180 zone_numa_events[item] += xchg(&pzstats->vm_numa_event[item], 0);
181 }
182
183 for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
184 zone_numa_event_add(zone_numa_events[item], zone, item);
185}
186
187void fold_vm_numa_events(void)
188{
189 struct zone *zone;
190
191 for_each_populated_zone(zone)
192 fold_vm_zone_numa_events(zone);
193}
194#endif
195
196#ifdef CONFIG_SMP
197
198int calculate_pressure_threshold(struct zone *zone)
199{
200 int threshold;
201 int watermark_distance;
202
203 /*
204 * As vmstats are not up to date, there is drift between the estimated
205 * and real values. For high thresholds and a high number of CPUs, it
206 * is possible for the min watermark to be breached while the estimated
207 * value looks fine. The pressure threshold is a reduced value such
208 * that even the maximum amount of drift will not accidentally breach
209 * the min watermark
210 */
211 watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
212 threshold = max(1, (int)(watermark_distance / num_online_cpus()));
213
214 /*
215 * Maximum threshold is 125
216 */
217 threshold = min(125, threshold);
218
219 return threshold;
220}
221
222int calculate_normal_threshold(struct zone *zone)
223{
224 int threshold;
225 int mem; /* memory in 128 MB units */
226
227 /*
228 * The threshold scales with the number of processors and the amount
229 * of memory per zone. More memory means that we can defer updates for
230 * longer, more processors could lead to more contention.
231 * fls() is used to have a cheap way of logarithmic scaling.
232 *
233 * Some sample thresholds:
234 *
235 * Threshold Processors (fls) Zonesize fls(mem)+1
236 * ------------------------------------------------------------------
237 * 8 1 1 0.9-1 GB 4
238 * 16 2 2 0.9-1 GB 4
239 * 20 2 2 1-2 GB 5
240 * 24 2 2 2-4 GB 6
241 * 28 2 2 4-8 GB 7
242 * 32 2 2 8-16 GB 8
243 * 4 2 2 <128M 1
244 * 30 4 3 2-4 GB 5
245 * 48 4 3 8-16 GB 8
246 * 32 8 4 1-2 GB 4
247 * 32 8 4 0.9-1GB 4
248 * 10 16 5 <128M 1
249 * 40 16 5 900M 4
250 * 70 64 7 2-4 GB 5
251 * 84 64 7 4-8 GB 6
252 * 108 512 9 4-8 GB 6
253 * 125 1024 10 8-16 GB 8
254 * 125 1024 10 16-32 GB 9
255 */
256
257 mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);
258
259 threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
260
261 /*
262 * Maximum threshold is 125
263 */
264 threshold = min(125, threshold);
265
266 return threshold;
267}
268
269/*
270 * Refresh the thresholds for each zone.
271 */
272void refresh_zone_stat_thresholds(void)
273{
274 struct pglist_data *pgdat;
275 struct zone *zone;
276 int cpu;
277 int threshold;
278
279 /* Zero current pgdat thresholds */
280 for_each_online_pgdat(pgdat) {
281 for_each_online_cpu(cpu) {
282 per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
283 }
284 }
285
286 for_each_populated_zone(zone) {
287 struct pglist_data *pgdat = zone->zone_pgdat;
288 unsigned long max_drift, tolerate_drift;
289
290 threshold = calculate_normal_threshold(zone);
291
292 for_each_online_cpu(cpu) {
293 int pgdat_threshold;
294
295 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
296 = threshold;
297
298 /* Base nodestat threshold on the largest populated zone. */
299 pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
300 per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
301 = max(threshold, pgdat_threshold);
302 }
303
304 /*
305 * Only set percpu_drift_mark if there is a danger that
306 * NR_FREE_PAGES reports the low watermark is ok when in fact
307 * the min watermark could be breached by an allocation
308 */
309 tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
310 max_drift = num_online_cpus() * threshold;
311 if (max_drift > tolerate_drift)
312 zone->percpu_drift_mark = high_wmark_pages(zone) +
313 max_drift;
314 }
315}
316
317void set_pgdat_percpu_threshold(pg_data_t *pgdat,
318 int (*calculate_pressure)(struct zone *))
319{
320 struct zone *zone;
321 int cpu;
322 int threshold;
323 int i;
324
325 for (i = 0; i < pgdat->nr_zones; i++) {
326 zone = &pgdat->node_zones[i];
327 if (!zone->percpu_drift_mark)
328 continue;
329
330 threshold = (*calculate_pressure)(zone);
331 for_each_online_cpu(cpu)
332 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
333 = threshold;
334 }
335}
336
337/*
338 * For use when we know that interrupts are disabled,
339 * or when we know that preemption is disabled and that
340 * particular counter cannot be updated from interrupt context.
341 */
342void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
343 long delta)
344{
345 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
346 s8 __percpu *p = pcp->vm_stat_diff + item;
347 long x;
348 long t;
349
350 /*
351 * Accurate vmstat updates require a RMW. On !PREEMPT_RT kernels,
352 * atomicity is provided by IRQs being disabled -- either explicitly
353 * or via local_lock_irq. On PREEMPT_RT, local_lock_irq only disables
354 * CPU migrations and preemption potentially corrupts a counter so
355 * disable preemption.
356 */
357 preempt_disable_nested();
358
359 x = delta + __this_cpu_read(*p);
360
361 t = __this_cpu_read(pcp->stat_threshold);
362
363 if (unlikely(abs(x) > t)) {
364 zone_page_state_add(x, zone, item);
365 x = 0;
366 }
367 __this_cpu_write(*p, x);
368
369 preempt_enable_nested();
370}
371EXPORT_SYMBOL(__mod_zone_page_state);
372
373void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
374 long delta)
375{
376 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
377 s8 __percpu *p = pcp->vm_node_stat_diff + item;
378 long x;
379 long t;
380
381 if (vmstat_item_in_bytes(item)) {
382 /*
383 * Only cgroups use subpage accounting right now; at
384 * the global level, these items still change in
385 * multiples of whole pages. Store them as pages
386 * internally to keep the per-cpu counters compact.
387 */
388 VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
389 delta >>= PAGE_SHIFT;
390 }
391
392	/* See __mod_zone_page_state */
393 preempt_disable_nested();
394
395 x = delta + __this_cpu_read(*p);
396
397 t = __this_cpu_read(pcp->stat_threshold);
398
399 if (unlikely(abs(x) > t)) {
400 node_page_state_add(x, pgdat, item);
401 x = 0;
402 }
403 __this_cpu_write(*p, x);
404
405 preempt_enable_nested();
406}
407EXPORT_SYMBOL(__mod_node_page_state);
408
409/*
410 * Optimized increment and decrement functions.
411 *
412 * These are only for a single page and therefore can take a struct page *
413 * argument instead of struct zone *. This allows the inclusion of the code
414 * generated for page_zone(page) into the optimized functions.
415 *
416 * No overflow check is necessary and therefore the differential can be
417 * incremented or decremented in place which may allow the compilers to
418 * generate better code.
419 * The increment or decrement is known and therefore one boundary check can
420 * be omitted.
421 *
422 * NOTE: These functions are very performance sensitive. Change only
423 * with care.
424 *
425 * Some processors have inc/dec instructions that are atomic vs an interrupt.
426 * However, the code must first determine the differential location in a zone
427 * based on the processor number and then inc/dec the counter. There is no
428 * guarantee without disabling preemption that the processor will not change
429 * in between and therefore the atomicity vs. interrupt cannot be exploited
430 * in a useful way here.
431 */
432void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
433{
434 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
435 s8 __percpu *p = pcp->vm_stat_diff + item;
436 s8 v, t;
437
438	/* See __mod_zone_page_state */
439 preempt_disable_nested();
440
441 v = __this_cpu_inc_return(*p);
442 t = __this_cpu_read(pcp->stat_threshold);
443 if (unlikely(v > t)) {
444 s8 overstep = t >> 1;
445
446 zone_page_state_add(v + overstep, zone, item);
447 __this_cpu_write(*p, -overstep);
448 }
449
450 preempt_enable_nested();
451}
452
453void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
454{
455 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
456 s8 __percpu *p = pcp->vm_node_stat_diff + item;
457 s8 v, t;
458
459 VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
460
461	/* See __mod_zone_page_state */
462 preempt_disable_nested();
463
464 v = __this_cpu_inc_return(*p);
465 t = __this_cpu_read(pcp->stat_threshold);
466 if (unlikely(v > t)) {
467 s8 overstep = t >> 1;
468
469 node_page_state_add(v + overstep, pgdat, item);
470 __this_cpu_write(*p, -overstep);
471 }
472
473 preempt_enable_nested();
474}
475
476void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
477{
478 __inc_zone_state(page_zone(page), item);
479}
480EXPORT_SYMBOL(__inc_zone_page_state);
481
482void __inc_node_page_state(struct page *page, enum node_stat_item item)
483{
484 __inc_node_state(page_pgdat(page), item);
485}
486EXPORT_SYMBOL(__inc_node_page_state);
487
488void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
489{
490 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
491 s8 __percpu *p = pcp->vm_stat_diff + item;
492 s8 v, t;
493
494	/* See __mod_zone_page_state */
495 preempt_disable_nested();
496
497 v = __this_cpu_dec_return(*p);
498 t = __this_cpu_read(pcp->stat_threshold);
499	if (unlikely(v < -t)) {
500 s8 overstep = t >> 1;
501
502 zone_page_state_add(v - overstep, zone, item);
503 __this_cpu_write(*p, overstep);
504 }
505
506 preempt_enable_nested();
507}
508
509void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
510{
511 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
512 s8 __percpu *p = pcp->vm_node_stat_diff + item;
513 s8 v, t;
514
515 VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
516
517	/* See __mod_zone_page_state */
518 preempt_disable_nested();
519
520 v = __this_cpu_dec_return(*p);
521 t = __this_cpu_read(pcp->stat_threshold);
522	if (unlikely(v < -t)) {
523 s8 overstep = t >> 1;
524
525 node_page_state_add(v - overstep, pgdat, item);
526 __this_cpu_write(*p, overstep);
527 }
528
529 preempt_enable_nested();
530}
531
532void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
533{
534 __dec_zone_state(page_zone(page), item);
535}
536EXPORT_SYMBOL(__dec_zone_page_state);
537
538void __dec_node_page_state(struct page *page, enum node_stat_item item)
539{
540 __dec_node_state(page_pgdat(page), item);
541}
542EXPORT_SYMBOL(__dec_node_page_state);
543
544#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
545/*
546 * If we have cmpxchg_local support then we do not need to incur the overhead
547 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
548 *
549 * mod_state() modifies the zone counter state through atomic per cpu
550 * operations.
551 *
552 * Overstep mode specifies how overstep should be handled:
553 * 0 No overstepping
554 * 1 Overstepping half of threshold
555 * -1 Overstepping minus half of threshold
556 */
557static inline void mod_zone_state(struct zone *zone,
558 enum zone_stat_item item, long delta, int overstep_mode)
559{
560 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
561 s8 __percpu *p = pcp->vm_stat_diff + item;
562 long n, t, z;
563 s8 o;
564
565 o = this_cpu_read(*p);
566 do {
567 z = 0; /* overflow to zone counters */
568
569 /*
570 * The fetching of the stat_threshold is racy. We may apply
571 * a counter threshold to the wrong cpu if we get
572 * rescheduled while executing here. However, the next
573 * counter update will apply the threshold again and
574 * therefore bring the counter under the threshold again.
575 *
576 * Most of the time the thresholds are the same anyway
577 * for all cpus in a zone.
578 */
579 t = this_cpu_read(pcp->stat_threshold);
580
581 n = delta + (long)o;
582
583 if (abs(n) > t) {
584 int os = overstep_mode * (t >> 1);
585
586 /* Overflow must be added to zone counters */
587 z = n + os;
588 n = -os;
589 }
590 } while (!this_cpu_try_cmpxchg(*p, &o, n));
591
592 if (z)
593 zone_page_state_add(z, zone, item);
594}
595
596void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
597 long delta)
598{
599 mod_zone_state(zone, item, delta, 0);
600}
601EXPORT_SYMBOL(mod_zone_page_state);
602
603void inc_zone_page_state(struct page *page, enum zone_stat_item item)
604{
605 mod_zone_state(page_zone(page), item, 1, 1);
606}
607EXPORT_SYMBOL(inc_zone_page_state);
608
609void dec_zone_page_state(struct page *page, enum zone_stat_item item)
610{
611 mod_zone_state(page_zone(page), item, -1, -1);
612}
613EXPORT_SYMBOL(dec_zone_page_state);
614
615static inline void mod_node_state(struct pglist_data *pgdat,
616 enum node_stat_item item, int delta, int overstep_mode)
617{
618 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
619 s8 __percpu *p = pcp->vm_node_stat_diff + item;
620 long n, t, z;
621 s8 o;
622
623 if (vmstat_item_in_bytes(item)) {
624 /*
625 * Only cgroups use subpage accounting right now; at
626 * the global level, these items still change in
627 * multiples of whole pages. Store them as pages
628 * internally to keep the per-cpu counters compact.
629 */
630 VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
631 delta >>= PAGE_SHIFT;
632 }
633
634 o = this_cpu_read(*p);
635 do {
636 z = 0; /* overflow to node counters */
637
638 /*
639 * The fetching of the stat_threshold is racy. We may apply
640 * a counter threshold to the wrong cpu if we get
641 * rescheduled while executing here. However, the next
642 * counter update will apply the threshold again and
643 * therefore bring the counter under the threshold again.
644 *
645 * Most of the time the thresholds are the same anyway
646 * for all cpus in a node.
647 */
648 t = this_cpu_read(pcp->stat_threshold);
649
650 n = delta + (long)o;
651
652 if (abs(n) > t) {
653 int os = overstep_mode * (t >> 1);
654
655 /* Overflow must be added to node counters */
656 z = n + os;
657 n = -os;
658 }
659 } while (!this_cpu_try_cmpxchg(*p, &o, n));
660
661 if (z)
662 node_page_state_add(z, pgdat, item);
663}
664
665void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
666 long delta)
667{
668 mod_node_state(pgdat, item, delta, 0);
669}
670EXPORT_SYMBOL(mod_node_page_state);
671
672void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
673{
674 mod_node_state(pgdat, item, 1, 1);
675}
676
677void inc_node_page_state(struct page *page, enum node_stat_item item)
678{
679 mod_node_state(page_pgdat(page), item, 1, 1);
680}
681EXPORT_SYMBOL(inc_node_page_state);
682
683void dec_node_page_state(struct page *page, enum node_stat_item item)
684{
685 mod_node_state(page_pgdat(page), item, -1, -1);
686}
687EXPORT_SYMBOL(dec_node_page_state);
688#else
689/*
690 * Use interrupt disabling to serialize counter updates
691 */
692void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
693 long delta)
694{
695 unsigned long flags;
696
697 local_irq_save(flags);
698 __mod_zone_page_state(zone, item, delta);
699 local_irq_restore(flags);
700}
701EXPORT_SYMBOL(mod_zone_page_state);
702
703void inc_zone_page_state(struct page *page, enum zone_stat_item item)
704{
705 unsigned long flags;
706 struct zone *zone;
707
708 zone = page_zone(page);
709 local_irq_save(flags);
710 __inc_zone_state(zone, item);
711 local_irq_restore(flags);
712}
713EXPORT_SYMBOL(inc_zone_page_state);
714
715void dec_zone_page_state(struct page *page, enum zone_stat_item item)
716{
717 unsigned long flags;
718
719 local_irq_save(flags);
720 __dec_zone_page_state(page, item);
721 local_irq_restore(flags);
722}
723EXPORT_SYMBOL(dec_zone_page_state);
724
725void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
726{
727 unsigned long flags;
728
729 local_irq_save(flags);
730 __inc_node_state(pgdat, item);
731 local_irq_restore(flags);
732}
733EXPORT_SYMBOL(inc_node_state);
734
735void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
736 long delta)
737{
738 unsigned long flags;
739
740 local_irq_save(flags);
741 __mod_node_page_state(pgdat, item, delta);
742 local_irq_restore(flags);
743}
744EXPORT_SYMBOL(mod_node_page_state);
745
746void inc_node_page_state(struct page *page, enum node_stat_item item)
747{
748 unsigned long flags;
749 struct pglist_data *pgdat;
750
751 pgdat = page_pgdat(page);
752 local_irq_save(flags);
753 __inc_node_state(pgdat, item);
754 local_irq_restore(flags);
755}
756EXPORT_SYMBOL(inc_node_page_state);
757
758void dec_node_page_state(struct page *page, enum node_stat_item item)
759{
760 unsigned long flags;
761
762 local_irq_save(flags);
763 __dec_node_page_state(page, item);
764 local_irq_restore(flags);
765}
766EXPORT_SYMBOL(dec_node_page_state);
767#endif
768
769/*
770 * Fold a differential into the global counters.
771 * Returns the number of counters updated.
772 */
773static int fold_diff(int *zone_diff, int *node_diff)
774{
775 int i;
776 int changes = 0;
777
778 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
779 if (zone_diff[i]) {
780 atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
781 changes++;
782 }
783
784 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
785 if (node_diff[i]) {
786 atomic_long_add(node_diff[i], &vm_node_stat[i]);
787 changes++;
788 }
789 return changes;
790}
791
792/*
793 * Update the zone and node counters for the current cpu.
794 *
795 * Note that refresh_cpu_vm_stats strives to only access
796 * node local memory. The per cpu pagesets on remote zones are placed
797 * in the memory local to the processor using that pageset. So the
798 * loop over all zones will access a series of cachelines local to
799 * the processor.
800 *
801 * The call to zone_page_state_add updates the cachelines with the
802 * statistics in the remote zone struct as well as the global cachelines
803 * with the global counters. These could cause remote node cache line
804 * bouncing and should only be done when necessary.
805 *
806 * The function returns the number of global counters updated.
807 */
808static int refresh_cpu_vm_stats(bool do_pagesets)
809{
810 struct pglist_data *pgdat;
811 struct zone *zone;
812 int i;
813 int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
814 int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
815 int changes = 0;
816
817 for_each_populated_zone(zone) {
818 struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
819 struct per_cpu_pages __percpu *pcp = zone->per_cpu_pageset;
820
821 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
822 int v;
823
824 v = this_cpu_xchg(pzstats->vm_stat_diff[i], 0);
825 if (v) {
827 atomic_long_add(v, &zone->vm_stat[i]);
828 global_zone_diff[i] += v;
829#ifdef CONFIG_NUMA
830 /* 3 seconds idle till flush */
831 __this_cpu_write(pcp->expire, 3);
832#endif
833 }
834 }
835
836 if (do_pagesets) {
837 cond_resched();
838
839 changes += decay_pcp_high(zone, this_cpu_ptr(pcp));
840#ifdef CONFIG_NUMA
841 /*
842 * Deal with draining the remote pageset of this
843 * processor.
844 *
845 * Check if there are pages remaining in this pageset;
846 * if not, there is nothing to expire.
847 */
848 if (!__this_cpu_read(pcp->expire) ||
849 !__this_cpu_read(pcp->count))
850 continue;
851
852 /*
853 * We never drain zones local to this processor.
854 */
855 if (zone_to_nid(zone) == numa_node_id()) {
856 __this_cpu_write(pcp->expire, 0);
857 continue;
858 }
859
860 if (__this_cpu_dec_return(pcp->expire)) {
861 changes++;
862 continue;
863 }
864
865 if (__this_cpu_read(pcp->count)) {
866 drain_zone_pages(zone, this_cpu_ptr(pcp));
867 changes++;
868 }
869#endif
870 }
871 }
872
873 for_each_online_pgdat(pgdat) {
874 struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;
875
876 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
877 int v;
878
879 v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
880 if (v) {
881 atomic_long_add(v, &pgdat->vm_stat[i]);
882 global_node_diff[i] += v;
883 }
884 }
885 }
886
887 changes += fold_diff(global_zone_diff, global_node_diff);
888 return changes;
889}
890
891/*
892 * Fold the data for an offline cpu into the global array.
893 * There cannot be any access by the offline cpu and therefore
894 * synchronization is simplified.
895 */
896void cpu_vm_stats_fold(int cpu)
897{
898 struct pglist_data *pgdat;
899 struct zone *zone;
900 int i;
901 int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
902 int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
903
904 for_each_populated_zone(zone) {
905 struct per_cpu_zonestat *pzstats;
906
907 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
908
909 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
910 if (pzstats->vm_stat_diff[i]) {
911 int v;
912
913 v = pzstats->vm_stat_diff[i];
914 pzstats->vm_stat_diff[i] = 0;
915 atomic_long_add(v, &zone->vm_stat[i]);
916 global_zone_diff[i] += v;
917 }
918 }
919#ifdef CONFIG_NUMA
920 for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) {
921 if (pzstats->vm_numa_event[i]) {
922 unsigned long v;
923
924 v = pzstats->vm_numa_event[i];
925 pzstats->vm_numa_event[i] = 0;
926 zone_numa_event_add(v, zone, i);
927 }
928 }
929#endif
930 }
931
932 for_each_online_pgdat(pgdat) {
933 struct per_cpu_nodestat *p;
934
935 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
936
937 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
938 if (p->vm_node_stat_diff[i]) {
939 int v;
940
941 v = p->vm_node_stat_diff[i];
942 p->vm_node_stat_diff[i] = 0;
943 atomic_long_add(v, &pgdat->vm_stat[i]);
944 global_node_diff[i] += v;
945 }
946 }
947
948 fold_diff(global_zone_diff, global_node_diff);
949}
950
951/*
952 * This is only called if !populated_zone(zone), which implies no other
953 * users of pzstats->vm_stat_diff[] exist.
954 */
955void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *pzstats)
956{
957 unsigned long v;
958 int i;
959
960 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
961 if (pzstats->vm_stat_diff[i]) {
962 v = pzstats->vm_stat_diff[i];
963 pzstats->vm_stat_diff[i] = 0;
964 zone_page_state_add(v, zone, i);
965 }
966 }
967
968#ifdef CONFIG_NUMA
969 for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) {
970 if (pzstats->vm_numa_event[i]) {
971 v = pzstats->vm_numa_event[i];
972 pzstats->vm_numa_event[i] = 0;
973 zone_numa_event_add(v, zone, i);
974 }
975 }
976#endif
977}
978#endif
979
980#ifdef CONFIG_NUMA
981/*
982 * Determine the per node value of a stat item. This function
983 * is called frequently in a NUMA machine, so try to be as
984 * frugal as possible.
985 */
986unsigned long sum_zone_node_page_state(int node,
987 enum zone_stat_item item)
988{
989 struct zone *zones = NODE_DATA(node)->node_zones;
990 int i;
991 unsigned long count = 0;
992
993 for (i = 0; i < MAX_NR_ZONES; i++)
994 count += zone_page_state(zones + i, item);
995
996 return count;
997}
998
999/* Determine the per node value of a numa stat item. */
1000unsigned long sum_zone_numa_event_state(int node,
1001 enum numa_stat_item item)
1002{
1003 struct zone *zones = NODE_DATA(node)->node_zones;
1004 unsigned long count = 0;
1005 int i;
1006
1007 for (i = 0; i < MAX_NR_ZONES; i++)
1008 count += zone_numa_event_state(zones + i, item);
1009
1010 return count;
1011}
1012
1013/*
1014 * Determine the per node value of a stat item.
1015 */
1016unsigned long node_page_state_pages(struct pglist_data *pgdat,
1017 enum node_stat_item item)
1018{
1019 long x = atomic_long_read(&pgdat->vm_stat[item]);
1020#ifdef CONFIG_SMP
1021 if (x < 0)
1022 x = 0;
1023#endif
1024 return x;
1025}
1026
1027unsigned long node_page_state(struct pglist_data *pgdat,
1028 enum node_stat_item item)
1029{
1030 VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
1031
1032 return node_page_state_pages(pgdat, item);
1033}
1034#endif
1035
1036#ifdef CONFIG_COMPACTION
1037
1038struct contig_page_info {
1039 unsigned long free_pages;
1040 unsigned long free_blocks_total;
1041 unsigned long free_blocks_suitable;
1042};
1043
1044/*
1045 * Calculate the number of free pages in a zone, how many contiguous
1046 * pages are free and how many are large enough to satisfy an allocation of
1047 * the target size. Note that this function makes no attempt to estimate
1048 * how many suitable free blocks there *might* be if MOVABLE pages were
1049 * migrated. Calculating that is possible, but expensive and can be
1050 * figured out from userspace
1051 */
1052static void fill_contig_page_info(struct zone *zone,
1053 unsigned int suitable_order,
1054 struct contig_page_info *info)
1055{
1056 unsigned int order;
1057
1058 info->free_pages = 0;
1059 info->free_blocks_total = 0;
1060 info->free_blocks_suitable = 0;
1061
1062 for (order = 0; order < NR_PAGE_ORDERS; order++) {
1063 unsigned long blocks;
1064
1065 /*
1066 * Count number of free blocks.
1067 *
1068 * Access to nr_free is lockless as nr_free is used only for
1069 * diagnostic purposes. Use data_race to avoid KCSAN warning.
1070 */
1071 blocks = data_race(zone->free_area[order].nr_free);
1072 info->free_blocks_total += blocks;
1073
1074 /* Count free base pages */
1075 info->free_pages += blocks << order;
1076
1077 /* Count the suitable free blocks */
1078 if (order >= suitable_order)
1079 info->free_blocks_suitable += blocks <<
1080 (order - suitable_order);
1081 }
1082}
1083
1084/*
1085 * A fragmentation index only makes sense if an allocation of a requested
1086 * size would fail. If that is true, the fragmentation index indicates
1087 * whether external fragmentation or a lack of memory was the problem.
1088 * The value can be used to determine if page reclaim or compaction
1089 * should be used
1090 */
1091static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
1092{
1093 unsigned long requested = 1UL << order;
1094
1095 if (WARN_ON_ONCE(order > MAX_PAGE_ORDER))
1096 return 0;
1097
1098 if (!info->free_blocks_total)
1099 return 0;
1100
1101 /* Fragmentation index only makes sense when a request would fail */
1102 if (info->free_blocks_suitable)
1103 return -1000;
1104
1105 /*
1106 * Index is between 0 and 1 so return within 3 decimal places
1107 *
1108 * 0 => allocation would fail due to lack of memory
1109 * 1 => allocation would fail due to fragmentation
1110 */
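	/*
	 * Illustrative (hypothetical) numbers: for order 3 (requested = 8),
	 * with free_pages = 80 spread over free_blocks_total = 40 small
	 * blocks and no suitable block, this works out to
	 * 1000 - (1000 + 80 * 1000 / 8) / 40 = 725, i.e. 0.725, pointing
	 * at fragmentation rather than lack of memory.
	 */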
1111 return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL, requested)), info->free_blocks_total);
1112}
1113
1114/*
1115 * Calculates external fragmentation within a zone wrt the given order.
1116 * It is defined as the percentage of pages found in blocks of size
1117 * less than 1 << order. It returns values in range [0, 100].
1118 */
1119unsigned int extfrag_for_order(struct zone *zone, unsigned int order)
1120{
1121 struct contig_page_info info;
1122
1123 fill_contig_page_info(zone, order, &info);
1124 if (info.free_pages == 0)
1125 return 0;
1126
1127 return div_u64((info.free_pages -
1128 (info.free_blocks_suitable << order)) * 100,
1129 info.free_pages);
1130}
1131
1132/* Same as __fragmentation index but allocs contig_page_info on stack */
1133int fragmentation_index(struct zone *zone, unsigned int order)
1134{
1135 struct contig_page_info info;
1136
1137 fill_contig_page_info(zone, order, &info);
1138 return __fragmentation_index(order, &info);
1139}
1140#endif
1141
1142#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || \
1143 defined(CONFIG_NUMA) || defined(CONFIG_MEMCG)
1144#ifdef CONFIG_ZONE_DMA
1145#define TEXT_FOR_DMA(xx) xx "_dma",
1146#else
1147#define TEXT_FOR_DMA(xx)
1148#endif
1149
1150#ifdef CONFIG_ZONE_DMA32
1151#define TEXT_FOR_DMA32(xx) xx "_dma32",
1152#else
1153#define TEXT_FOR_DMA32(xx)
1154#endif
1155
1156#ifdef CONFIG_HIGHMEM
1157#define TEXT_FOR_HIGHMEM(xx) xx "_high",
1158#else
1159#define TEXT_FOR_HIGHMEM(xx)
1160#endif
1161
1162#ifdef CONFIG_ZONE_DEVICE
1163#define TEXT_FOR_DEVICE(xx) xx "_device",
1164#else
1165#define TEXT_FOR_DEVICE(xx)
1166#endif
1167
1168#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
1169 TEXT_FOR_HIGHMEM(xx) xx "_movable", \
1170 TEXT_FOR_DEVICE(xx)
1171
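/*
 * Counter names exported via /proc/vmstat and /proc/zoneinfo; the array is
 * also built when only CONFIG_NUMA or CONFIG_MEMCG is enabled. Its layout
 * must match the enums whose items index into it (see NR_VMSTAT_ITEMS below).
 */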
1172const char * const vmstat_text[] = {
1173 /* enum zone_stat_item counters */
1174 "nr_free_pages",
1175 "nr_zone_inactive_anon",
1176 "nr_zone_active_anon",
1177 "nr_zone_inactive_file",
1178 "nr_zone_active_file",
1179 "nr_zone_unevictable",
1180 "nr_zone_write_pending",
1181 "nr_mlock",
1182 "nr_bounce",
1183#if IS_ENABLED(CONFIG_ZSMALLOC)
1184 "nr_zspages",
1185#endif
1186 "nr_free_cma",
1187#ifdef CONFIG_UNACCEPTED_MEMORY
1188 "nr_unaccepted",
1189#endif
1190
1191 /* enum numa_stat_item counters */
1192#ifdef CONFIG_NUMA
1193 "numa_hit",
1194 "numa_miss",
1195 "numa_foreign",
1196 "numa_interleave",
1197 "numa_local",
1198 "numa_other",
1199#endif
1200
1201 /* enum node_stat_item counters */
1202 "nr_inactive_anon",
1203 "nr_active_anon",
1204 "nr_inactive_file",
1205 "nr_active_file",
1206 "nr_unevictable",
1207 "nr_slab_reclaimable",
1208 "nr_slab_unreclaimable",
1209 "nr_isolated_anon",
1210 "nr_isolated_file",
1211 "workingset_nodes",
1212 "workingset_refault_anon",
1213 "workingset_refault_file",
1214 "workingset_activate_anon",
1215 "workingset_activate_file",
1216 "workingset_restore_anon",
1217 "workingset_restore_file",
1218 "workingset_nodereclaim",
1219 "nr_anon_pages",
1220 "nr_mapped",
1221 "nr_file_pages",
1222 "nr_dirty",
1223 "nr_writeback",
1224 "nr_writeback_temp",
1225 "nr_shmem",
1226 "nr_shmem_hugepages",
1227 "nr_shmem_pmdmapped",
1228 "nr_file_hugepages",
1229 "nr_file_pmdmapped",
1230 "nr_anon_transparent_hugepages",
1231 "nr_vmscan_write",
1232 "nr_vmscan_immediate_reclaim",
1233 "nr_dirtied",
1234 "nr_written",
1235 "nr_throttled_written",
1236 "nr_kernel_misc_reclaimable",
1237 "nr_foll_pin_acquired",
1238 "nr_foll_pin_released",
1239 "nr_kernel_stack",
1240#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
1241 "nr_shadow_call_stack",
1242#endif
1243 "nr_page_table_pages",
1244 "nr_sec_page_table_pages",
1245#ifdef CONFIG_SWAP
1246 "nr_swapcached",
1247#endif
1248#ifdef CONFIG_NUMA_BALANCING
1249 "pgpromote_success",
1250 "pgpromote_candidate",
1251#endif
1252 "pgdemote_kswapd",
1253 "pgdemote_direct",
1254 "pgdemote_khugepaged",
1255
1256 /* enum writeback_stat_item counters */
1257 "nr_dirty_threshold",
1258 "nr_dirty_background_threshold",
1259
1260#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
1261 /* enum vm_event_item counters */
1262 "pgpgin",
1263 "pgpgout",
1264 "pswpin",
1265 "pswpout",
1266
1267 TEXTS_FOR_ZONES("pgalloc")
1268 TEXTS_FOR_ZONES("allocstall")
1269 TEXTS_FOR_ZONES("pgskip")
1270
1271 "pgfree",
1272 "pgactivate",
1273 "pgdeactivate",
1274 "pglazyfree",
1275
1276 "pgfault",
1277 "pgmajfault",
1278 "pglazyfreed",
1279
1280 "pgrefill",
1281 "pgreuse",
1282 "pgsteal_kswapd",
1283 "pgsteal_direct",
1284 "pgsteal_khugepaged",
1285 "pgscan_kswapd",
1286 "pgscan_direct",
1287 "pgscan_khugepaged",
1288 "pgscan_direct_throttle",
1289 "pgscan_anon",
1290 "pgscan_file",
1291 "pgsteal_anon",
1292 "pgsteal_file",
1293
1294#ifdef CONFIG_NUMA
1295 "zone_reclaim_failed",
1296#endif
1297 "pginodesteal",
1298 "slabs_scanned",
1299 "kswapd_inodesteal",
1300 "kswapd_low_wmark_hit_quickly",
1301 "kswapd_high_wmark_hit_quickly",
1302 "pageoutrun",
1303
1304 "pgrotated",
1305
1306 "drop_pagecache",
1307 "drop_slab",
1308 "oom_kill",
1309
1310#ifdef CONFIG_NUMA_BALANCING
1311 "numa_pte_updates",
1312 "numa_huge_pte_updates",
1313 "numa_hint_faults",
1314 "numa_hint_faults_local",
1315 "numa_pages_migrated",
1316#endif
1317#ifdef CONFIG_MIGRATION
1318 "pgmigrate_success",
1319 "pgmigrate_fail",
1320 "thp_migration_success",
1321 "thp_migration_fail",
1322 "thp_migration_split",
1323#endif
1324#ifdef CONFIG_COMPACTION
1325 "compact_migrate_scanned",
1326 "compact_free_scanned",
1327 "compact_isolated",
1328 "compact_stall",
1329 "compact_fail",
1330 "compact_success",
1331 "compact_daemon_wake",
1332 "compact_daemon_migrate_scanned",
1333 "compact_daemon_free_scanned",
1334#endif
1335
1336#ifdef CONFIG_HUGETLB_PAGE
1337 "htlb_buddy_alloc_success",
1338 "htlb_buddy_alloc_fail",
1339#endif
1340#ifdef CONFIG_CMA
1341 "cma_alloc_success",
1342 "cma_alloc_fail",
1343#endif
1344 "unevictable_pgs_culled",
1345 "unevictable_pgs_scanned",
1346 "unevictable_pgs_rescued",
1347 "unevictable_pgs_mlocked",
1348 "unevictable_pgs_munlocked",
1349 "unevictable_pgs_cleared",
1350 "unevictable_pgs_stranded",
1351
1352#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1353 "thp_fault_alloc",
1354 "thp_fault_fallback",
1355 "thp_fault_fallback_charge",
1356 "thp_collapse_alloc",
1357 "thp_collapse_alloc_failed",
1358 "thp_file_alloc",
1359 "thp_file_fallback",
1360 "thp_file_fallback_charge",
1361 "thp_file_mapped",
1362 "thp_split_page",
1363 "thp_split_page_failed",
1364 "thp_deferred_split_page",
1365 "thp_split_pmd",
1366 "thp_scan_exceed_none_pte",
1367 "thp_scan_exceed_swap_pte",
1368 "thp_scan_exceed_share_pte",
1369#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1370 "thp_split_pud",
1371#endif
1372 "thp_zero_page_alloc",
1373 "thp_zero_page_alloc_failed",
1374 "thp_swpout",
1375 "thp_swpout_fallback",
1376#endif
1377#ifdef CONFIG_MEMORY_BALLOON
1378 "balloon_inflate",
1379 "balloon_deflate",
1380#ifdef CONFIG_BALLOON_COMPACTION
1381 "balloon_migrate",
1382#endif
1383#endif /* CONFIG_MEMORY_BALLOON */
1384#ifdef CONFIG_DEBUG_TLBFLUSH
1385 "nr_tlb_remote_flush",
1386 "nr_tlb_remote_flush_received",
1387 "nr_tlb_local_flush_all",
1388 "nr_tlb_local_flush_one",
1389#endif /* CONFIG_DEBUG_TLBFLUSH */
1390
1391#ifdef CONFIG_SWAP
1392 "swap_ra",
1393 "swap_ra_hit",
1394#ifdef CONFIG_KSM
1395 "ksm_swpin_copy",
1396#endif
1397#endif
1398#ifdef CONFIG_KSM
1399 "cow_ksm",
1400#endif
1401#ifdef CONFIG_ZSWAP
1402 "zswpin",
1403 "zswpout",
1404 "zswpwb",
1405#endif
1406#ifdef CONFIG_X86
1407 "direct_map_level2_splits",
1408 "direct_map_level3_splits",
1409#endif
1410#ifdef CONFIG_PER_VMA_LOCK_STATS
1411 "vma_lock_success",
1412 "vma_lock_abort",
1413 "vma_lock_retry",
1414 "vma_lock_miss",
1415#endif
1416#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
1417};
1418#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */
1419
1420#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
1421 defined(CONFIG_PROC_FS)
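/*
 * seq_file iterator shared by the buddyinfo, pagetypeinfo, zoneinfo,
 * unusable_index and extfrag_index interfaces: one online pgdat per *pos.
 */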
1422static void *frag_start(struct seq_file *m, loff_t *pos)
1423{
1424 pg_data_t *pgdat;
1425 loff_t node = *pos;
1426
1427 for (pgdat = first_online_pgdat();
1428 pgdat && node;
1429 pgdat = next_online_pgdat(pgdat))
1430 --node;
1431
1432 return pgdat;
1433}
1434
1435static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
1436{
1437 pg_data_t *pgdat = (pg_data_t *)arg;
1438
1439 (*pos)++;
1440 return next_online_pgdat(pgdat);
1441}
1442
1443static void frag_stop(struct seq_file *m, void *arg)
1444{
1445}
1446
1447/*
1448 * Walk zones in a node and print using a callback.
1449 * If @assert_populated is true, only use callback for zones that are populated.
1450 */
1451static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
1452 bool assert_populated, bool nolock,
1453 void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
1454{
1455 struct zone *zone;
1456 struct zone *node_zones = pgdat->node_zones;
1457 unsigned long flags;
1458
1459 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
1460 if (assert_populated && !populated_zone(zone))
1461 continue;
1462
1463 if (!nolock)
1464 spin_lock_irqsave(&zone->lock, flags);
1465 print(m, pgdat, zone);
1466 if (!nolock)
1467 spin_unlock_irqrestore(&zone->lock, flags);
1468 }
1469}
1470#endif
1471
1472#ifdef CONFIG_PROC_FS
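/* One line of /proc/buddyinfo: free block counts of each order for a zone. */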
1473static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
1474 struct zone *zone)
1475{
1476 int order;
1477
1478 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1479 for (order = 0; order < NR_PAGE_ORDERS; ++order)
1480 /*
1481 * Access to nr_free is lockless as nr_free is used only for
1482 * printing purposes. Use data_race to avoid KCSAN warning.
1483 */
1484 seq_printf(m, "%6lu ", data_race(zone->free_area[order].nr_free));
1485 seq_putc(m, '\n');
1486}
1487
1488/*
1489 * This walks the free areas for each zone.
1490 */
1491static int frag_show(struct seq_file *m, void *arg)
1492{
1493 pg_data_t *pgdat = (pg_data_t *)arg;
1494 walk_zones_in_node(m, pgdat, true, false, frag_show_print);
1495 return 0;
1496}
1497
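/*
 * Print, for one zone, the number of pages on each migratetype's free list
 * at each order. The per-list count is capped (see below) to bound the time
 * spent under the zone lock.
 */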
1498static void pagetypeinfo_showfree_print(struct seq_file *m,
1499 pg_data_t *pgdat, struct zone *zone)
1500{
1501 int order, mtype;
1502
1503 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
1504 seq_printf(m, "Node %4d, zone %8s, type %12s ",
1505 pgdat->node_id,
1506 zone->name,
1507 migratetype_names[mtype]);
1508 for (order = 0; order < NR_PAGE_ORDERS; ++order) {
1509 unsigned long freecount = 0;
1510 struct free_area *area;
1511 struct list_head *curr;
1512 bool overflow = false;
1513
1514 area = &(zone->free_area[order]);
1515
1516 list_for_each(curr, &area->free_list[mtype]) {
1517 /*
1518 * Cap the free_list iteration because it might
1519 * be really large and we are under a spinlock
1520 * so a long time spent here could trigger a
1521 * hard lockup detector. Anyway this is a
1522 * debugging tool so knowing there is a handful
1523 * of pages of this order should be more than
1524 * sufficient.
1525 */
1526 if (++freecount >= 100000) {
1527 overflow = true;
1528 break;
1529 }
1530 }
1531 seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
1532 spin_unlock_irq(&zone->lock);
1533 cond_resched();
1534 spin_lock_irq(&zone->lock);
1535 }
1536 seq_putc(m, '\n');
1537 }
1538}
1539
1540/* Print out the free pages at each order for each migratetype */
1541static void pagetypeinfo_showfree(struct seq_file *m, void *arg)
1542{
1543 int order;
1544 pg_data_t *pgdat = (pg_data_t *)arg;
1545
1546 /* Print header */
1547 seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
1548 for (order = 0; order < NR_PAGE_ORDERS; ++order)
1549 seq_printf(m, "%6d ", order);
1550 seq_putc(m, '\n');
1551
1552 walk_zones_in_node(m, pgdat, true, false, pagetypeinfo_showfree_print);
1553}
1554
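/*
 * Count the pageblocks of each migratetype in a zone by stepping through its
 * PFN range one pageblock at a time.
 */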
1555static void pagetypeinfo_showblockcount_print(struct seq_file *m,
1556 pg_data_t *pgdat, struct zone *zone)
1557{
1558 int mtype;
1559 unsigned long pfn;
1560 unsigned long start_pfn = zone->zone_start_pfn;
1561 unsigned long end_pfn = zone_end_pfn(zone);
1562 unsigned long count[MIGRATE_TYPES] = { 0, };
1563
1564 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
1565 struct page *page;
1566
1567 page = pfn_to_online_page(pfn);
1568 if (!page)
1569 continue;
1570
1571 if (page_zone(page) != zone)
1572 continue;
1573
1574 mtype = get_pageblock_migratetype(page);
1575
1576 if (mtype < MIGRATE_TYPES)
1577 count[mtype]++;
1578 }
1579
1580 /* Print counts */
1581 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1582 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1583 seq_printf(m, "%12lu ", count[mtype]);
1584 seq_putc(m, '\n');
1585}
1586
1587/* Print out the number of pageblocks for each migratetype */
1588static void pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
1589{
1590 int mtype;
1591 pg_data_t *pgdat = (pg_data_t *)arg;
1592
1593 seq_printf(m, "\n%-23s", "Number of blocks type ");
1594 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1595 seq_printf(m, "%12s ", migratetype_names[mtype]);
1596 seq_putc(m, '\n');
1597 walk_zones_in_node(m, pgdat, true, false,
1598 pagetypeinfo_showblockcount_print);
1599}
1600
1601/*
1602 * Print out the number of pageblocks for each migratetype that contain pages
1603 * of other types. This gives an indication of how well fallbacks are being
1604 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
1605 * to determine what is going on
1606 */
1607static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
1608{
1609#ifdef CONFIG_PAGE_OWNER
1610 int mtype;
1611
1612 if (!static_branch_unlikely(&page_owner_inited))
1613 return;
1614
1615 drain_all_pages(NULL);
1616
1617 seq_printf(m, "\n%-23s", "Number of mixed blocks ");
1618 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1619 seq_printf(m, "%12s ", migratetype_names[mtype]);
1620 seq_putc(m, '\n');
1621
1622 walk_zones_in_node(m, pgdat, true, true,
1623 pagetypeinfo_showmixedcount_print);
1624#endif /* CONFIG_PAGE_OWNER */
1625}
1626
1627/*
1628 * This prints out statistics in relation to grouping pages by mobility.
1629 * It is expensive to collect so do not constantly read the file.
1630 */
1631static int pagetypeinfo_show(struct seq_file *m, void *arg)
1632{
1633 pg_data_t *pgdat = (pg_data_t *)arg;
1634
1635 /* check memoryless node */
1636 if (!node_state(pgdat->node_id, N_MEMORY))
1637 return 0;
1638
1639 seq_printf(m, "Page block order: %d\n", pageblock_order);
1640 seq_printf(m, "Pages per block: %lu\n", pageblock_nr_pages);
1641 seq_putc(m, '\n');
1642 pagetypeinfo_showfree(m, pgdat);
1643 pagetypeinfo_showblockcount(m, pgdat);
1644 pagetypeinfo_showmixedcount(m, pgdat);
1645
1646 return 0;
1647}
1648
1649static const struct seq_operations fragmentation_op = {
1650 .start = frag_start,
1651 .next = frag_next,
1652 .stop = frag_stop,
1653 .show = frag_show,
1654};
1655
1656static const struct seq_operations pagetypeinfo_op = {
1657 .start = frag_start,
1658 .next = frag_next,
1659 .stop = frag_stop,
1660 .show = pagetypeinfo_show,
1661};
1662
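/*
 * Return true if @zone is the first populated zone of @pgdat, so that
 * zoneinfo prints the per-node stats only once per node.
 */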
1663static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
1664{
1665 int zid;
1666
1667 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1668 struct zone *compare = &pgdat->node_zones[zid];
1669
1670 if (populated_zone(compare))
1671 return zone == compare;
1672 }
1673
1674 return false;
1675}
1676
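/*
 * Emit the /proc/zoneinfo block for one zone, prefixed by the per-node stats
 * when this is the node's first populated zone.
 */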
1677static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1678 struct zone *zone)
1679{
1680 int i;
1681 seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1682 if (is_zone_first_populated(pgdat, zone)) {
1683 seq_printf(m, "\n per-node stats");
1684 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1685 unsigned long pages = node_page_state_pages(pgdat, i);
1686
1687 if (vmstat_item_print_in_thp(i))
1688 pages /= HPAGE_PMD_NR;
1689 seq_printf(m, "\n %-12s %lu", node_stat_name(i),
1690 pages);
1691 }
1692 }
1693 seq_printf(m,
1694 "\n pages free %lu"
1695 "\n boost %lu"
1696 "\n min %lu"
1697 "\n low %lu"
1698 "\n high %lu"
1699 "\n spanned %lu"
1700 "\n present %lu"
1701 "\n managed %lu"
1702 "\n cma %lu",
1703 zone_page_state(zone, NR_FREE_PAGES),
1704 zone->watermark_boost,
1705 min_wmark_pages(zone),
1706 low_wmark_pages(zone),
1707 high_wmark_pages(zone),
1708 zone->spanned_pages,
1709 zone->present_pages,
1710 zone_managed_pages(zone),
1711 zone_cma_pages(zone));
1712
1713 seq_printf(m,
1714 "\n protection: (%ld",
1715 zone->lowmem_reserve[0]);
1716 for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
1717 seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
1718 seq_putc(m, ')');
1719
1720 /* If unpopulated, no other information is useful */
1721 if (!populated_zone(zone)) {
1722 seq_putc(m, '\n');
1723 return;
1724 }
1725
1726 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1727 seq_printf(m, "\n %-12s %lu", zone_stat_name(i),
1728 zone_page_state(zone, i));
1729
1730#ifdef CONFIG_NUMA
1731 for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
1732 seq_printf(m, "\n %-12s %lu", numa_stat_name(i),
1733 zone_numa_event_state(zone, i));
1734#endif
1735
1736 seq_printf(m, "\n pagesets");
1737 for_each_online_cpu(i) {
1738 struct per_cpu_pages *pcp;
1739 struct per_cpu_zonestat __maybe_unused *pzstats;
1740
1741 pcp = per_cpu_ptr(zone->per_cpu_pageset, i);
1742 seq_printf(m,
1743 "\n cpu: %i"
1744 "\n count: %i"
1745 "\n high: %i"
1746 "\n batch: %i",
1747 i,
1748 pcp->count,
1749 pcp->high,
1750 pcp->batch);
1751#ifdef CONFIG_SMP
1752 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, i);
1753 seq_printf(m, "\n vm stats threshold: %d",
1754 pzstats->stat_threshold);
1755#endif
1756 }
1757 seq_printf(m,
1758 "\n node_unreclaimable: %u"
1759 "\n start_pfn: %lu",
1760 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
1761 zone->zone_start_pfn);
1762 seq_putc(m, '\n');
1763}
1764
1765/*
1766 * Output information about zones in @pgdat. All zones are printed regardless
1767 * of whether they are populated or not: lowmem_reserve_ratio operates on the
1768 * set of all zones and userspace would not be aware of such zones if they are
1769 * suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio).
1770 */
1771static int zoneinfo_show(struct seq_file *m, void *arg)
1772{
1773 pg_data_t *pgdat = (pg_data_t *)arg;
1774 walk_zones_in_node(m, pgdat, false, false, zoneinfo_show_print);
1775 return 0;
1776}
1777
1778static const struct seq_operations zoneinfo_op = {
1779 .start = frag_start, /* iterate over all zones. The same as in
1780 * fragmentation. */
1781 .next = frag_next,
1782 .stop = frag_stop,
1783 .show = zoneinfo_show,
1784};
1785
1786#define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \
1787 NR_VM_NUMA_EVENT_ITEMS + \
1788 NR_VM_NODE_STAT_ITEMS + \
1789 NR_VM_WRITEBACK_STAT_ITEMS + \
1790 (IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \
1791 NR_VM_EVENT_ITEMS : 0))
1792
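/*
 * Snapshot every counter into a kmalloc'ed array; vmstat_show() then emits
 * the /proc/vmstat lines straight from that array.
 */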
1793static void *vmstat_start(struct seq_file *m, loff_t *pos)
1794{
1795 unsigned long *v;
1796 int i;
1797
1798 if (*pos >= NR_VMSTAT_ITEMS)
1799 return NULL;
1800
1801 BUILD_BUG_ON(ARRAY_SIZE(vmstat_text) < NR_VMSTAT_ITEMS);
1802 fold_vm_numa_events();
1803 v = kmalloc_array(NR_VMSTAT_ITEMS, sizeof(unsigned long), GFP_KERNEL);
1804 m->private = v;
1805 if (!v)
1806 return ERR_PTR(-ENOMEM);
1807 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1808 v[i] = global_zone_page_state(i);
1809 v += NR_VM_ZONE_STAT_ITEMS;
1810
1811#ifdef CONFIG_NUMA
1812 for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
1813 v[i] = global_numa_event_state(i);
1814 v += NR_VM_NUMA_EVENT_ITEMS;
1815#endif
1816
1817 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1818 v[i] = global_node_page_state_pages(i);
1819 if (vmstat_item_print_in_thp(i))
1820 v[i] /= HPAGE_PMD_NR;
1821 }
1822 v += NR_VM_NODE_STAT_ITEMS;
1823
1824 global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1825 v + NR_DIRTY_THRESHOLD);
1826 v += NR_VM_WRITEBACK_STAT_ITEMS;
1827
1828#ifdef CONFIG_VM_EVENT_COUNTERS
1829 all_vm_events(v);
1830 v[PGPGIN] /= 2; /* sectors -> kbytes */
1831 v[PGPGOUT] /= 2;
1832#endif
1833 return (unsigned long *)m->private + *pos;
1834}
1835
1836static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1837{
1838 (*pos)++;
1839 if (*pos >= NR_VMSTAT_ITEMS)
1840 return NULL;
1841 return (unsigned long *)m->private + *pos;
1842}
1843
1844static int vmstat_show(struct seq_file *m, void *arg)
1845{
1846 unsigned long *l = arg;
1847 unsigned long off = l - (unsigned long *)m->private;
1848
1849 seq_puts(m, vmstat_text[off]);
1850 seq_put_decimal_ull(m, " ", *l);
1851 seq_putc(m, '\n');
1852
1853 if (off == NR_VMSTAT_ITEMS - 1) {
1854 /*
1855 * We've come to the end - add any deprecated counters to avoid
1856 * breaking userspace which might depend on them being present.
1857 */
1858 seq_puts(m, "nr_unstable 0\n");
1859 }
1860 return 0;
1861}
1862
1863static void vmstat_stop(struct seq_file *m, void *arg)
1864{
1865 kfree(m->private);
1866 m->private = NULL;
1867}
1868
1869static const struct seq_operations vmstat_op = {
1870 .start = vmstat_start,
1871 .next = vmstat_next,
1872 .stop = vmstat_stop,
1873 .show = vmstat_show,
1874};
1875#endif /* CONFIG_PROC_FS */
1876
1877#ifdef CONFIG_SMP
1878static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1879int sysctl_stat_interval __read_mostly = HZ;
1880
1881#ifdef CONFIG_PROC_FS
1882static void refresh_vm_stats(struct work_struct *work)
1883{
1884 refresh_cpu_vm_stats(true);
1885}
1886
1887int vmstat_refresh(struct ctl_table *table, int write,
1888 void *buffer, size_t *lenp, loff_t *ppos)
1889{
1890 long val;
1891 int err;
1892 int i;
1893
1894 /*
1895 * The regular update, every sysctl_stat_interval, may come later
1896 * than expected: leaving a significant amount in per_cpu buckets.
1897 * This is particularly misleading when checking a quantity of HUGE
1898 * pages, immediately after running a test. /proc/sys/vm/stat_refresh,
1899 * which can equally be echo'ed to or cat'ted from (by root),
1900 * can be used to update the stats just before reading them.
1901 *
1902 * Oh, and since global_zone_page_state() etc. are so careful to hide
1903 * transiently negative values, report an error here if any of
1904 * the stats is negative, so we know to go looking for imbalance.
1905 */
1906 err = schedule_on_each_cpu(refresh_vm_stats);
1907 if (err)
1908 return err;
1909 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
1910 /*
1911 * Skip checking stats known to go negative occasionally.
1912 */
1913 switch (i) {
1914 case NR_ZONE_WRITE_PENDING:
1915 case NR_FREE_CMA_PAGES:
1916 continue;
1917 }
1918 val = atomic_long_read(&vm_zone_stat[i]);
1919 if (val < 0) {
1920 pr_warn("%s: %s %ld\n",
1921 __func__, zone_stat_name(i), val);
1922 }
1923 }
1924 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1925 /*
1926 * Skip checking stats known to go negative occasionally.
1927 */
1928 switch (i) {
1929 case NR_WRITEBACK:
1930 continue;
1931 }
1932 val = atomic_long_read(&vm_node_stat[i]);
1933 if (val < 0) {
1934 pr_warn("%s: %s %ld\n",
1935 __func__, node_stat_name(i), val);
1936 }
1937 }
1938 if (write)
1939 *ppos += *lenp;
1940 else
1941 *lenp = 0;
1942 return 0;
1943}
1944#endif /* CONFIG_PROC_FS */
1945
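/*
 * Per-cpu vmstat update worker: fold this CPU's diffs into the global
 * counters and re-arm only while updates keep occurring; otherwise
 * vmstat_shepherd restarts the worker when new diffs appear.
 */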
1946static void vmstat_update(struct work_struct *w)
1947{
1948 if (refresh_cpu_vm_stats(true)) {
1949 /*
1950 * Counters were updated so we expect more updates
1951 * to occur in the future. Keep on running the
1952 * update worker thread.
1953 */
1954 queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
1955 this_cpu_ptr(&vmstat_work),
1956 round_jiffies_relative(sysctl_stat_interval));
1957 }
1958}
1959
1960/*
1961 * Check if the diffs for a certain cpu indicate that
1962 * an update is needed.
1963 */
1964static bool need_update(int cpu)
1965{
1966 pg_data_t *last_pgdat = NULL;
1967 struct zone *zone;
1968
1969 for_each_populated_zone(zone) {
1970 struct per_cpu_zonestat *pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
1971 struct per_cpu_nodestat *n;
1972
1973 /*
1974 * The fast way of checking if there are any vmstat diffs.
1975 */
1976 if (memchr_inv(pzstats->vm_stat_diff, 0, sizeof(pzstats->vm_stat_diff)))
1977 return true;
1978
1979 if (last_pgdat == zone->zone_pgdat)
1980 continue;
1981 last_pgdat = zone->zone_pgdat;
1982 n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu);
1983 if (memchr_inv(n->vm_node_stat_diff, 0, sizeof(n->vm_node_stat_diff)))
1984 return true;
1985 }
1986 return false;
1987}
1988
1989/*
1990 * Switch off vmstat processing and then fold all the remaining differentials
1991 * until the diffs stay at zero. The function is used by NOHZ and can only be
1992 * invoked when tick processing is not active.
1993 */
1994void quiet_vmstat(void)
1995{
1996 if (system_state != SYSTEM_RUNNING)
1997 return;
1998
1999 if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
2000 return;
2001
2002 if (!need_update(smp_processor_id()))
2003 return;
2004
2005 /*
2006 * Just refresh counters and do not care about the pending delayed
2007 * vmstat_update. It does not fire often enough to matter, and cancelling
2008 * it from this path would be too expensive.
2009 * vmstat_shepherd will take care of that for us.
2010 */
2011 refresh_cpu_vm_stats(false);
2012}
2013
2014/*
2015 * Shepherd worker that checks the per-cpu differentials of processors
2016 * whose vmstat update workers have been switched off because of
2017 * inactivity, and requeues those workers when updates are pending.
2019 */
2020static void vmstat_shepherd(struct work_struct *w);
2021
2022static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
2023
2024static void vmstat_shepherd(struct work_struct *w)
2025{
2026 int cpu;
2027
2028 cpus_read_lock();
2029 /* Check processors whose vmstat worker threads have been disabled */
2030 for_each_online_cpu(cpu) {
2031 struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
2032
2033 /*
2034 * In-kernel users of vmstat counters either require the precise value,
2035 * in which case they use the zone_page_state_snapshot interface, or they can
2036 * live with an imprecision, as the regular flushing can happen at an arbitrary
2037 * time and the cumulative error can grow (see calculate_normal_threshold).
2038 *
2039 * From that point of view the regular flushing can be postponed for CPUs that
2040 * have been isolated from kernel interference without critical
2041 * infrastructure ever noticing. Skip regular flushing from vmstat_shepherd
2042 * for all isolated CPUs to avoid interference with the isolated workload.
2043 */
2044 if (cpu_is_isolated(cpu))
2045 continue;
2046
2047 if (!delayed_work_pending(dw) && need_update(cpu))
2048 queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
2049
2050 cond_resched();
2051 }
2052 cpus_read_unlock();
2053
2054 schedule_delayed_work(&shepherd,
2055 round_jiffies_relative(sysctl_stat_interval));
2056}
2057
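/* Set up the per-cpu vmstat works and kick off the shepherd. */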
2058static void __init start_shepherd_timer(void)
2059{
2060 int cpu;
2061
2062 for_each_possible_cpu(cpu)
2063 INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
2064 vmstat_update);
2065
2066 schedule_delayed_work(&shepherd,
2067 round_jiffies_relative(sysctl_stat_interval));
2068}
2069
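/* Mark every node that currently has CPUs with N_CPU. */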
2070static void __init init_cpu_node_state(void)
2071{
2072 int node;
2073
2074 for_each_online_node(node) {
2075 if (!cpumask_empty(cpumask_of_node(node)))
2076 node_set_state(node, N_CPU);
2077 }
2078}
2079
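/*
 * CPU hotplug callbacks: recompute the stat thresholds and keep the N_CPU
 * node state up to date as CPUs come and go.
 */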
2080static int vmstat_cpu_online(unsigned int cpu)
2081{
2082 refresh_zone_stat_thresholds();
2083
2084 if (!node_state(cpu_to_node(cpu), N_CPU)) {
2085 node_set_state(cpu_to_node(cpu), N_CPU);
2086 }
2087
2088 return 0;
2089}
2090
2091static int vmstat_cpu_down_prep(unsigned int cpu)
2092{
2093 cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
2094 return 0;
2095}
2096
2097static int vmstat_cpu_dead(unsigned int cpu)
2098{
2099 const struct cpumask *node_cpus;
2100 int node;
2101
2102 node = cpu_to_node(cpu);
2103
2104 refresh_zone_stat_thresholds();
2105 node_cpus = cpumask_of_node(node);
2106 if (!cpumask_empty(node_cpus))
2107 return 0;
2108
2109 node_clear_state(node, N_CPU);
2110
2111 return 0;
2112}
2113
2114#endif
2115
2116struct workqueue_struct *mm_percpu_wq;
2117
2118void __init init_mm_internals(void)
2119{
2120 int ret __maybe_unused;
2121
2122 mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);
2123
2124#ifdef CONFIG_SMP
2125 ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
2126 NULL, vmstat_cpu_dead);
2127 if (ret < 0)
2128 pr_err("vmstat: failed to register 'dead' hotplug state\n");
2129
2130 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
2131 vmstat_cpu_online,
2132 vmstat_cpu_down_prep);
2133 if (ret < 0)
2134 pr_err("vmstat: failed to register 'online' hotplug state\n");
2135
2136 cpus_read_lock();
2137 init_cpu_node_state();
2138 cpus_read_unlock();
2139
2140 start_shepherd_timer();
2141#endif
2142#ifdef CONFIG_PROC_FS
2143 proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
2144 proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
2145 proc_create_seq("vmstat", 0444, NULL, &vmstat_op);
2146 proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op);
2147#endif
2148}
2149
2150#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
2151
2152/*
2153 * Return an index indicating how much of the available free memory is
2154 * unusable for an allocation of the requested size.
2155 */
2156static int unusable_free_index(unsigned int order,
2157 struct contig_page_info *info)
2158{
2159 /* No free memory is interpreted as all free memory is unusable */
2160 if (info->free_pages == 0)
2161 return 1000;
2162
2163 /*
2164 * Index should be a value between 0 and 1. Return a value to 3
2165 * decimal places.
2166 *
2167 * 0 => no fragmentation
2168 * 1 => high fragmentation
2169 */
2170 return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
2172}
2173
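/* Print one zone's unusable free space index at each order. */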
2174static void unusable_show_print(struct seq_file *m,
2175 pg_data_t *pgdat, struct zone *zone)
2176{
2177 unsigned int order;
2178 int index;
2179 struct contig_page_info info;
2180
2181 seq_printf(m, "Node %d, zone %8s ",
2182 pgdat->node_id,
2183 zone->name);
2184 for (order = 0; order < NR_PAGE_ORDERS; ++order) {
2185 fill_contig_page_info(zone, order, &info);
2186 index = unusable_free_index(order, &info);
2187 seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
2188 }
2189
2190 seq_putc(m, '\n');
2191}
2192
2193/*
2194 * Display unusable free space index
2195 *
2196 * The unusable free space index measures how much of the available free
2197 * memory cannot be used to satisfy an allocation of a given size and is a
2198 * value between 0 and 1. The higher the value, the more of free memory is
2199 * unusable and by implication, the worse the external fragmentation is. This
2200 * can be expressed as a percentage by multiplying by 100.
2201 */
2202static int unusable_show(struct seq_file *m, void *arg)
2203{
2204 pg_data_t *pgdat = (pg_data_t *)arg;
2205
2206 /* check memoryless node */
2207 if (!node_state(pgdat->node_id, N_MEMORY))
2208 return 0;
2209
2210 walk_zones_in_node(m, pgdat, true, false, unusable_show_print);
2211
2212 return 0;
2213}
2214
2215static const struct seq_operations unusable_sops = {
2216 .start = frag_start,
2217 .next = frag_next,
2218 .stop = frag_stop,
2219 .show = unusable_show,
2220};
2221
2222DEFINE_SEQ_ATTRIBUTE(unusable);
2223
2224static void extfrag_show_print(struct seq_file *m,
2225 pg_data_t *pgdat, struct zone *zone)
2226{
2227 unsigned int order;
2228 int index;
2229
2230 /* Alloc on stack as interrupts are disabled for zone walk */
2231 struct contig_page_info info;
2232
2233 seq_printf(m, "Node %d, zone %8s ",
2234 pgdat->node_id,
2235 zone->name);
2236 for (order = 0; order < NR_PAGE_ORDERS; ++order) {
2237 fill_contig_page_info(zone, order, &info);
2238 index = __fragmentation_index(order, &info);
2239 seq_printf(m, "%2d.%03d ", index / 1000, index % 1000);
2240 }
2241
2242 seq_putc(m, '\n');
2243}
2244
2245/*
2246 * Display fragmentation index for orders that allocations would fail for
2247 */
2248static int extfrag_show(struct seq_file *m, void *arg)
2249{
2250 pg_data_t *pgdat = (pg_data_t *)arg;
2251
2252 walk_zones_in_node(m, pgdat, true, false, extfrag_show_print);
2253
2254 return 0;
2255}
2256
2257static const struct seq_operations extfrag_sops = {
2258 .start = frag_start,
2259 .next = frag_next,
2260 .stop = frag_stop,
2261 .show = extfrag_show,
2262};
2263
2264DEFINE_SEQ_ATTRIBUTE(extfrag);
2265
2266static int __init extfrag_debug_init(void)
2267{
2268 struct dentry *extfrag_debug_root;
2269
2270 extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
2271
2272 debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL,
2273 &unusable_fops);
2274
2275 debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL,
2276 &extfrag_fops);
2277
2278 return 0;
2279}
2280
2281module_init(extfrag_debug_init);
2282#endif