1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/mm/vmstat.c
4 *
5 * Manages VM statistics
6 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
7 *
8 * zoned VM statistics
9 * Copyright (C) 2006 Silicon Graphics, Inc.,
10 * Christoph Lameter <christoph@lameter.com>
11 * Copyright (C) 2008-2014 Christoph Lameter
12 */
13#include <linux/fs.h>
14#include <linux/mm.h>
15#include <linux/err.h>
16#include <linux/module.h>
17#include <linux/slab.h>
18#include <linux/cpu.h>
19#include <linux/cpumask.h>
20#include <linux/vmstat.h>
21#include <linux/proc_fs.h>
22#include <linux/seq_file.h>
23#include <linux/debugfs.h>
24#include <linux/sched.h>
25#include <linux/math64.h>
26#include <linux/writeback.h>
27#include <linux/compaction.h>
28#include <linux/mm_inline.h>
29#include <linux/page_owner.h>
30#include <linux/sched/isolation.h>
31
32#include "internal.h"
33
34#ifdef CONFIG_NUMA
35int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;
36
37/* zero numa counters within a zone */
38static void zero_zone_numa_counters(struct zone *zone)
39{
40 int item, cpu;
41
42 for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++) {
43 atomic_long_set(&zone->vm_numa_event[item], 0);
44 for_each_online_cpu(cpu) {
45 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item]
46 = 0;
47 }
48 }
49}
50
51/* zero numa counters of all the populated zones */
52static void zero_zones_numa_counters(void)
53{
54 struct zone *zone;
55
56 for_each_populated_zone(zone)
57 zero_zone_numa_counters(zone);
58}
59
60/* zero global numa counters */
61static void zero_global_numa_counters(void)
62{
63 int item;
64
65 for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
66 atomic_long_set(&vm_numa_event[item], 0);
67}
68
69static void invalid_numa_statistics(void)
70{
71 zero_zones_numa_counters();
72 zero_global_numa_counters();
73}
74
75static DEFINE_MUTEX(vm_numa_stat_lock);
76
77int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
78 void *buffer, size_t *length, loff_t *ppos)
79{
80 int ret, oldval;
81
82 mutex_lock(&vm_numa_stat_lock);
83 if (write)
84 oldval = sysctl_vm_numa_stat;
85 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
86 if (ret || !write)
87 goto out;
88
89 if (oldval == sysctl_vm_numa_stat)
90 goto out;
91 else if (sysctl_vm_numa_stat == ENABLE_NUMA_STAT) {
92 static_branch_enable(&vm_numa_stat_key);
93 pr_info("enable numa statistics\n");
94 } else {
95 static_branch_disable(&vm_numa_stat_key);
96 invalid_numa_statistics();
97 pr_info("disable numa statistics, and clear numa counters\n");
98 }
99
100out:
101 mutex_unlock(&vm_numa_stat_lock);
102 return ret;
103}
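/*
 * Illustrative usage (editor's note, not part of the original file): the knob
 * handled above is typically exposed as /proc/sys/vm/numa_stat, e.g.
 *
 *	echo 0 > /proc/sys/vm/numa_stat    # stop collection, zero the counters
 *	echo 1 > /proc/sys/vm/numa_stat    # resume collection
 *
 * Disabling flips the vm_numa_stat_key static branch off and clears both the
 * per-zone and the global NUMA event counters via invalid_numa_statistics().
 */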
104#endif
105
106#ifdef CONFIG_VM_EVENT_COUNTERS
107DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
108EXPORT_PER_CPU_SYMBOL(vm_event_states);
109
110static void sum_vm_events(unsigned long *ret)
111{
112 int cpu;
113 int i;
114
115 memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
116
117 for_each_online_cpu(cpu) {
118 struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
119
120 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
121 ret[i] += this->event[i];
122 }
123}
124
125/*
126 * Accumulate the vm event counters across all CPUs.
127 * The result is unavoidably approximate - it can change
128 * during and after execution of this function.
129*/
130void all_vm_events(unsigned long *ret)
131{
132 cpus_read_lock();
133 sum_vm_events(ret);
134 cpus_read_unlock();
135}
136EXPORT_SYMBOL_GPL(all_vm_events);
137
138/*
139 * Fold the foreign cpu events into our own.
140 *
141 * This is adding to the events on one processor
142 * but keeps the global counts constant.
143 */
144void vm_events_fold_cpu(int cpu)
145{
146 struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
147 int i;
148
149 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
150 count_vm_events(i, fold_state->event[i]);
151 fold_state->event[i] = 0;
152 }
153}
154
155#endif /* CONFIG_VM_EVENT_COUNTERS */
156
157/*
158 * Manage combined zone based / global counters
159 *
160 * vm_stat contains the global counters
161 */
162atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
163atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
164atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS] __cacheline_aligned_in_smp;
165EXPORT_SYMBOL(vm_zone_stat);
166EXPORT_SYMBOL(vm_node_stat);
167
168#ifdef CONFIG_NUMA
169static void fold_vm_zone_numa_events(struct zone *zone)
170{
171 unsigned long zone_numa_events[NR_VM_NUMA_EVENT_ITEMS] = { 0, };
172 int cpu;
173 enum numa_stat_item item;
174
175 for_each_online_cpu(cpu) {
176 struct per_cpu_zonestat *pzstats;
177
178 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
179 for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
180 zone_numa_events[item] += xchg(&pzstats->vm_numa_event[item], 0);
181 }
182
183 for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
184 zone_numa_event_add(zone_numa_events[item], zone, item);
185}
186
187void fold_vm_numa_events(void)
188{
189 struct zone *zone;
190
191 for_each_populated_zone(zone)
192 fold_vm_zone_numa_events(zone);
193}
194#endif
195
196#ifdef CONFIG_SMP
197
198int calculate_pressure_threshold(struct zone *zone)
199{
200 int threshold;
201 int watermark_distance;
202
203 /*
204 * As vmstats are not up to date, there is drift between the estimated
205 * and real values. For high thresholds and a high number of CPUs, it
206 * is possible for the min watermark to be breached while the estimated
207 * value looks fine. The pressure threshold is a reduced value such
208 * that even the maximum amount of drift will not accidentally breach
209 * the min watermark
210 */
211 watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
212 threshold = max(1, (int)(watermark_distance / num_online_cpus()));
213
214 /*
215 * Maximum threshold is 125
216 */
217 threshold = min(125, threshold);
218
219 return threshold;
220}
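/*
 * Worked example (illustrative numbers): if the low-min watermark gap is
 * 1024 pages and 16 CPUs are online, the pressure threshold becomes
 * max(1, 1024 / 16) = 64, well under the 125 cap.  Even if every CPU sits
 * just below its threshold, the total unaccounted drift (16 * 64 = 1024
 * pages) cannot push the zone below the min watermark unnoticed.
 */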
221
222int calculate_normal_threshold(struct zone *zone)
223{
224 int threshold;
225 int mem; /* memory in 128 MB units */
226
227 /*
228 * The threshold scales with the number of processors and the amount
229 * of memory per zone. More memory means that we can defer updates for
230 * longer; more processors could lead to more contention.
231 * fls() is used to have a cheap way of logarithmic scaling.
232 *
233 * Some sample thresholds:
234 *
235 * Threshold Processors (fls) Zonesize fls(mem)+1
236 * ------------------------------------------------------------------
237 * 8 1 1 0.9-1 GB 4
238 * 16 2 2 0.9-1 GB 4
239 * 20 2 2 1-2 GB 5
240 * 24 2 2 2-4 GB 6
241 * 28 2 2 4-8 GB 7
242 * 32 2 2 8-16 GB 8
243 * 4 2 2 <128M 1
244 * 30 4 3 2-4 GB 5
245 * 48 4 3 8-16 GB 8
246 * 32 8 4 1-2 GB 4
247 * 32 8 4 0.9-1GB 4
248 * 10 16 5 <128M 1
249 * 40 16 5 900M 4
250 * 70 64 7 2-4 GB 5
251 * 84 64 7 4-8 GB 6
252 * 108 512 9 4-8 GB 6
253 * 125 1024 10 8-16 GB 8
254 * 125 1024 10 16-32 GB 9
255 */
256
257 mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);
258
259 threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
260
261 /*
262 * Maximum threshold is 125
263 */
264 threshold = min(125, threshold);
265
266 return threshold;
267}
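/*
 * Worked example (illustrative numbers): a zone with 4 GB of managed memory
 * has mem = 4 GB / 128 MB = 32, so fls(mem) = 6.  With 8 online CPUs,
 * fls(8) = 4, giving threshold = 2 * 4 * (1 + 6) = 56, below the 125 cap.
 */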
268
269/*
270 * Refresh the thresholds for each zone.
271 */
272void refresh_zone_stat_thresholds(void)
273{
274 struct pglist_data *pgdat;
275 struct zone *zone;
276 int cpu;
277 int threshold;
278
279 /* Zero current pgdat thresholds */
280 for_each_online_pgdat(pgdat) {
281 for_each_online_cpu(cpu) {
282 per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
283 }
284 }
285
286 for_each_populated_zone(zone) {
287 struct pglist_data *pgdat = zone->zone_pgdat;
288 unsigned long max_drift, tolerate_drift;
289
290 threshold = calculate_normal_threshold(zone);
291
292 for_each_online_cpu(cpu) {
293 int pgdat_threshold;
294
295 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
296 = threshold;
297
298 /* Base nodestat threshold on the largest populated zone. */
299 pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
300 per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
301 = max(threshold, pgdat_threshold);
302 }
303
304 /*
305 * Only set percpu_drift_mark if there is a danger that
306 * NR_FREE_PAGES reports the low watermark is ok when in fact
307 * the min watermark could be breached by an allocation
308 */
309 tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
310 max_drift = num_online_cpus() * threshold;
311 if (max_drift > tolerate_drift)
312 zone->percpu_drift_mark = high_wmark_pages(zone) +
313 max_drift;
314 }
315}
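/*
 * Example of the drift check above (illustrative numbers): with 64 online
 * CPUs and a threshold of 100, up to 64 * 100 = 6400 NR_FREE_PAGES updates
 * may be sitting in per-cpu diffs.  If the low-min watermark gap is smaller
 * than that, percpu_drift_mark is set to high_wmark + 6400, so free-page
 * readers such as the watermark checks know to fall back to a more precise
 * per-cpu snapshot (see zone_page_state_snapshot()).
 */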
316
317void set_pgdat_percpu_threshold(pg_data_t *pgdat,
318 int (*calculate_pressure)(struct zone *))
319{
320 struct zone *zone;
321 int cpu;
322 int threshold;
323 int i;
324
325 for (i = 0; i < pgdat->nr_zones; i++) {
326 zone = &pgdat->node_zones[i];
327 if (!zone->percpu_drift_mark)
328 continue;
329
330 threshold = (*calculate_pressure)(zone);
331 for_each_online_cpu(cpu)
332 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
333 = threshold;
334 }
335}
336
337/*
338 * For use when we know that interrupts are disabled,
339 * or when we know that preemption is disabled and that
340 * particular counter cannot be updated from interrupt context.
341 */
342void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
343 long delta)
344{
345 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
346 s8 __percpu *p = pcp->vm_stat_diff + item;
347 long x;
348 long t;
349
350 /*
351 * Accurate vmstat updates require a RMW. On !PREEMPT_RT kernels,
352 * atomicity is provided by IRQs being disabled -- either explicitly
353 * or via local_lock_irq. On PREEMPT_RT, local_lock_irq only disables
354 * CPU migrations and preemption potentially corrupts a counter so
355 * disable preemption.
356 */
357 preempt_disable_nested();
358
359 x = delta + __this_cpu_read(*p);
360
361 t = __this_cpu_read(pcp->stat_threshold);
362
363 if (unlikely(abs(x) > t)) {
364 zone_page_state_add(x, zone, item);
365 x = 0;
366 }
367 __this_cpu_write(*p, x);
368
369 preempt_enable_nested();
370}
371EXPORT_SYMBOL(__mod_zone_page_state);
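/*
 * Example of the folding behaviour above (illustrative numbers): with a
 * stat_threshold of 32, repeated calls like
 *
 *	__mod_zone_page_state(zone, NR_FREE_PAGES, -8);
 *
 * only touch the per-cpu diff until it reaches -40 (|x| > 32), at which point
 * the whole -40 is folded into the zone-wide atomic counter and the per-cpu
 * diff is reset to 0.
 */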
372
373void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
374 long delta)
375{
376 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
377 s8 __percpu *p = pcp->vm_node_stat_diff + item;
378 long x;
379 long t;
380
381 if (vmstat_item_in_bytes(item)) {
382 /*
383 * Only cgroups use subpage accounting right now; at
384 * the global level, these items still change in
385 * multiples of whole pages. Store them as pages
386 * internally to keep the per-cpu counters compact.
387 */
388 VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
389 delta >>= PAGE_SHIFT;
390 }
391
392 /* See __mod_zone_page_state */
393 preempt_disable_nested();
394
395 x = delta + __this_cpu_read(*p);
396
397 t = __this_cpu_read(pcp->stat_threshold);
398
399 if (unlikely(abs(x) > t)) {
400 node_page_state_add(x, pgdat, item);
401 x = 0;
402 }
403 __this_cpu_write(*p, x);
404
405 preempt_enable_nested();
406}
407EXPORT_SYMBOL(__mod_node_page_state);
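/*
 * Example for the byte-counted items above (illustrative): a caller such as
 * the slab accounting code updating NR_SLAB_RECLAIMABLE_B passes a delta in
 * bytes, e.g. 2 * PAGE_SIZE for two pages added to a reclaimable cache.  The
 * VM_WARN_ON_ONCE() checks the delta is a whole number of pages and the
 * shift stores it as 2 pages, keeping the s8 per-cpu counters compact.
 */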
408
409/*
410 * Optimized increment and decrement functions.
411 *
412 * These are only for a single page and therefore can take a struct page *
413 * argument instead of struct zone *. This allows the inclusion of the code
414 * generated for page_zone(page) into the optimized functions.
415 *
416 * No overflow check is necessary and therefore the differential can be
417 * incremented or decremented in place which may allow the compilers to
418 * generate better code.
419 * The increment or decrement is known and therefore one boundary check can
420 * be omitted.
421 *
422 * NOTE: These functions are very performance sensitive. Change only
423 * with care.
424 *
425 * Some processors have inc/dec instructions that are atomic vs an interrupt.
426 * However, the code must first determine the differential location in a zone
427 * based on the processor number and then inc/dec the counter. There is no
428 * guarantee without disabling preemption that the processor will not change
429 * in between and therefore the atomicity vs. interrupt cannot be exploited
430 * in a useful way here.
431 */
432void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
433{
434 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
435 s8 __percpu *p = pcp->vm_stat_diff + item;
436 s8 v, t;
437
438 /* See __mod_node_page_state */
439 preempt_disable_nested();
440
441 v = __this_cpu_inc_return(*p);
442 t = __this_cpu_read(pcp->stat_threshold);
443 if (unlikely(v > t)) {
444 s8 overstep = t >> 1;
445
446 zone_page_state_add(v + overstep, zone, item);
447 __this_cpu_write(*p, -overstep);
448 }
449
450 preempt_enable_nested();
451}
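/*
 * Example of the overstep above (illustrative numbers): with a threshold of
 * 32, the 33rd increment on a cpu folds 33 + 16 = 49 into the zone counter
 * and leaves the per-cpu diff at -16, so roughly 48 further increments are
 * absorbed locally before the next fold.
 */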
452
453void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
454{
455 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
456 s8 __percpu *p = pcp->vm_node_stat_diff + item;
457 s8 v, t;
458
459 VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
460
461 /* See __mod_node_page_state */
462 preempt_disable_nested();
463
464 v = __this_cpu_inc_return(*p);
465 t = __this_cpu_read(pcp->stat_threshold);
466 if (unlikely(v > t)) {
467 s8 overstep = t >> 1;
468
469 node_page_state_add(v + overstep, pgdat, item);
470 __this_cpu_write(*p, -overstep);
471 }
472
473 preempt_enable_nested();
474}
475
476void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
477{
478 __inc_zone_state(page_zone(page), item);
479}
480EXPORT_SYMBOL(__inc_zone_page_state);
481
482void __inc_node_page_state(struct page *page, enum node_stat_item item)
483{
484 __inc_node_state(page_pgdat(page), item);
485}
486EXPORT_SYMBOL(__inc_node_page_state);
487
488void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
489{
490 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
491 s8 __percpu *p = pcp->vm_stat_diff + item;
492 s8 v, t;
493
494 /* See __mod_node_page_state */
495 preempt_disable_nested();
496
497 v = __this_cpu_dec_return(*p);
498 t = __this_cpu_read(pcp->stat_threshold);
499 if (unlikely(v < - t)) {
500 s8 overstep = t >> 1;
501
502 zone_page_state_add(v - overstep, zone, item);
503 __this_cpu_write(*p, overstep);
504 }
505
506 preempt_enable_nested();
507}
508
509void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
510{
511 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
512 s8 __percpu *p = pcp->vm_node_stat_diff + item;
513 s8 v, t;
514
515 VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
516
517 /* See __mod_node_page_state */
518 preempt_disable_nested();
519
520 v = __this_cpu_dec_return(*p);
521 t = __this_cpu_read(pcp->stat_threshold);
522 if (unlikely(v < - t)) {
523 s8 overstep = t >> 1;
524
525 node_page_state_add(v - overstep, pgdat, item);
526 __this_cpu_write(*p, overstep);
527 }
528
529 preempt_enable_nested();
530}
531
532void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
533{
534 __dec_zone_state(page_zone(page), item);
535}
536EXPORT_SYMBOL(__dec_zone_page_state);
537
538void __dec_node_page_state(struct page *page, enum node_stat_item item)
539{
540 __dec_node_state(page_pgdat(page), item);
541}
542EXPORT_SYMBOL(__dec_node_page_state);
543
544#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
545/*
546 * If we have cmpxchg_local support then we do not need to incur the overhead
547 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
548 *
549 * mod_state() modifies the zone counter state through atomic per cpu
550 * operations.
551 *
552 * Overstep mode specifies how overstep should be handled:
553 * 0 No overstepping
554 * 1 Overstepping half of threshold
555 * -1 Overstepping minus half of threshold
556*/
557static inline void mod_zone_state(struct zone *zone,
558 enum zone_stat_item item, long delta, int overstep_mode)
559{
560 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
561 s8 __percpu *p = pcp->vm_stat_diff + item;
562 long n, t, z;
563 s8 o;
564
565 o = this_cpu_read(*p);
566 do {
567 z = 0; /* overflow to zone counters */
568
569 /*
570 * The fetching of the stat_threshold is racy. We may apply
571 * a counter threshold to the wrong cpu if we get
572 * rescheduled while executing here. However, the next
573 * counter update will apply the threshold again and
574 * therefore bring the counter under the threshold again.
575 *
576 * Most of the time the thresholds are the same anyways
577 * for all cpus in a zone.
578 */
579 t = this_cpu_read(pcp->stat_threshold);
580
581 n = delta + (long)o;
582
583 if (abs(n) > t) {
584 int os = overstep_mode * (t >> 1);
585
586 /* Overflow must be added to zone counters */
587 z = n + os;
588 n = -os;
589 }
590 } while (!this_cpu_try_cmpxchg(*p, &o, n));
591
592 if (z)
593 zone_page_state_add(z, zone, item);
594}
595
596void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
597 long delta)
598{
599 mod_zone_state(zone, item, delta, 0);
600}
601EXPORT_SYMBOL(mod_zone_page_state);
602
603void inc_zone_page_state(struct page *page, enum zone_stat_item item)
604{
605 mod_zone_state(page_zone(page), item, 1, 1);
606}
607EXPORT_SYMBOL(inc_zone_page_state);
608
609void dec_zone_page_state(struct page *page, enum zone_stat_item item)
610{
611 mod_zone_state(page_zone(page), item, -1, -1);
612}
613EXPORT_SYMBOL(dec_zone_page_state);
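/*
 * The three wrappers above map onto the overstep modes documented at
 * mod_zone_state(): mod_zone_page_state() passes 0 (no overstep),
 * inc_zone_page_state() passes 1 and dec_zone_page_state() passes -1, so a
 * stream of pure increments (or decrements) oversteps by half a threshold in
 * the direction it is already moving and folds into the zone counter less
 * often.
 */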
614
615static inline void mod_node_state(struct pglist_data *pgdat,
616 enum node_stat_item item, int delta, int overstep_mode)
617{
618 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
619 s8 __percpu *p = pcp->vm_node_stat_diff + item;
620 long n, t, z;
621 s8 o;
622
623 if (vmstat_item_in_bytes(item)) {
624 /*
625 * Only cgroups use subpage accounting right now; at
626 * the global level, these items still change in
627 * multiples of whole pages. Store them as pages
628 * internally to keep the per-cpu counters compact.
629 */
630 VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
631 delta >>= PAGE_SHIFT;
632 }
633
634 o = this_cpu_read(*p);
635 do {
636 z = 0; /* overflow to node counters */
637
638 /*
639 * The fetching of the stat_threshold is racy. We may apply
640 * a counter threshold to the wrong cpu if we get
641 * rescheduled while executing here. However, the next
642 * counter update will apply the threshold again and
643 * therefore bring the counter under the threshold again.
644 *
645 * Most of the time the thresholds are the same anyways
646 * for all cpus in a node.
647 */
648 t = this_cpu_read(pcp->stat_threshold);
649
650 n = delta + (long)o;
651
652 if (abs(n) > t) {
653 int os = overstep_mode * (t >> 1);
654
655 /* Overflow must be added to node counters */
656 z = n + os;
657 n = -os;
658 }
659 } while (!this_cpu_try_cmpxchg(*p, &o, n));
660
661 if (z)
662 node_page_state_add(z, pgdat, item);
663}
664
665void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
666 long delta)
667{
668 mod_node_state(pgdat, item, delta, 0);
669}
670EXPORT_SYMBOL(mod_node_page_state);
671
672void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
673{
674 mod_node_state(pgdat, item, 1, 1);
675}
676
677void inc_node_page_state(struct page *page, enum node_stat_item item)
678{
679 mod_node_state(page_pgdat(page), item, 1, 1);
680}
681EXPORT_SYMBOL(inc_node_page_state);
682
683void dec_node_page_state(struct page *page, enum node_stat_item item)
684{
685 mod_node_state(page_pgdat(page), item, -1, -1);
686}
687EXPORT_SYMBOL(dec_node_page_state);
688#else
689/*
690 * Use interrupt disable to serialize counter updates
691 */
692void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
693 long delta)
694{
695 unsigned long flags;
696
697 local_irq_save(flags);
698 __mod_zone_page_state(zone, item, delta);
699 local_irq_restore(flags);
700}
701EXPORT_SYMBOL(mod_zone_page_state);
702
703void inc_zone_page_state(struct page *page, enum zone_stat_item item)
704{
705 unsigned long flags;
706 struct zone *zone;
707
708 zone = page_zone(page);
709 local_irq_save(flags);
710 __inc_zone_state(zone, item);
711 local_irq_restore(flags);
712}
713EXPORT_SYMBOL(inc_zone_page_state);
714
715void dec_zone_page_state(struct page *page, enum zone_stat_item item)
716{
717 unsigned long flags;
718
719 local_irq_save(flags);
720 __dec_zone_page_state(page, item);
721 local_irq_restore(flags);
722}
723EXPORT_SYMBOL(dec_zone_page_state);
724
725void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
726{
727 unsigned long flags;
728
729 local_irq_save(flags);
730 __inc_node_state(pgdat, item);
731 local_irq_restore(flags);
732}
733EXPORT_SYMBOL(inc_node_state);
734
735void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
736 long delta)
737{
738 unsigned long flags;
739
740 local_irq_save(flags);
741 __mod_node_page_state(pgdat, item, delta);
742 local_irq_restore(flags);
743}
744EXPORT_SYMBOL(mod_node_page_state);
745
746void inc_node_page_state(struct page *page, enum node_stat_item item)
747{
748 unsigned long flags;
749 struct pglist_data *pgdat;
750
751 pgdat = page_pgdat(page);
752 local_irq_save(flags);
753 __inc_node_state(pgdat, item);
754 local_irq_restore(flags);
755}
756EXPORT_SYMBOL(inc_node_page_state);
757
758void dec_node_page_state(struct page *page, enum node_stat_item item)
759{
760 unsigned long flags;
761
762 local_irq_save(flags);
763 __dec_node_page_state(page, item);
764 local_irq_restore(flags);
765}
766EXPORT_SYMBOL(dec_node_page_state);
767#endif
768
769/*
770 * Fold a differential into the global counters.
771 * Returns the number of counters updated.
772 */
773static int fold_diff(int *zone_diff, int *node_diff)
774{
775 int i;
776 int changes = 0;
777
778 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
779 if (zone_diff[i]) {
780 atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
781 changes++;
782 }
783
784 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
785 if (node_diff[i]) {
786 atomic_long_add(node_diff[i], &vm_node_stat[i]);
787 changes++;
788 }
789 return changes;
790}
791
792/*
793 * Update the zone counters for the current cpu.
794 *
795 * Note that refresh_cpu_vm_stats strives to only access
796 * node local memory. The per cpu pagesets on remote zones are placed
797 * in the memory local to the processor using that pageset. So the
798 * loop over all zones will access a series of cachelines local to
799 * the processor.
800 *
801 * The call to zone_page_state_add updates the cachelines with the
802 * statistics in the remote zone struct as well as the global cachelines
803 * with the global counters. These could cause remote node cache line
804 * bouncing and will have to be only done when necessary.
805 *
806 * The function returns the number of global counters updated.
807 */
808static int refresh_cpu_vm_stats(bool do_pagesets)
809{
810 struct pglist_data *pgdat;
811 struct zone *zone;
812 int i;
813 int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
814 int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
815 int changes = 0;
816
817 for_each_populated_zone(zone) {
818 struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
819 struct per_cpu_pages __percpu *pcp = zone->per_cpu_pageset;
820
821 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
822 int v;
823
824 v = this_cpu_xchg(pzstats->vm_stat_diff[i], 0);
825 if (v) {
826
827 atomic_long_add(v, &zone->vm_stat[i]);
828 global_zone_diff[i] += v;
829#ifdef CONFIG_NUMA
830 /* 3 seconds idle till flush */
831 __this_cpu_write(pcp->expire, 3);
832#endif
833 }
834 }
835
836 if (do_pagesets) {
837 cond_resched();
838
839 changes += decay_pcp_high(zone, this_cpu_ptr(pcp));
840#ifdef CONFIG_NUMA
841 /*
842 * Deal with draining the remote pageset of this
843 * processor.
844 *
845 * Check if there are pages remaining in this pageset;
846 * if not, then there is nothing to expire.
847 */
848 if (!__this_cpu_read(pcp->expire) ||
849 !__this_cpu_read(pcp->count))
850 continue;
851
852 /*
853 * We never drain zones local to this processor.
854 */
855 if (zone_to_nid(zone) == numa_node_id()) {
856 __this_cpu_write(pcp->expire, 0);
857 continue;
858 }
859
860 if (__this_cpu_dec_return(pcp->expire)) {
861 changes++;
862 continue;
863 }
864
865 if (__this_cpu_read(pcp->count)) {
866 drain_zone_pages(zone, this_cpu_ptr(pcp));
867 changes++;
868 }
869#endif
870 }
871 }
872
873 for_each_online_pgdat(pgdat) {
874 struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;
875
876 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
877 int v;
878
879 v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
880 if (v) {
881 atomic_long_add(v, &pgdat->vm_stat[i]);
882 global_node_diff[i] += v;
883 }
884 }
885 }
886
887 changes += fold_diff(global_zone_diff, global_node_diff);
888 return changes;
889}
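/*
 * Illustrative timeline for the pcp->expire handling above: when this cpu
 * updates a remote zone's counters (e.g. after freeing pages into that
 * zone's pcp list), the stat fold arms expire = 3.  Each subsequent
 * refresh_cpu_vm_stats(true) pass with no further activity decrements it, so
 * after roughly three stat intervals of idleness the leftover remote pages
 * are handed back via drain_zone_pages().
 */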
890
891/*
892 * Fold the data for an offline cpu into the global array.
893 * There cannot be any access by the offline cpu and therefore
894 * synchronization is simplified.
895 */
896void cpu_vm_stats_fold(int cpu)
897{
898 struct pglist_data *pgdat;
899 struct zone *zone;
900 int i;
901 int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
902 int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
903
904 for_each_populated_zone(zone) {
905 struct per_cpu_zonestat *pzstats;
906
907 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
908
909 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
910 if (pzstats->vm_stat_diff[i]) {
911 int v;
912
913 v = pzstats->vm_stat_diff[i];
914 pzstats->vm_stat_diff[i] = 0;
915 atomic_long_add(v, &zone->vm_stat[i]);
916 global_zone_diff[i] += v;
917 }
918 }
919#ifdef CONFIG_NUMA
920 for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) {
921 if (pzstats->vm_numa_event[i]) {
922 unsigned long v;
923
924 v = pzstats->vm_numa_event[i];
925 pzstats->vm_numa_event[i] = 0;
926 zone_numa_event_add(v, zone, i);
927 }
928 }
929#endif
930 }
931
932 for_each_online_pgdat(pgdat) {
933 struct per_cpu_nodestat *p;
934
935 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
936
937 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
938 if (p->vm_node_stat_diff[i]) {
939 int v;
940
941 v = p->vm_node_stat_diff[i];
942 p->vm_node_stat_diff[i] = 0;
943 atomic_long_add(v, &pgdat->vm_stat[i]);
944 global_node_diff[i] += v;
945 }
946 }
947
948 fold_diff(global_zone_diff, global_node_diff);
949}
950
951/*
952 * this is only called if !populated_zone(zone), which implies no other users of
953 * pset->vm_stat_diff[] exist.
954 */
955void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *pzstats)
956{
957 unsigned long v;
958 int i;
959
960 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
961 if (pzstats->vm_stat_diff[i]) {
962 v = pzstats->vm_stat_diff[i];
963 pzstats->vm_stat_diff[i] = 0;
964 zone_page_state_add(v, zone, i);
965 }
966 }
967
968#ifdef CONFIG_NUMA
969 for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) {
970 if (pzstats->vm_numa_event[i]) {
971 v = pzstats->vm_numa_event[i];
972 pzstats->vm_numa_event[i] = 0;
973 zone_numa_event_add(v, zone, i);
974 }
975 }
976#endif
977}
978#endif
979
980#ifdef CONFIG_NUMA
981/*
982 * Determine the per node value of a stat item. This function
983 * is called frequently in a NUMA machine, so try to be as
984 * frugal as possible.
985 */
986unsigned long sum_zone_node_page_state(int node,
987 enum zone_stat_item item)
988{
989 struct zone *zones = NODE_DATA(node)->node_zones;
990 int i;
991 unsigned long count = 0;
992
993 for (i = 0; i < MAX_NR_ZONES; i++)
994 count += zone_page_state(zones + i, item);
995
996 return count;
997}
998
999/* Determine the per node value of a numa stat item. */
1000unsigned long sum_zone_numa_event_state(int node,
1001 enum numa_stat_item item)
1002{
1003 struct zone *zones = NODE_DATA(node)->node_zones;
1004 unsigned long count = 0;
1005 int i;
1006
1007 for (i = 0; i < MAX_NR_ZONES; i++)
1008 count += zone_numa_event_state(zones + i, item);
1009
1010 return count;
1011}
1012
1013/*
1014 * Determine the per node value of a stat item.
1015 */
1016unsigned long node_page_state_pages(struct pglist_data *pgdat,
1017 enum node_stat_item item)
1018{
1019 long x = atomic_long_read(&pgdat->vm_stat[item]);
1020#ifdef CONFIG_SMP
1021 if (x < 0)
1022 x = 0;
1023#endif
1024 return x;
1025}
1026
1027unsigned long node_page_state(struct pglist_data *pgdat,
1028 enum node_stat_item item)
1029{
1030 VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
1031
1032 return node_page_state_pages(pgdat, item);
1033}
1034#endif
1035
1036#ifdef CONFIG_COMPACTION
1037
1038struct contig_page_info {
1039 unsigned long free_pages;
1040 unsigned long free_blocks_total;
1041 unsigned long free_blocks_suitable;
1042};
1043
1044/*
1045 * Calculate the number of free pages in a zone, how many contiguous
1046 * pages are free and how many are large enough to satisfy an allocation of
1047 * the target size. Note that this function makes no attempt to estimate
1048 * how many suitable free blocks there *might* be if MOVABLE pages were
1049 * migrated. Calculating that is possible, but expensive and can be
1050 * figured out from userspace
1051 */
1052static void fill_contig_page_info(struct zone *zone,
1053 unsigned int suitable_order,
1054 struct contig_page_info *info)
1055{
1056 unsigned int order;
1057
1058 info->free_pages = 0;
1059 info->free_blocks_total = 0;
1060 info->free_blocks_suitable = 0;
1061
1062 for (order = 0; order < NR_PAGE_ORDERS; order++) {
1063 unsigned long blocks;
1064
1065 /*
1066 * Count number of free blocks.
1067 *
1068 * Access to nr_free is lockless as nr_free is used only for
1069 * diagnostic purposes. Use data_race to avoid KCSAN warning.
1070 */
1071 blocks = data_race(zone->free_area[order].nr_free);
1072 info->free_blocks_total += blocks;
1073
1074 /* Count free base pages */
1075 info->free_pages += blocks << order;
1076
1077 /* Count the suitable free blocks */
1078 if (order >= suitable_order)
1079 info->free_blocks_suitable += blocks <<
1080 (order - suitable_order);
1081 }
1082}
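/*
 * Example of the suitable-block accounting above (illustrative numbers): for
 * a suitable_order of 2, one free order-4 block contributes
 * 1 << (4 - 2) = 4 suitable blocks and 1 << 4 = 16 free base pages, while a
 * free order-1 block contributes 2 base pages and no suitable blocks.
 */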
1083
1084/*
1085 * A fragmentation index only makes sense if an allocation of a requested
1086 * size would fail. If that is true, the fragmentation index indicates
1087 * whether external fragmentation or a lack of memory was the problem.
1088 * The value can be used to determine if page reclaim or compaction
1089 * should be used
1090 */
1091static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
1092{
1093 unsigned long requested = 1UL << order;
1094
1095 if (WARN_ON_ONCE(order > MAX_PAGE_ORDER))
1096 return 0;
1097
1098 if (!info->free_blocks_total)
1099 return 0;
1100
1101 /* Fragmentation index only makes sense when a request would fail */
1102 if (info->free_blocks_suitable)
1103 return -1000;
1104
1105 /*
1106 * Index is between 0 and 1 so return within 3 decimal places
1107 *
1108 * 0 => allocation would fail due to lack of memory
1109 * 1 => allocation would fail due to fragmentation
1110 */
1111 return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total);
1112}
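/*
 * Worked example (illustrative numbers): for an order-4 request (16 pages)
 * against a zone with free_pages = 1000, free_blocks_total = 300 and no
 * suitable blocks, the index is
 *	1000 - (1000 + 1000 * 1000 / 16) / 300 = 1000 - 211 = 789
 * i.e. 0.789, pointing mostly at external fragmentation rather than lack of
 * memory, so compaction is more promising than reclaim.
 */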
1113
1114/*
1115 * Calculates external fragmentation within a zone wrt the given order.
1116 * It is defined as the percentage of pages found in blocks of size
1117 * less than 1 << order. It returns values in range [0, 100].
1118 */
1119unsigned int extfrag_for_order(struct zone *zone, unsigned int order)
1120{
1121 struct contig_page_info info;
1122
1123 fill_contig_page_info(zone, order, &info);
1124 if (info.free_pages == 0)
1125 return 0;
1126
1127 return div_u64((info.free_pages -
1128 (info.free_blocks_suitable << order)) * 100,
1129 info.free_pages);
1130}
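/*
 * Worked example (illustrative numbers): with order = 3, free_pages = 1000
 * and free_blocks_suitable = 100, the pages already sitting in blocks of at
 * least order 3 amount to 100 << 3 = 800, so the external fragmentation for
 * this order is (1000 - 800) * 100 / 1000 = 20%.
 */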
1131
1132/* Same as __fragmentation index but allocs contig_page_info on stack */
1133int fragmentation_index(struct zone *zone, unsigned int order)
1134{
1135 struct contig_page_info info;
1136
1137 fill_contig_page_info(zone, order, &info);
1138 return __fragmentation_index(order, &info);
1139}
1140#endif
1141
1142#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || \
1143 defined(CONFIG_NUMA) || defined(CONFIG_MEMCG)
1144#ifdef CONFIG_ZONE_DMA
1145#define TEXT_FOR_DMA(xx) xx "_dma",
1146#else
1147#define TEXT_FOR_DMA(xx)
1148#endif
1149
1150#ifdef CONFIG_ZONE_DMA32
1151#define TEXT_FOR_DMA32(xx) xx "_dma32",
1152#else
1153#define TEXT_FOR_DMA32(xx)
1154#endif
1155
1156#ifdef CONFIG_HIGHMEM
1157#define TEXT_FOR_HIGHMEM(xx) xx "_high",
1158#else
1159#define TEXT_FOR_HIGHMEM(xx)
1160#endif
1161
1162#ifdef CONFIG_ZONE_DEVICE
1163#define TEXT_FOR_DEVICE(xx) xx "_device",
1164#else
1165#define TEXT_FOR_DEVICE(xx)
1166#endif
1167
1168#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
1169 TEXT_FOR_HIGHMEM(xx) xx "_movable", \
1170 TEXT_FOR_DEVICE(xx)
1171
1172const char * const vmstat_text[] = {
1173 /* enum zone_stat_item counters */
1174 "nr_free_pages",
1175 "nr_zone_inactive_anon",
1176 "nr_zone_active_anon",
1177 "nr_zone_inactive_file",
1178 "nr_zone_active_file",
1179 "nr_zone_unevictable",
1180 "nr_zone_write_pending",
1181 "nr_mlock",
1182 "nr_bounce",
1183#if IS_ENABLED(CONFIG_ZSMALLOC)
1184 "nr_zspages",
1185#endif
1186 "nr_free_cma",
1187#ifdef CONFIG_UNACCEPTED_MEMORY
1188 "nr_unaccepted",
1189#endif
1190
1191 /* enum numa_stat_item counters */
1192#ifdef CONFIG_NUMA
1193 "numa_hit",
1194 "numa_miss",
1195 "numa_foreign",
1196 "numa_interleave",
1197 "numa_local",
1198 "numa_other",
1199#endif
1200
1201 /* enum node_stat_item counters */
1202 "nr_inactive_anon",
1203 "nr_active_anon",
1204 "nr_inactive_file",
1205 "nr_active_file",
1206 "nr_unevictable",
1207 "nr_slab_reclaimable",
1208 "nr_slab_unreclaimable",
1209 "nr_isolated_anon",
1210 "nr_isolated_file",
1211 "workingset_nodes",
1212 "workingset_refault_anon",
1213 "workingset_refault_file",
1214 "workingset_activate_anon",
1215 "workingset_activate_file",
1216 "workingset_restore_anon",
1217 "workingset_restore_file",
1218 "workingset_nodereclaim",
1219 "nr_anon_pages",
1220 "nr_mapped",
1221 "nr_file_pages",
1222 "nr_dirty",
1223 "nr_writeback",
1224 "nr_writeback_temp",
1225 "nr_shmem",
1226 "nr_shmem_hugepages",
1227 "nr_shmem_pmdmapped",
1228 "nr_file_hugepages",
1229 "nr_file_pmdmapped",
1230 "nr_anon_transparent_hugepages",
1231 "nr_vmscan_write",
1232 "nr_vmscan_immediate_reclaim",
1233 "nr_dirtied",
1234 "nr_written",
1235 "nr_throttled_written",
1236 "nr_kernel_misc_reclaimable",
1237 "nr_foll_pin_acquired",
1238 "nr_foll_pin_released",
1239 "nr_kernel_stack",
1240#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
1241 "nr_shadow_call_stack",
1242#endif
1243 "nr_page_table_pages",
1244 "nr_sec_page_table_pages",
1245#ifdef CONFIG_SWAP
1246 "nr_swapcached",
1247#endif
1248#ifdef CONFIG_NUMA_BALANCING
1249 "pgpromote_success",
1250 "pgpromote_candidate",
1251#endif
1252 "pgdemote_kswapd",
1253 "pgdemote_direct",
1254 "pgdemote_khugepaged",
1255
1256 /* enum writeback_stat_item counters */
1257 "nr_dirty_threshold",
1258 "nr_dirty_background_threshold",
1259
1260#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
1261 /* enum vm_event_item counters */
1262 "pgpgin",
1263 "pgpgout",
1264 "pswpin",
1265 "pswpout",
1266
1267 TEXTS_FOR_ZONES("pgalloc")
1268 TEXTS_FOR_ZONES("allocstall")
1269 TEXTS_FOR_ZONES("pgskip")
1270
1271 "pgfree",
1272 "pgactivate",
1273 "pgdeactivate",
1274 "pglazyfree",
1275
1276 "pgfault",
1277 "pgmajfault",
1278 "pglazyfreed",
1279
1280 "pgrefill",
1281 "pgreuse",
1282 "pgsteal_kswapd",
1283 "pgsteal_direct",
1284 "pgsteal_khugepaged",
1285 "pgscan_kswapd",
1286 "pgscan_direct",
1287 "pgscan_khugepaged",
1288 "pgscan_direct_throttle",
1289 "pgscan_anon",
1290 "pgscan_file",
1291 "pgsteal_anon",
1292 "pgsteal_file",
1293
1294#ifdef CONFIG_NUMA
1295 "zone_reclaim_failed",
1296#endif
1297 "pginodesteal",
1298 "slabs_scanned",
1299 "kswapd_inodesteal",
1300 "kswapd_low_wmark_hit_quickly",
1301 "kswapd_high_wmark_hit_quickly",
1302 "pageoutrun",
1303
1304 "pgrotated",
1305
1306 "drop_pagecache",
1307 "drop_slab",
1308 "oom_kill",
1309
1310#ifdef CONFIG_NUMA_BALANCING
1311 "numa_pte_updates",
1312 "numa_huge_pte_updates",
1313 "numa_hint_faults",
1314 "numa_hint_faults_local",
1315 "numa_pages_migrated",
1316#endif
1317#ifdef CONFIG_MIGRATION
1318 "pgmigrate_success",
1319 "pgmigrate_fail",
1320 "thp_migration_success",
1321 "thp_migration_fail",
1322 "thp_migration_split",
1323#endif
1324#ifdef CONFIG_COMPACTION
1325 "compact_migrate_scanned",
1326 "compact_free_scanned",
1327 "compact_isolated",
1328 "compact_stall",
1329 "compact_fail",
1330 "compact_success",
1331 "compact_daemon_wake",
1332 "compact_daemon_migrate_scanned",
1333 "compact_daemon_free_scanned",
1334#endif
1335
1336#ifdef CONFIG_HUGETLB_PAGE
1337 "htlb_buddy_alloc_success",
1338 "htlb_buddy_alloc_fail",
1339#endif
1340#ifdef CONFIG_CMA
1341 "cma_alloc_success",
1342 "cma_alloc_fail",
1343#endif
1344 "unevictable_pgs_culled",
1345 "unevictable_pgs_scanned",
1346 "unevictable_pgs_rescued",
1347 "unevictable_pgs_mlocked",
1348 "unevictable_pgs_munlocked",
1349 "unevictable_pgs_cleared",
1350 "unevictable_pgs_stranded",
1351
1352#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1353 "thp_fault_alloc",
1354 "thp_fault_fallback",
1355 "thp_fault_fallback_charge",
1356 "thp_collapse_alloc",
1357 "thp_collapse_alloc_failed",
1358 "thp_file_alloc",
1359 "thp_file_fallback",
1360 "thp_file_fallback_charge",
1361 "thp_file_mapped",
1362 "thp_split_page",
1363 "thp_split_page_failed",
1364 "thp_deferred_split_page",
1365 "thp_split_pmd",
1366 "thp_scan_exceed_none_pte",
1367 "thp_scan_exceed_swap_pte",
1368 "thp_scan_exceed_share_pte",
1369#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1370 "thp_split_pud",
1371#endif
1372 "thp_zero_page_alloc",
1373 "thp_zero_page_alloc_failed",
1374 "thp_swpout",
1375 "thp_swpout_fallback",
1376#endif
1377#ifdef CONFIG_MEMORY_BALLOON
1378 "balloon_inflate",
1379 "balloon_deflate",
1380#ifdef CONFIG_BALLOON_COMPACTION
1381 "balloon_migrate",
1382#endif
1383#endif /* CONFIG_MEMORY_BALLOON */
1384#ifdef CONFIG_DEBUG_TLBFLUSH
1385 "nr_tlb_remote_flush",
1386 "nr_tlb_remote_flush_received",
1387 "nr_tlb_local_flush_all",
1388 "nr_tlb_local_flush_one",
1389#endif /* CONFIG_DEBUG_TLBFLUSH */
1390
1391#ifdef CONFIG_SWAP
1392 "swap_ra",
1393 "swap_ra_hit",
1394#ifdef CONFIG_KSM
1395 "ksm_swpin_copy",
1396#endif
1397#endif
1398#ifdef CONFIG_KSM
1399 "cow_ksm",
1400#endif
1401#ifdef CONFIG_ZSWAP
1402 "zswpin",
1403 "zswpout",
1404 "zswpwb",
1405#endif
1406#ifdef CONFIG_X86
1407 "direct_map_level2_splits",
1408 "direct_map_level3_splits",
1409#endif
1410#ifdef CONFIG_PER_VMA_LOCK_STATS
1411 "vma_lock_success",
1412 "vma_lock_abort",
1413 "vma_lock_retry",
1414 "vma_lock_miss",
1415#endif
1416#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
1417};
1418#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */
1419
1420#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
1421 defined(CONFIG_PROC_FS)
1422static void *frag_start(struct seq_file *m, loff_t *pos)
1423{
1424 pg_data_t *pgdat;
1425 loff_t node = *pos;
1426
1427 for (pgdat = first_online_pgdat();
1428 pgdat && node;
1429 pgdat = next_online_pgdat(pgdat))
1430 --node;
1431
1432 return pgdat;
1433}
1434
1435static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
1436{
1437 pg_data_t *pgdat = (pg_data_t *)arg;
1438
1439 (*pos)++;
1440 return next_online_pgdat(pgdat);
1441}
1442
1443static void frag_stop(struct seq_file *m, void *arg)
1444{
1445}
1446
1447/*
1448 * Walk zones in a node and print using a callback.
1449 * If @assert_populated is true, only use callback for zones that are populated.
1450 */
1451static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
1452 bool assert_populated, bool nolock,
1453 void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
1454{
1455 struct zone *zone;
1456 struct zone *node_zones = pgdat->node_zones;
1457 unsigned long flags;
1458
1459 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
1460 if (assert_populated && !populated_zone(zone))
1461 continue;
1462
1463 if (!nolock)
1464 spin_lock_irqsave(&zone->lock, flags);
1465 print(m, pgdat, zone);
1466 if (!nolock)
1467 spin_unlock_irqrestore(&zone->lock, flags);
1468 }
1469}
1470#endif
1471
1472#ifdef CONFIG_PROC_FS
1473static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
1474 struct zone *zone)
1475{
1476 int order;
1477
1478 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1479 for (order = 0; order < NR_PAGE_ORDERS; ++order)
1480 /*
1481 * Access to nr_free is lockless as nr_free is used only for
1482 * printing purposes. Use data_race to avoid KCSAN warning.
1483 */
1484 seq_printf(m, "%6lu ", data_race(zone->free_area[order].nr_free));
1485 seq_putc(m, '\n');
1486}
1487
1488/*
1489 * This walks the free areas for each zone.
1490 */
1491static int frag_show(struct seq_file *m, void *arg)
1492{
1493 pg_data_t *pgdat = (pg_data_t *)arg;
1494 walk_zones_in_node(m, pgdat, true, false, frag_show_print);
1495 return 0;
1496}
1497
1498static void pagetypeinfo_showfree_print(struct seq_file *m,
1499 pg_data_t *pgdat, struct zone *zone)
1500{
1501 int order, mtype;
1502
1503 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
1504 seq_printf(m, "Node %4d, zone %8s, type %12s ",
1505 pgdat->node_id,
1506 zone->name,
1507 migratetype_names[mtype]);
1508 for (order = 0; order < NR_PAGE_ORDERS; ++order) {
1509 unsigned long freecount = 0;
1510 struct free_area *area;
1511 struct list_head *curr;
1512 bool overflow = false;
1513
1514 area = &(zone->free_area[order]);
1515
1516 list_for_each(curr, &area->free_list[mtype]) {
1517 /*
1518 * Cap the free_list iteration because it might
1519 * be really large and we are under a spinlock
1520 * so a long time spent here could trigger a
1521 * hard lockup detector. Anyway this is a
1522 * debugging tool so knowing there is a handful
1523 * of pages of this order should be more than
1524 * sufficient.
1525 */
1526 if (++freecount >= 100000) {
1527 overflow = true;
1528 break;
1529 }
1530 }
1531 seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
1532 spin_unlock_irq(&zone->lock);
1533 cond_resched();
1534 spin_lock_irq(&zone->lock);
1535 }
1536 seq_putc(m, '\n');
1537 }
1538}
1539
1540/* Print out the free pages at each order for each migratetype */
1541static void pagetypeinfo_showfree(struct seq_file *m, void *arg)
1542{
1543 int order;
1544 pg_data_t *pgdat = (pg_data_t *)arg;
1545
1546 /* Print header */
1547 seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
1548 for (order = 0; order < NR_PAGE_ORDERS; ++order)
1549 seq_printf(m, "%6d ", order);
1550 seq_putc(m, '\n');
1551
1552 walk_zones_in_node(m, pgdat, true, false, pagetypeinfo_showfree_print);
1553}
1554
1555static void pagetypeinfo_showblockcount_print(struct seq_file *m,
1556 pg_data_t *pgdat, struct zone *zone)
1557{
1558 int mtype;
1559 unsigned long pfn;
1560 unsigned long start_pfn = zone->zone_start_pfn;
1561 unsigned long end_pfn = zone_end_pfn(zone);
1562 unsigned long count[MIGRATE_TYPES] = { 0, };
1563
1564 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
1565 struct page *page;
1566
1567 page = pfn_to_online_page(pfn);
1568 if (!page)
1569 continue;
1570
1571 if (page_zone(page) != zone)
1572 continue;
1573
1574 mtype = get_pageblock_migratetype(page);
1575
1576 if (mtype < MIGRATE_TYPES)
1577 count[mtype]++;
1578 }
1579
1580 /* Print counts */
1581 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1582 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1583 seq_printf(m, "%12lu ", count[mtype]);
1584 seq_putc(m, '\n');
1585}
1586
1587/* Print out the number of pageblocks for each migratetype */
1588static void pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
1589{
1590 int mtype;
1591 pg_data_t *pgdat = (pg_data_t *)arg;
1592
1593 seq_printf(m, "\n%-23s", "Number of blocks type ");
1594 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1595 seq_printf(m, "%12s ", migratetype_names[mtype]);
1596 seq_putc(m, '\n');
1597 walk_zones_in_node(m, pgdat, true, false,
1598 pagetypeinfo_showblockcount_print);
1599}
1600
1601/*
1602 * Print out the number of pageblocks for each migratetype that contain pages
1603 * of other types. This gives an indication of how well fallbacks are being
1604 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
1605 * to determine what is going on
1606 */
1607static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
1608{
1609#ifdef CONFIG_PAGE_OWNER
1610 int mtype;
1611
1612 if (!static_branch_unlikely(&page_owner_inited))
1613 return;
1614
1615 drain_all_pages(NULL);
1616
1617 seq_printf(m, "\n%-23s", "Number of mixed blocks ");
1618 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1619 seq_printf(m, "%12s ", migratetype_names[mtype]);
1620 seq_putc(m, '\n');
1621
1622 walk_zones_in_node(m, pgdat, true, true,
1623 pagetypeinfo_showmixedcount_print);
1624#endif /* CONFIG_PAGE_OWNER */
1625}
1626
1627/*
1628 * This prints out statistics in relation to grouping pages by mobility.
1629 * It is expensive to collect so do not constantly read the file.
1630 */
1631static int pagetypeinfo_show(struct seq_file *m, void *arg)
1632{
1633 pg_data_t *pgdat = (pg_data_t *)arg;
1634
1635 /* check memoryless node */
1636 if (!node_state(pgdat->node_id, N_MEMORY))
1637 return 0;
1638
1639 seq_printf(m, "Page block order: %d\n", pageblock_order);
1640 seq_printf(m, "Pages per block: %lu\n", pageblock_nr_pages);
1641 seq_putc(m, '\n');
1642 pagetypeinfo_showfree(m, pgdat);
1643 pagetypeinfo_showblockcount(m, pgdat);
1644 pagetypeinfo_showmixedcount(m, pgdat);
1645
1646 return 0;
1647}
1648
1649static const struct seq_operations fragmentation_op = {
1650 .start = frag_start,
1651 .next = frag_next,
1652 .stop = frag_stop,
1653 .show = frag_show,
1654};
1655
1656static const struct seq_operations pagetypeinfo_op = {
1657 .start = frag_start,
1658 .next = frag_next,
1659 .stop = frag_stop,
1660 .show = pagetypeinfo_show,
1661};
1662
1663static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
1664{
1665 int zid;
1666
1667 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1668 struct zone *compare = &pgdat->node_zones[zid];
1669
1670 if (populated_zone(compare))
1671 return zone == compare;
1672 }
1673
1674 return false;
1675}
1676
1677static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1678 struct zone *zone)
1679{
1680 int i;
1681 seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1682 if (is_zone_first_populated(pgdat, zone)) {
1683 seq_printf(m, "\n per-node stats");
1684 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1685 unsigned long pages = node_page_state_pages(pgdat, i);
1686
1687 if (vmstat_item_print_in_thp(i))
1688 pages /= HPAGE_PMD_NR;
1689 seq_printf(m, "\n %-12s %lu", node_stat_name(i),
1690 pages);
1691 }
1692 }
1693 seq_printf(m,
1694 "\n pages free %lu"
1695 "\n boost %lu"
1696 "\n min %lu"
1697 "\n low %lu"
1698 "\n high %lu"
1699 "\n spanned %lu"
1700 "\n present %lu"
1701 "\n managed %lu"
1702 "\n cma %lu",
1703 zone_page_state(zone, NR_FREE_PAGES),
1704 zone->watermark_boost,
1705 min_wmark_pages(zone),
1706 low_wmark_pages(zone),
1707 high_wmark_pages(zone),
1708 zone->spanned_pages,
1709 zone->present_pages,
1710 zone_managed_pages(zone),
1711 zone_cma_pages(zone));
1712
1713 seq_printf(m,
1714 "\n protection: (%ld",
1715 zone->lowmem_reserve[0]);
1716 for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
1717 seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
1718 seq_putc(m, ')');
1719
1720 /* If unpopulated, no other information is useful */
1721 if (!populated_zone(zone)) {
1722 seq_putc(m, '\n');
1723 return;
1724 }
1725
1726 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1727 seq_printf(m, "\n %-12s %lu", zone_stat_name(i),
1728 zone_page_state(zone, i));
1729
1730#ifdef CONFIG_NUMA
1731 for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
1732 seq_printf(m, "\n %-12s %lu", numa_stat_name(i),
1733 zone_numa_event_state(zone, i));
1734#endif
1735
1736 seq_printf(m, "\n pagesets");
1737 for_each_online_cpu(i) {
1738 struct per_cpu_pages *pcp;
1739 struct per_cpu_zonestat __maybe_unused *pzstats;
1740
1741 pcp = per_cpu_ptr(zone->per_cpu_pageset, i);
1742 seq_printf(m,
1743 "\n cpu: %i"
1744 "\n count: %i"
1745 "\n high: %i"
1746 "\n batch: %i",
1747 i,
1748 pcp->count,
1749 pcp->high,
1750 pcp->batch);
1751#ifdef CONFIG_SMP
1752 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, i);
1753 seq_printf(m, "\n vm stats threshold: %d",
1754 pzstats->stat_threshold);
1755#endif
1756 }
1757 seq_printf(m,
1758 "\n node_unreclaimable: %u"
1759 "\n start_pfn: %lu",
1760 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
1761 zone->zone_start_pfn);
1762 seq_putc(m, '\n');
1763}
1764
1765/*
1766 * Output information about zones in @pgdat. All zones are printed regardless
1767 * of whether they are populated or not: lowmem_reserve_ratio operates on the
1768 * set of all zones and userspace would not be aware of such zones if they are
1769 * suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio).
1770 */
1771static int zoneinfo_show(struct seq_file *m, void *arg)
1772{
1773 pg_data_t *pgdat = (pg_data_t *)arg;
1774 walk_zones_in_node(m, pgdat, false, false, zoneinfo_show_print);
1775 return 0;
1776}
1777
1778static const struct seq_operations zoneinfo_op = {
1779 .start = frag_start, /* iterate over all zones. The same as in
1780 * fragmentation. */
1781 .next = frag_next,
1782 .stop = frag_stop,
1783 .show = zoneinfo_show,
1784};
1785
1786#define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \
1787 NR_VM_NUMA_EVENT_ITEMS + \
1788 NR_VM_NODE_STAT_ITEMS + \
1789 NR_VM_WRITEBACK_STAT_ITEMS + \
1790 (IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \
1791 NR_VM_EVENT_ITEMS : 0))
1792
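/*
 * The buffer assembled by vmstat_start() below holds, in this order: zone
 * counters, NUMA event counters (on CONFIG_NUMA), node counters, the two
 * dirty thresholds and finally the VM event counters, mirroring the layout
 * of vmstat_text[] so that vmstat_show() can pair names and values by index.
 */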
1793static void *vmstat_start(struct seq_file *m, loff_t *pos)
1794{
1795 unsigned long *v;
1796 int i;
1797
1798 if (*pos >= NR_VMSTAT_ITEMS)
1799 return NULL;
1800
1801 BUILD_BUG_ON(ARRAY_SIZE(vmstat_text) < NR_VMSTAT_ITEMS);
1802 fold_vm_numa_events();
1803 v = kmalloc_array(NR_VMSTAT_ITEMS, sizeof(unsigned long), GFP_KERNEL);
1804 m->private = v;
1805 if (!v)
1806 return ERR_PTR(-ENOMEM);
1807 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1808 v[i] = global_zone_page_state(i);
1809 v += NR_VM_ZONE_STAT_ITEMS;
1810
1811#ifdef CONFIG_NUMA
1812 for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
1813 v[i] = global_numa_event_state(i);
1814 v += NR_VM_NUMA_EVENT_ITEMS;
1815#endif
1816
1817 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1818 v[i] = global_node_page_state_pages(i);
1819 if (vmstat_item_print_in_thp(i))
1820 v[i] /= HPAGE_PMD_NR;
1821 }
1822 v += NR_VM_NODE_STAT_ITEMS;
1823
1824 global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1825 v + NR_DIRTY_THRESHOLD);
1826 v += NR_VM_WRITEBACK_STAT_ITEMS;
1827
1828#ifdef CONFIG_VM_EVENT_COUNTERS
1829 all_vm_events(v);
1830 v[PGPGIN] /= 2; /* sectors -> kbytes */
1831 v[PGPGOUT] /= 2;
1832#endif
1833 return (unsigned long *)m->private + *pos;
1834}
1835
1836static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1837{
1838 (*pos)++;
1839 if (*pos >= NR_VMSTAT_ITEMS)
1840 return NULL;
1841 return (unsigned long *)m->private + *pos;
1842}
1843
1844static int vmstat_show(struct seq_file *m, void *arg)
1845{
1846 unsigned long *l = arg;
1847 unsigned long off = l - (unsigned long *)m->private;
1848
1849 seq_puts(m, vmstat_text[off]);
1850 seq_put_decimal_ull(m, " ", *l);
1851 seq_putc(m, '\n');
1852
1853 if (off == NR_VMSTAT_ITEMS - 1) {
1854 /*
1855 * We've come to the end - add any deprecated counters to avoid
1856 * breaking userspace which might depend on them being present.
1857 */
1858 seq_puts(m, "nr_unstable 0\n");
1859 }
1860 return 0;
1861}
1862
1863static void vmstat_stop(struct seq_file *m, void *arg)
1864{
1865 kfree(m->private);
1866 m->private = NULL;
1867}
1868
1869static const struct seq_operations vmstat_op = {
1870 .start = vmstat_start,
1871 .next = vmstat_next,
1872 .stop = vmstat_stop,
1873 .show = vmstat_show,
1874};
1875#endif /* CONFIG_PROC_FS */
1876
1877#ifdef CONFIG_SMP
1878static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1879int sysctl_stat_interval __read_mostly = HZ;
1880
1881#ifdef CONFIG_PROC_FS
1882static void refresh_vm_stats(struct work_struct *work)
1883{
1884 refresh_cpu_vm_stats(true);
1885}
1886
1887int vmstat_refresh(struct ctl_table *table, int write,
1888 void *buffer, size_t *lenp, loff_t *ppos)
1889{
1890 long val;
1891 int err;
1892 int i;
1893
1894 /*
1895 * The regular update, every sysctl_stat_interval, may come later
1896 * than expected: leaving a significant amount in per_cpu buckets.
1897 * This is particularly misleading when checking a quantity of HUGE
1898 * pages, immediately after running a test. /proc/sys/vm/stat_refresh,
1899 * which can equally be echo'ed to or cat'ted from (by root),
1900 * can be used to update the stats just before reading them.
1901 *
1902 * Oh, and since global_zone_page_state() etc. are so careful to hide
1903 * transiently negative values, report an error here if any of
1904 * the stats is negative, so we know to go looking for imbalance.
1905 */
1906 err = schedule_on_each_cpu(refresh_vm_stats);
1907 if (err)
1908 return err;
1909 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
1910 /*
1911 * Skip checking stats known to go negative occasionally.
1912 */
1913 switch (i) {
1914 case NR_ZONE_WRITE_PENDING:
1915 case NR_FREE_CMA_PAGES:
1916 continue;
1917 }
1918 val = atomic_long_read(&vm_zone_stat[i]);
1919 if (val < 0) {
1920 pr_warn("%s: %s %ld\n",
1921 __func__, zone_stat_name(i), val);
1922 }
1923 }
1924 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1925 /*
1926 * Skip checking stats known to go negative occasionally.
1927 */
1928 switch (i) {
1929 case NR_WRITEBACK:
1930 continue;
1931 }
1932 val = atomic_long_read(&vm_node_stat[i]);
1933 if (val < 0) {
1934 pr_warn("%s: %s %ld\n",
1935 __func__, node_stat_name(i), val);
1936 }
1937 }
1938 if (write)
1939 *ppos += *lenp;
1940 else
1941 *lenp = 0;
1942 return 0;
1943}
1944#endif /* CONFIG_PROC_FS */
1945
1946static void vmstat_update(struct work_struct *w)
1947{
1948 if (refresh_cpu_vm_stats(true)) {
1949 /*
1950 * Counters were updated so we expect more updates
1951 * to occur in the future. Keep on running the
1952 * update worker thread.
1953 */
1954 queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
1955 this_cpu_ptr(&vmstat_work),
1956 round_jiffies_relative(sysctl_stat_interval));
1957 }
1958}
1959
1960/*
1961 * Check if the diffs for a certain cpu indicate that
1962 * an update is needed.
1963 */
1964static bool need_update(int cpu)
1965{
1966 pg_data_t *last_pgdat = NULL;
1967 struct zone *zone;
1968
1969 for_each_populated_zone(zone) {
1970 struct per_cpu_zonestat *pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
1971 struct per_cpu_nodestat *n;
1972
1973 /*
1974 * The fast way of checking if there are any vmstat diffs.
1975 */
1976 if (memchr_inv(pzstats->vm_stat_diff, 0, sizeof(pzstats->vm_stat_diff)))
1977 return true;
1978
1979 if (last_pgdat == zone->zone_pgdat)
1980 continue;
1981 last_pgdat = zone->zone_pgdat;
1982 n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu);
1983 if (memchr_inv(n->vm_node_stat_diff, 0, sizeof(n->vm_node_stat_diff)))
1984 return true;
1985 }
1986 return false;
1987}
1988
1989/*
1990 * Switch off vmstat processing and then fold all the remaining differentials
1991 * until the diffs stay at zero. The function is used by NOHZ and can only be
1992 * invoked when tick processing is not active.
1993 */
1994void quiet_vmstat(void)
1995{
1996 if (system_state != SYSTEM_RUNNING)
1997 return;
1998
1999 if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
2000 return;
2001
2002 if (!need_update(smp_processor_id()))
2003 return;
2004
2005 /*
2006 * Just refresh counters and do not care about the pending delayed
2007 * vmstat_update. It doesn't fire that often to matter and canceling
2008 * it would be too expensive from this path.
2009 * vmstat_shepherd will take care about that for us.
2010 */
2011 refresh_cpu_vm_stats(false);
2012}
2013
2014/*
2015 * Shepherd worker thread that checks the
2016 * differentials of processors that have their worker
2017 * threads for vm statistics updates disabled because of
2018 * inactivity.
2019 */
2020static void vmstat_shepherd(struct work_struct *w);
2021
2022static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
2023
2024static void vmstat_shepherd(struct work_struct *w)
2025{
2026 int cpu;
2027
2028 cpus_read_lock();
2029 /* Check processors whose vmstat worker threads have been disabled */
2030 for_each_online_cpu(cpu) {
2031 struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
2032
2033		/*
2034		 * In-kernel users of vmstat counters either require the precise value
2035		 * and use the zone_page_state_snapshot() interface, or they can live
2036		 * with imprecision, as the regular flushing can happen at an arbitrary
2037		 * time and the cumulative error can grow (see calculate_normal_threshold).
2038		 *
2039		 * From that point of view, the regular flushing can be postponed for
2040		 * CPUs that have been isolated from kernel interference without critical
2041		 * infrastructure ever noticing. Skip regular flushing from vmstat_shepherd
2042		 * for all isolated CPUs to avoid disturbing the isolated workload.
2043		 */
2044 if (cpu_is_isolated(cpu))
2045 continue;
2046
2047 if (!delayed_work_pending(dw) && need_update(cpu))
2048 queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
2049
2050 cond_resched();
2051 }
2052 cpus_read_unlock();
2053
2054 schedule_delayed_work(&shepherd,
2055 round_jiffies_relative(sysctl_stat_interval));
2056}
2057
2058static void __init start_shepherd_timer(void)
2059{
2060 int cpu;
2061
2062 for_each_possible_cpu(cpu)
2063 INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
2064 vmstat_update);
2065
2066 schedule_delayed_work(&shepherd,
2067 round_jiffies_relative(sysctl_stat_interval));
2068}
2069
2070static void __init init_cpu_node_state(void)
2071{
2072 int node;
2073
2074 for_each_online_node(node) {
2075 if (!cpumask_empty(cpumask_of_node(node)))
2076 node_set_state(node, N_CPU);
2077 }
2078}
2079
2080static int vmstat_cpu_online(unsigned int cpu)
2081{
2082 refresh_zone_stat_thresholds();
2083
2084 if (!node_state(cpu_to_node(cpu), N_CPU)) {
2085 node_set_state(cpu_to_node(cpu), N_CPU);
2086 }
2087
2088 return 0;
2089}
2090
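/*
 * Teardown callback for the "mm/vmstat:online" hotplug state registered
 * in init_mm_internals(): make sure the per-cpu vmstat work can neither
 * run nor requeue itself once this CPU goes offline.
 */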
2091static int vmstat_cpu_down_prep(unsigned int cpu)
2092{
2093 cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
2094 return 0;
2095}
2096
2097static int vmstat_cpu_dead(unsigned int cpu)
2098{
2099 const struct cpumask *node_cpus;
2100 int node;
2101
2102 node = cpu_to_node(cpu);
2103
2104 refresh_zone_stat_thresholds();
2105 node_cpus = cpumask_of_node(node);
2106 if (!cpumask_empty(node_cpus))
2107 return 0;
2108
2109 node_clear_state(node, N_CPU);
2110
2111 return 0;
2112}
2113
2114#endif
2115
2116struct workqueue_struct *mm_percpu_wq;
2117
2118void __init init_mm_internals(void)
2119{
2120 int ret __maybe_unused;
2121
2122 mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);
2123
2124#ifdef CONFIG_SMP
2125 ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
2126 NULL, vmstat_cpu_dead);
2127 if (ret < 0)
2128 pr_err("vmstat: failed to register 'dead' hotplug state\n");
2129
2130 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
2131 vmstat_cpu_online,
2132 vmstat_cpu_down_prep);
2133 if (ret < 0)
2134 pr_err("vmstat: failed to register 'online' hotplug state\n");
2135
2136 cpus_read_lock();
2137 init_cpu_node_state();
2138 cpus_read_unlock();
2139
2140 start_shepherd_timer();
2141#endif
2142#ifdef CONFIG_PROC_FS
2143 proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
2144 proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
2145 proc_create_seq("vmstat", 0444, NULL, &vmstat_op);
2146 proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op);
2147#endif
2148}
2149
2150#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
2151
2152/*
2153 * Return an index indicating how much of the available free memory is
2154 * unusable for an allocation of the requested size.
2155 */
2156static int unusable_free_index(unsigned int order,
2157 struct contig_page_info *info)
2158{
2159 /* No free memory is interpreted as all free memory is unusable */
2160 if (info->free_pages == 0)
2161 return 1000;
2162
2163 /*
2164	 * The index is conceptually a value between 0 and 1; return it scaled
2165	 * by 1000, i.e. to 3 decimal places.
2166 *
2167 * 0 => no fragmentation
2168 * 1 => high fragmentation
2169 */
2170	return div_u64((info->free_pages - (info->free_blocks_suitable << order))
2171			* 1000ULL, info->free_pages);
2172}
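
/*
 * For example, with 1000 free pages in the zone of which 16 order-4
 * blocks (256 pages) are large enough for the request, the index is
 * (1000 - 256) * 1000 / 1000 = 744, printed by unusable_show_print()
 * as 0.744.
 */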
2173
2174static void unusable_show_print(struct seq_file *m,
2175 pg_data_t *pgdat, struct zone *zone)
2176{
2177 unsigned int order;
2178 int index;
2179 struct contig_page_info info;
2180
2181 seq_printf(m, "Node %d, zone %8s ",
2182 pgdat->node_id,
2183 zone->name);
2184 for (order = 0; order < NR_PAGE_ORDERS; ++order) {
2185 fill_contig_page_info(zone, order, &info);
2186 index = unusable_free_index(order, &info);
2187 seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
2188 }
2189
2190 seq_putc(m, '\n');
2191}
2192
2193/*
2194 * Display unusable free space index
2195 *
2196 * The unusable free space index measures how much of the available free
2197 * memory cannot be used to satisfy an allocation of a given size and is a
2198 * value between 0 and 1. The higher the value, the more of the free memory
2199 * is unusable and, by implication, the worse the external fragmentation. This
2200 * can be expressed as a percentage by multiplying by 100.
2201 */
2202static int unusable_show(struct seq_file *m, void *arg)
2203{
2204 pg_data_t *pgdat = (pg_data_t *)arg;
2205
2206 /* check memoryless node */
2207 if (!node_state(pgdat->node_id, N_MEMORY))
2208 return 0;
2209
2210 walk_zones_in_node(m, pgdat, true, false, unusable_show_print);
2211
2212 return 0;
2213}
2214
2215static const struct seq_operations unusable_sops = {
2216 .start = frag_start,
2217 .next = frag_next,
2218 .stop = frag_stop,
2219 .show = unusable_show,
2220};
2221
2222DEFINE_SEQ_ATTRIBUTE(unusable);
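
/*
 * DEFINE_SEQ_ATTRIBUTE(unusable) generates unusable_fops, a
 * file_operations wrapping unusable_sops; DEFINE_SEQ_ATTRIBUTE(extfrag)
 * below does the same for extfrag_fops. Both are handed to
 * debugfs_create_file() in extfrag_debug_init().
 */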
2223
2224static void extfrag_show_print(struct seq_file *m,
2225 pg_data_t *pgdat, struct zone *zone)
2226{
2227 unsigned int order;
2228 int index;
2229
2230 /* Alloc on stack as interrupts are disabled for zone walk */
2231 struct contig_page_info info;
2232
2233 seq_printf(m, "Node %d, zone %8s ",
2234 pgdat->node_id,
2235 zone->name);
2236 for (order = 0; order < NR_PAGE_ORDERS; ++order) {
2237 fill_contig_page_info(zone, order, &info);
2238 index = __fragmentation_index(order, &info);
2239 seq_printf(m, "%2d.%03d ", index / 1000, index % 1000);
2240 }
2241
2242 seq_putc(m, '\n');
2243}
2244
2245/*
2246 * Display fragmentation index for orders that allocations would fail for
2247 */
2248static int extfrag_show(struct seq_file *m, void *arg)
2249{
2250 pg_data_t *pgdat = (pg_data_t *)arg;
2251
2252 walk_zones_in_node(m, pgdat, true, false, extfrag_show_print);
2253
2254 return 0;
2255}
2256
2257static const struct seq_operations extfrag_sops = {
2258 .start = frag_start,
2259 .next = frag_next,
2260 .stop = frag_stop,
2261 .show = extfrag_show,
2262};
2263
2264DEFINE_SEQ_ATTRIBUTE(extfrag);
2265
2266static int __init extfrag_debug_init(void)
2267{
2268 struct dentry *extfrag_debug_root;
2269
2270 extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
2271
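	/*
	 * The files created below end up under <debugfs>/extfrag/,
	 * typically mounted at /sys/kernel/debug/extfrag/.
	 */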
2272 debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL,
2273 &unusable_fops);
2274
2275 debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL,
2276 &extfrag_fops);
2277
2278 return 0;
2279}
2280
2281module_init(extfrag_debug_init);
2282#endif
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/mm/vmstat.c
4 *
5 * Manages VM statistics
6 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
7 *
8 * zoned VM statistics
9 * Copyright (C) 2006 Silicon Graphics, Inc.,
10 * Christoph Lameter <christoph@lameter.com>
11 * Copyright (C) 2008-2014 Christoph Lameter
12 */
13#include <linux/fs.h>
14#include <linux/mm.h>
15#include <linux/err.h>
16#include <linux/module.h>
17#include <linux/slab.h>
18#include <linux/cpu.h>
19#include <linux/cpumask.h>
20#include <linux/vmstat.h>
21#include <linux/proc_fs.h>
22#include <linux/seq_file.h>
23#include <linux/debugfs.h>
24#include <linux/sched.h>
25#include <linux/math64.h>
26#include <linux/writeback.h>
27#include <linux/compaction.h>
28#include <linux/mm_inline.h>
29#include <linux/page_ext.h>
30#include <linux/page_owner.h>
31
32#include "internal.h"
33
34#ifdef CONFIG_NUMA
35int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;
36
37/* zero numa counters within a zone */
38static void zero_zone_numa_counters(struct zone *zone)
39{
40 int item, cpu;
41
42 for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++) {
43 atomic_long_set(&zone->vm_numa_event[item], 0);
44 for_each_online_cpu(cpu) {
45 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item]
46 = 0;
47 }
48 }
49}
50
51/* zero numa counters of all the populated zones */
52static void zero_zones_numa_counters(void)
53{
54 struct zone *zone;
55
56 for_each_populated_zone(zone)
57 zero_zone_numa_counters(zone);
58}
59
60/* zero global numa counters */
61static void zero_global_numa_counters(void)
62{
63 int item;
64
65 for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
66 atomic_long_set(&vm_numa_event[item], 0);
67}
68
69static void invalid_numa_statistics(void)
70{
71 zero_zones_numa_counters();
72 zero_global_numa_counters();
73}
74
75static DEFINE_MUTEX(vm_numa_stat_lock);
76
77int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
78 void *buffer, size_t *length, loff_t *ppos)
79{
80 int ret, oldval;
81
82 mutex_lock(&vm_numa_stat_lock);
83 if (write)
84 oldval = sysctl_vm_numa_stat;
85 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
86 if (ret || !write)
87 goto out;
88
89 if (oldval == sysctl_vm_numa_stat)
90 goto out;
91 else if (sysctl_vm_numa_stat == ENABLE_NUMA_STAT) {
92 static_branch_enable(&vm_numa_stat_key);
93 pr_info("enable numa statistics\n");
94 } else {
95 static_branch_disable(&vm_numa_stat_key);
96 invalid_numa_statistics();
97 pr_info("disable numa statistics, and clear numa counters\n");
98 }
99
100out:
101 mutex_unlock(&vm_numa_stat_lock);
102 return ret;
103}
104#endif
105
106#ifdef CONFIG_VM_EVENT_COUNTERS
107DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
108EXPORT_PER_CPU_SYMBOL(vm_event_states);
109
110static void sum_vm_events(unsigned long *ret)
111{
112 int cpu;
113 int i;
114
115 memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
116
117 for_each_online_cpu(cpu) {
118 struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
119
120 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
121 ret[i] += this->event[i];
122 }
123}
124
125/*
126 * Accumulate the vm event counters across all CPUs.
127 * The result is unavoidably approximate - it can change
128 * during and after execution of this function.
129*/
130void all_vm_events(unsigned long *ret)
131{
132 get_online_cpus();
133 sum_vm_events(ret);
134 put_online_cpus();
135}
136EXPORT_SYMBOL_GPL(all_vm_events);
137
138/*
139 * Fold the foreign cpu events into our own.
140 *
141 * This is adding to the events on one processor
142 * but keeps the global counts constant.
143 */
144void vm_events_fold_cpu(int cpu)
145{
146 struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
147 int i;
148
149 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
150 count_vm_events(i, fold_state->event[i]);
151 fold_state->event[i] = 0;
152 }
153}
154
155#endif /* CONFIG_VM_EVENT_COUNTERS */
156
157/*
158 * Manage combined zone based / global counters
159 *
160 * vm_stat contains the global counters
161 */
162atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
163atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
164atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS] __cacheline_aligned_in_smp;
165EXPORT_SYMBOL(vm_zone_stat);
166EXPORT_SYMBOL(vm_node_stat);
167
168#ifdef CONFIG_SMP
169
170int calculate_pressure_threshold(struct zone *zone)
171{
172 int threshold;
173 int watermark_distance;
174
175 /*
176 * As vmstats are not up to date, there is drift between the estimated
177 * and real values. For high thresholds and a high number of CPUs, it
178 * is possible for the min watermark to be breached while the estimated
179 * value looks fine. The pressure threshold is a reduced value such
180 * that even the maximum amount of drift will not accidentally breach
181 * the min watermark
182 */
183 watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
184 threshold = max(1, (int)(watermark_distance / num_online_cpus()));
185
186 /*
187 * Maximum threshold is 125
188 */
189 threshold = min(125, threshold);
190
191 return threshold;
192}
193
194int calculate_normal_threshold(struct zone *zone)
195{
196 int threshold;
197 int mem; /* memory in 128 MB units */
198
199 /*
200 * The threshold scales with the number of processors and the amount
201 * of memory per zone. More memory means that we can defer updates for
202 * longer, more processors could lead to more contention.
203 * fls() is used to have a cheap way of logarithmic scaling.
204 *
205 * Some sample thresholds:
206 *
207 * Threshold Processors (fls) Zonesize fls(mem+1)
208 * ------------------------------------------------------------------
209 * 8 1 1 0.9-1 GB 4
210 * 16 2 2 0.9-1 GB 4
211 * 20 2 2 1-2 GB 5
212 * 24 2 2 2-4 GB 6
213 * 28 2 2 4-8 GB 7
214 * 32 2 2 8-16 GB 8
215 * 4 2 2 <128M 1
216 * 30 4 3 2-4 GB 5
217 * 48 4 3 8-16 GB 8
218 * 32 8 4 1-2 GB 4
219 * 32 8 4 0.9-1GB 4
220 * 10 16 5 <128M 1
221 * 40 16 5 900M 4
222 * 70 64 7 2-4 GB 5
223 * 84 64 7 4-8 GB 6
224 * 108 512 9 4-8 GB 6
225 * 125 1024 10 8-16 GB 8
226 * 125 1024 10 16-32 GB 9
227 */
228
229 mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);
230
231 threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
232
233 /*
234 * Maximum threshold is 125
235 */
236 threshold = min(125, threshold);
237
238 return threshold;
239}
240
241/*
242 * Refresh the thresholds for each zone.
243 */
244void refresh_zone_stat_thresholds(void)
245{
246 struct pglist_data *pgdat;
247 struct zone *zone;
248 int cpu;
249 int threshold;
250
251 /* Zero current pgdat thresholds */
252 for_each_online_pgdat(pgdat) {
253 for_each_online_cpu(cpu) {
254 per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
255 }
256 }
257
258 for_each_populated_zone(zone) {
259 struct pglist_data *pgdat = zone->zone_pgdat;
260 unsigned long max_drift, tolerate_drift;
261
262 threshold = calculate_normal_threshold(zone);
263
264 for_each_online_cpu(cpu) {
265 int pgdat_threshold;
266
267 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
268 = threshold;
269
270 /* Base nodestat threshold on the largest populated zone. */
271 pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
272 per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
273 = max(threshold, pgdat_threshold);
274 }
275
276 /*
277 * Only set percpu_drift_mark if there is a danger that
278 * NR_FREE_PAGES reports the low watermark is ok when in fact
279 * the min watermark could be breached by an allocation
280 */
281 tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
282 max_drift = num_online_cpus() * threshold;
283 if (max_drift > tolerate_drift)
284 zone->percpu_drift_mark = high_wmark_pages(zone) +
285 max_drift;
286 }
287}
288
289void set_pgdat_percpu_threshold(pg_data_t *pgdat,
290 int (*calculate_pressure)(struct zone *))
291{
292 struct zone *zone;
293 int cpu;
294 int threshold;
295 int i;
296
297 for (i = 0; i < pgdat->nr_zones; i++) {
298 zone = &pgdat->node_zones[i];
299 if (!zone->percpu_drift_mark)
300 continue;
301
302 threshold = (*calculate_pressure)(zone);
303 for_each_online_cpu(cpu)
304 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
305 = threshold;
306 }
307}
308
309/*
310 * For use when we know that interrupts are disabled,
311 * or when we know that preemption is disabled and that
312 * particular counter cannot be updated from interrupt context.
313 */
314void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
315 long delta)
316{
317 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
318 s8 __percpu *p = pcp->vm_stat_diff + item;
319 long x;
320 long t;
321
322 x = delta + __this_cpu_read(*p);
323
324 t = __this_cpu_read(pcp->stat_threshold);
325
326 if (unlikely(abs(x) > t)) {
327 zone_page_state_add(x, zone, item);
328 x = 0;
329 }
330 __this_cpu_write(*p, x);
331}
332EXPORT_SYMBOL(__mod_zone_page_state);
333
334void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
335 long delta)
336{
337 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
338 s8 __percpu *p = pcp->vm_node_stat_diff + item;
339 long x;
340 long t;
341
342 if (vmstat_item_in_bytes(item)) {
343 /*
344 * Only cgroups use subpage accounting right now; at
345 * the global level, these items still change in
346 * multiples of whole pages. Store them as pages
347 * internally to keep the per-cpu counters compact.
348 */
349 VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
350 delta >>= PAGE_SHIFT;
351 }
352
353 x = delta + __this_cpu_read(*p);
354
355 t = __this_cpu_read(pcp->stat_threshold);
356
357 if (unlikely(abs(x) > t)) {
358 node_page_state_add(x, pgdat, item);
359 x = 0;
360 }
361 __this_cpu_write(*p, x);
362}
363EXPORT_SYMBOL(__mod_node_page_state);
364
365/*
366 * Optimized increment and decrement functions.
367 *
368 * These are only for a single page and therefore can take a struct page *
369 * argument instead of struct zone *. This allows the inclusion of the code
370 * generated for page_zone(page) into the optimized functions.
371 *
372 * No overflow check is necessary and therefore the differential can be
373 * incremented or decremented in place which may allow the compilers to
374 * generate better code.
375 * The increment or decrement is known and therefore one boundary check can
376 * be omitted.
377 *
378 * NOTE: These functions are very performance sensitive. Change only
379 * with care.
380 *
381 * Some processors have inc/dec instructions that are atomic vs an interrupt.
382 * However, the code must first determine the differential location in a zone
383 * based on the processor number and then inc/dec the counter. There is no
384 * guarantee without disabling preemption that the processor will not change
385 * in between and therefore the atomicity vs. interrupt cannot be exploited
386 * in a useful way here.
387 */
388void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
389{
390 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
391 s8 __percpu *p = pcp->vm_stat_diff + item;
392 s8 v, t;
393
394 v = __this_cpu_inc_return(*p);
395 t = __this_cpu_read(pcp->stat_threshold);
396 if (unlikely(v > t)) {
397 s8 overstep = t >> 1;
398
399 zone_page_state_add(v + overstep, zone, item);
400 __this_cpu_write(*p, -overstep);
401 }
402}
403
404void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
405{
406 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
407 s8 __percpu *p = pcp->vm_node_stat_diff + item;
408 s8 v, t;
409
410 VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
411
412 v = __this_cpu_inc_return(*p);
413 t = __this_cpu_read(pcp->stat_threshold);
414 if (unlikely(v > t)) {
415 s8 overstep = t >> 1;
416
417 node_page_state_add(v + overstep, pgdat, item);
418 __this_cpu_write(*p, -overstep);
419 }
420}
421
422void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
423{
424 __inc_zone_state(page_zone(page), item);
425}
426EXPORT_SYMBOL(__inc_zone_page_state);
427
428void __inc_node_page_state(struct page *page, enum node_stat_item item)
429{
430 __inc_node_state(page_pgdat(page), item);
431}
432EXPORT_SYMBOL(__inc_node_page_state);
433
434void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
435{
436 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
437 s8 __percpu *p = pcp->vm_stat_diff + item;
438 s8 v, t;
439
440 v = __this_cpu_dec_return(*p);
441 t = __this_cpu_read(pcp->stat_threshold);
442 if (unlikely(v < - t)) {
443 s8 overstep = t >> 1;
444
445 zone_page_state_add(v - overstep, zone, item);
446 __this_cpu_write(*p, overstep);
447 }
448}
449
450void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
451{
452 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
453 s8 __percpu *p = pcp->vm_node_stat_diff + item;
454 s8 v, t;
455
456 VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
457
458 v = __this_cpu_dec_return(*p);
459 t = __this_cpu_read(pcp->stat_threshold);
460 if (unlikely(v < - t)) {
461 s8 overstep = t >> 1;
462
463 node_page_state_add(v - overstep, pgdat, item);
464 __this_cpu_write(*p, overstep);
465 }
466}
467
468void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
469{
470 __dec_zone_state(page_zone(page), item);
471}
472EXPORT_SYMBOL(__dec_zone_page_state);
473
474void __dec_node_page_state(struct page *page, enum node_stat_item item)
475{
476 __dec_node_state(page_pgdat(page), item);
477}
478EXPORT_SYMBOL(__dec_node_page_state);
479
480#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
481/*
482 * If we have cmpxchg_local support then we do not need to incur the overhead
483 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
484 *
485 * mod_state() modifies the zone counter state through atomic per cpu
486 * operations.
487 *
488 * Overstep mode specifies how overstep should handled:
489 * 0 No overstepping
490 * 1 Overstepping half of threshold
491 * -1 Overstepping minus half of threshold
492*/
493static inline void mod_zone_state(struct zone *zone,
494 enum zone_stat_item item, long delta, int overstep_mode)
495{
496 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
497 s8 __percpu *p = pcp->vm_stat_diff + item;
498 long o, n, t, z;
499
500 do {
501 z = 0; /* overflow to zone counters */
502
503 /*
504 * The fetching of the stat_threshold is racy. We may apply
505 * a counter threshold to the wrong the cpu if we get
506 * rescheduled while executing here. However, the next
507 * counter update will apply the threshold again and
508 * therefore bring the counter under the threshold again.
509 *
510 * Most of the time the thresholds are the same anyways
511 * for all cpus in a zone.
512 */
513 t = this_cpu_read(pcp->stat_threshold);
514
515 o = this_cpu_read(*p);
516 n = delta + o;
517
518 if (abs(n) > t) {
519 int os = overstep_mode * (t >> 1) ;
520
521 /* Overflow must be added to zone counters */
522 z = n + os;
523 n = -os;
524 }
525 } while (this_cpu_cmpxchg(*p, o, n) != o);
526
527 if (z)
528 zone_page_state_add(z, zone, item);
529}
530
531void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
532 long delta)
533{
534 mod_zone_state(zone, item, delta, 0);
535}
536EXPORT_SYMBOL(mod_zone_page_state);
537
538void inc_zone_page_state(struct page *page, enum zone_stat_item item)
539{
540 mod_zone_state(page_zone(page), item, 1, 1);
541}
542EXPORT_SYMBOL(inc_zone_page_state);
543
544void dec_zone_page_state(struct page *page, enum zone_stat_item item)
545{
546 mod_zone_state(page_zone(page), item, -1, -1);
547}
548EXPORT_SYMBOL(dec_zone_page_state);
549
550static inline void mod_node_state(struct pglist_data *pgdat,
551 enum node_stat_item item, int delta, int overstep_mode)
552{
553 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
554 s8 __percpu *p = pcp->vm_node_stat_diff + item;
555 long o, n, t, z;
556
557 if (vmstat_item_in_bytes(item)) {
558 /*
559 * Only cgroups use subpage accounting right now; at
560 * the global level, these items still change in
561 * multiples of whole pages. Store them as pages
562 * internally to keep the per-cpu counters compact.
563 */
564 VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
565 delta >>= PAGE_SHIFT;
566 }
567
568 do {
569 z = 0; /* overflow to node counters */
570
571 /*
572 * The fetching of the stat_threshold is racy. We may apply
573 * a counter threshold to the wrong the cpu if we get
574 * rescheduled while executing here. However, the next
575 * counter update will apply the threshold again and
576 * therefore bring the counter under the threshold again.
577 *
578 * Most of the time the thresholds are the same anyways
579 * for all cpus in a node.
580 */
581 t = this_cpu_read(pcp->stat_threshold);
582
583 o = this_cpu_read(*p);
584 n = delta + o;
585
586 if (abs(n) > t) {
587 int os = overstep_mode * (t >> 1) ;
588
589 /* Overflow must be added to node counters */
590 z = n + os;
591 n = -os;
592 }
593 } while (this_cpu_cmpxchg(*p, o, n) != o);
594
595 if (z)
596 node_page_state_add(z, pgdat, item);
597}
598
599void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
600 long delta)
601{
602 mod_node_state(pgdat, item, delta, 0);
603}
604EXPORT_SYMBOL(mod_node_page_state);
605
606void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
607{
608 mod_node_state(pgdat, item, 1, 1);
609}
610
611void inc_node_page_state(struct page *page, enum node_stat_item item)
612{
613 mod_node_state(page_pgdat(page), item, 1, 1);
614}
615EXPORT_SYMBOL(inc_node_page_state);
616
617void dec_node_page_state(struct page *page, enum node_stat_item item)
618{
619 mod_node_state(page_pgdat(page), item, -1, -1);
620}
621EXPORT_SYMBOL(dec_node_page_state);
622#else
623/*
624 * Use interrupt disable to serialize counter updates
625 */
626void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
627 long delta)
628{
629 unsigned long flags;
630
631 local_irq_save(flags);
632 __mod_zone_page_state(zone, item, delta);
633 local_irq_restore(flags);
634}
635EXPORT_SYMBOL(mod_zone_page_state);
636
637void inc_zone_page_state(struct page *page, enum zone_stat_item item)
638{
639 unsigned long flags;
640 struct zone *zone;
641
642 zone = page_zone(page);
643 local_irq_save(flags);
644 __inc_zone_state(zone, item);
645 local_irq_restore(flags);
646}
647EXPORT_SYMBOL(inc_zone_page_state);
648
649void dec_zone_page_state(struct page *page, enum zone_stat_item item)
650{
651 unsigned long flags;
652
653 local_irq_save(flags);
654 __dec_zone_page_state(page, item);
655 local_irq_restore(flags);
656}
657EXPORT_SYMBOL(dec_zone_page_state);
658
659void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
660{
661 unsigned long flags;
662
663 local_irq_save(flags);
664 __inc_node_state(pgdat, item);
665 local_irq_restore(flags);
666}
667EXPORT_SYMBOL(inc_node_state);
668
669void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
670 long delta)
671{
672 unsigned long flags;
673
674 local_irq_save(flags);
675 __mod_node_page_state(pgdat, item, delta);
676 local_irq_restore(flags);
677}
678EXPORT_SYMBOL(mod_node_page_state);
679
680void inc_node_page_state(struct page *page, enum node_stat_item item)
681{
682 unsigned long flags;
683 struct pglist_data *pgdat;
684
685 pgdat = page_pgdat(page);
686 local_irq_save(flags);
687 __inc_node_state(pgdat, item);
688 local_irq_restore(flags);
689}
690EXPORT_SYMBOL(inc_node_page_state);
691
692void dec_node_page_state(struct page *page, enum node_stat_item item)
693{
694 unsigned long flags;
695
696 local_irq_save(flags);
697 __dec_node_page_state(page, item);
698 local_irq_restore(flags);
699}
700EXPORT_SYMBOL(dec_node_page_state);
701#endif
702
703/*
704 * Fold a differential into the global counters.
705 * Returns the number of counters updated.
706 */
707static int fold_diff(int *zone_diff, int *node_diff)
708{
709 int i;
710 int changes = 0;
711
712 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
713 if (zone_diff[i]) {
714 atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
715 changes++;
716 }
717
718 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
719 if (node_diff[i]) {
720 atomic_long_add(node_diff[i], &vm_node_stat[i]);
721 changes++;
722 }
723 return changes;
724}
725
726#ifdef CONFIG_NUMA
727static void fold_vm_zone_numa_events(struct zone *zone)
728{
729 unsigned long zone_numa_events[NR_VM_NUMA_EVENT_ITEMS] = { 0, };
730 int cpu;
731 enum numa_stat_item item;
732
733 for_each_online_cpu(cpu) {
734 struct per_cpu_zonestat *pzstats;
735
736 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
737 for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
738 zone_numa_events[item] += xchg(&pzstats->vm_numa_event[item], 0);
739 }
740
741 for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
742 zone_numa_event_add(zone_numa_events[item], zone, item);
743}
744
745void fold_vm_numa_events(void)
746{
747 struct zone *zone;
748
749 for_each_populated_zone(zone)
750 fold_vm_zone_numa_events(zone);
751}
752#endif
753
754/*
755 * Update the zone counters for the current cpu.
756 *
757 * Note that refresh_cpu_vm_stats strives to only access
758 * node local memory. The per cpu pagesets on remote zones are placed
759 * in the memory local to the processor using that pageset. So the
760 * loop over all zones will access a series of cachelines local to
761 * the processor.
762 *
763 * The call to zone_page_state_add updates the cachelines with the
764 * statistics in the remote zone struct as well as the global cachelines
765 * with the global counters. These could cause remote node cache line
766 * bouncing and will have to be only done when necessary.
767 *
768 * The function returns the number of global counters updated.
769 */
770static int refresh_cpu_vm_stats(bool do_pagesets)
771{
772 struct pglist_data *pgdat;
773 struct zone *zone;
774 int i;
775 int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
776 int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
777 int changes = 0;
778
779 for_each_populated_zone(zone) {
780 struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
781#ifdef CONFIG_NUMA
782 struct per_cpu_pages __percpu *pcp = zone->per_cpu_pageset;
783#endif
784
785 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
786 int v;
787
788 v = this_cpu_xchg(pzstats->vm_stat_diff[i], 0);
789 if (v) {
790
791 atomic_long_add(v, &zone->vm_stat[i]);
792 global_zone_diff[i] += v;
793#ifdef CONFIG_NUMA
794 /* 3 seconds idle till flush */
795 __this_cpu_write(pcp->expire, 3);
796#endif
797 }
798 }
799#ifdef CONFIG_NUMA
800
801 if (do_pagesets) {
802 cond_resched();
803 /*
804 * Deal with draining the remote pageset of this
805 * processor
806 *
807 * Check if there are pages remaining in this pageset
808 * if not then there is nothing to expire.
809 */
810 if (!__this_cpu_read(pcp->expire) ||
811 !__this_cpu_read(pcp->count))
812 continue;
813
814 /*
815 * We never drain zones local to this processor.
816 */
817 if (zone_to_nid(zone) == numa_node_id()) {
818 __this_cpu_write(pcp->expire, 0);
819 continue;
820 }
821
822 if (__this_cpu_dec_return(pcp->expire))
823 continue;
824
825 if (__this_cpu_read(pcp->count)) {
826 drain_zone_pages(zone, this_cpu_ptr(pcp));
827 changes++;
828 }
829 }
830#endif
831 }
832
833 for_each_online_pgdat(pgdat) {
834 struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;
835
836 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
837 int v;
838
839 v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
840 if (v) {
841 atomic_long_add(v, &pgdat->vm_stat[i]);
842 global_node_diff[i] += v;
843 }
844 }
845 }
846
847 changes += fold_diff(global_zone_diff, global_node_diff);
848 return changes;
849}
850
851/*
852 * Fold the data for an offline cpu into the global array.
853 * There cannot be any access by the offline cpu and therefore
854 * synchronization is simplified.
855 */
856void cpu_vm_stats_fold(int cpu)
857{
858 struct pglist_data *pgdat;
859 struct zone *zone;
860 int i;
861 int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
862 int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
863
864 for_each_populated_zone(zone) {
865 struct per_cpu_zonestat *pzstats;
866
867 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
868
869 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
870 if (pzstats->vm_stat_diff[i]) {
871 int v;
872
873 v = pzstats->vm_stat_diff[i];
874 pzstats->vm_stat_diff[i] = 0;
875 atomic_long_add(v, &zone->vm_stat[i]);
876 global_zone_diff[i] += v;
877 }
878 }
879#ifdef CONFIG_NUMA
880 for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) {
881 if (pzstats->vm_numa_event[i]) {
882 unsigned long v;
883
884 v = pzstats->vm_numa_event[i];
885 pzstats->vm_numa_event[i] = 0;
886 zone_numa_event_add(v, zone, i);
887 }
888 }
889#endif
890 }
891
892 for_each_online_pgdat(pgdat) {
893 struct per_cpu_nodestat *p;
894
895 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
896
897 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
898 if (p->vm_node_stat_diff[i]) {
899 int v;
900
901 v = p->vm_node_stat_diff[i];
902 p->vm_node_stat_diff[i] = 0;
903 atomic_long_add(v, &pgdat->vm_stat[i]);
904 global_node_diff[i] += v;
905 }
906 }
907
908 fold_diff(global_zone_diff, global_node_diff);
909}
910
911/*
912 * this is only called if !populated_zone(zone), which implies no other users of
913 * pset->vm_stat_diff[] exist.
914 */
915void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *pzstats)
916{
917 unsigned long v;
918 int i;
919
920 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
921 if (pzstats->vm_stat_diff[i]) {
922 v = pzstats->vm_stat_diff[i];
923 pzstats->vm_stat_diff[i] = 0;
924 zone_page_state_add(v, zone, i);
925 }
926 }
927
928#ifdef CONFIG_NUMA
929 for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) {
930 if (pzstats->vm_numa_event[i]) {
931 v = pzstats->vm_numa_event[i];
932 pzstats->vm_numa_event[i] = 0;
933 zone_numa_event_add(v, zone, i);
934 }
935 }
936#endif
937}
938#endif
939
940#ifdef CONFIG_NUMA
941/*
942 * Determine the per node value of a stat item. This function
943 * is called frequently in a NUMA machine, so try to be as
944 * frugal as possible.
945 */
946unsigned long sum_zone_node_page_state(int node,
947 enum zone_stat_item item)
948{
949 struct zone *zones = NODE_DATA(node)->node_zones;
950 int i;
951 unsigned long count = 0;
952
953 for (i = 0; i < MAX_NR_ZONES; i++)
954 count += zone_page_state(zones + i, item);
955
956 return count;
957}
958
959/* Determine the per node value of a numa stat item. */
960unsigned long sum_zone_numa_event_state(int node,
961 enum numa_stat_item item)
962{
963 struct zone *zones = NODE_DATA(node)->node_zones;
964 unsigned long count = 0;
965 int i;
966
967 for (i = 0; i < MAX_NR_ZONES; i++)
968 count += zone_numa_event_state(zones + i, item);
969
970 return count;
971}
972
973/*
974 * Determine the per node value of a stat item.
975 */
976unsigned long node_page_state_pages(struct pglist_data *pgdat,
977 enum node_stat_item item)
978{
979 long x = atomic_long_read(&pgdat->vm_stat[item]);
980#ifdef CONFIG_SMP
981 if (x < 0)
982 x = 0;
983#endif
984 return x;
985}
986
987unsigned long node_page_state(struct pglist_data *pgdat,
988 enum node_stat_item item)
989{
990 VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
991
992 return node_page_state_pages(pgdat, item);
993}
994#endif
995
996#ifdef CONFIG_COMPACTION
997
998struct contig_page_info {
999 unsigned long free_pages;
1000 unsigned long free_blocks_total;
1001 unsigned long free_blocks_suitable;
1002};
1003
1004/*
1005 * Calculate the number of free pages in a zone, how many contiguous
1006 * pages are free and how many are large enough to satisfy an allocation of
1007 * the target size. Note that this function makes no attempt to estimate
1008 * how many suitable free blocks there *might* be if MOVABLE pages were
1009 * migrated. Calculating that is possible, but expensive and can be
1010 * figured out from userspace
1011 */
1012static void fill_contig_page_info(struct zone *zone,
1013 unsigned int suitable_order,
1014 struct contig_page_info *info)
1015{
1016 unsigned int order;
1017
1018 info->free_pages = 0;
1019 info->free_blocks_total = 0;
1020 info->free_blocks_suitable = 0;
1021
1022 for (order = 0; order < MAX_ORDER; order++) {
1023 unsigned long blocks;
1024
1025 /* Count number of free blocks */
1026 blocks = zone->free_area[order].nr_free;
1027 info->free_blocks_total += blocks;
1028
1029 /* Count free base pages */
1030 info->free_pages += blocks << order;
1031
1032 /* Count the suitable free blocks */
1033 if (order >= suitable_order)
1034 info->free_blocks_suitable += blocks <<
1035 (order - suitable_order);
1036 }
1037}
1038
1039/*
1040 * A fragmentation index only makes sense if an allocation of a requested
1041 * size would fail. If that is true, the fragmentation index indicates
1042 * whether external fragmentation or a lack of memory was the problem.
1043 * The value can be used to determine if page reclaim or compaction
1044 * should be used
1045 */
1046static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
1047{
1048 unsigned long requested = 1UL << order;
1049
1050 if (WARN_ON_ONCE(order >= MAX_ORDER))
1051 return 0;
1052
1053 if (!info->free_blocks_total)
1054 return 0;
1055
1056 /* Fragmentation index only makes sense when a request would fail */
1057 if (info->free_blocks_suitable)
1058 return -1000;
1059
1060 /*
1061 * Index is between 0 and 1 so return within 3 decimal places
1062 *
1063 * 0 => allocation would fail due to lack of memory
1064 * 1 => allocation would fail due to fragmentation
1065 */
1066 return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total);
1067}
1068
1069/*
1070 * Calculates external fragmentation within a zone wrt the given order.
1071 * It is defined as the percentage of pages found in blocks of size
1072 * less than 1 << order. It returns values in range [0, 100].
1073 */
1074unsigned int extfrag_for_order(struct zone *zone, unsigned int order)
1075{
1076 struct contig_page_info info;
1077
1078 fill_contig_page_info(zone, order, &info);
1079 if (info.free_pages == 0)
1080 return 0;
1081
1082 return div_u64((info.free_pages -
1083 (info.free_blocks_suitable << order)) * 100,
1084 info.free_pages);
1085}
1086
1087/* Same as __fragmentation index but allocs contig_page_info on stack */
1088int fragmentation_index(struct zone *zone, unsigned int order)
1089{
1090 struct contig_page_info info;
1091
1092 fill_contig_page_info(zone, order, &info);
1093 return __fragmentation_index(order, &info);
1094}
1095#endif
1096
1097#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || \
1098 defined(CONFIG_NUMA) || defined(CONFIG_MEMCG)
1099#ifdef CONFIG_ZONE_DMA
1100#define TEXT_FOR_DMA(xx) xx "_dma",
1101#else
1102#define TEXT_FOR_DMA(xx)
1103#endif
1104
1105#ifdef CONFIG_ZONE_DMA32
1106#define TEXT_FOR_DMA32(xx) xx "_dma32",
1107#else
1108#define TEXT_FOR_DMA32(xx)
1109#endif
1110
1111#ifdef CONFIG_HIGHMEM
1112#define TEXT_FOR_HIGHMEM(xx) xx "_high",
1113#else
1114#define TEXT_FOR_HIGHMEM(xx)
1115#endif
1116
1117#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
1118 TEXT_FOR_HIGHMEM(xx) xx "_movable",
1119
1120const char * const vmstat_text[] = {
1121 /* enum zone_stat_item counters */
1122 "nr_free_pages",
1123 "nr_zone_inactive_anon",
1124 "nr_zone_active_anon",
1125 "nr_zone_inactive_file",
1126 "nr_zone_active_file",
1127 "nr_zone_unevictable",
1128 "nr_zone_write_pending",
1129 "nr_mlock",
1130 "nr_bounce",
1131#if IS_ENABLED(CONFIG_ZSMALLOC)
1132 "nr_zspages",
1133#endif
1134 "nr_free_cma",
1135
1136 /* enum numa_stat_item counters */
1137#ifdef CONFIG_NUMA
1138 "numa_hit",
1139 "numa_miss",
1140 "numa_foreign",
1141 "numa_interleave",
1142 "numa_local",
1143 "numa_other",
1144#endif
1145
1146 /* enum node_stat_item counters */
1147 "nr_inactive_anon",
1148 "nr_active_anon",
1149 "nr_inactive_file",
1150 "nr_active_file",
1151 "nr_unevictable",
1152 "nr_slab_reclaimable",
1153 "nr_slab_unreclaimable",
1154 "nr_isolated_anon",
1155 "nr_isolated_file",
1156 "workingset_nodes",
1157 "workingset_refault_anon",
1158 "workingset_refault_file",
1159 "workingset_activate_anon",
1160 "workingset_activate_file",
1161 "workingset_restore_anon",
1162 "workingset_restore_file",
1163 "workingset_nodereclaim",
1164 "nr_anon_pages",
1165 "nr_mapped",
1166 "nr_file_pages",
1167 "nr_dirty",
1168 "nr_writeback",
1169 "nr_writeback_temp",
1170 "nr_shmem",
1171 "nr_shmem_hugepages",
1172 "nr_shmem_pmdmapped",
1173 "nr_file_hugepages",
1174 "nr_file_pmdmapped",
1175 "nr_anon_transparent_hugepages",
1176 "nr_vmscan_write",
1177 "nr_vmscan_immediate_reclaim",
1178 "nr_dirtied",
1179 "nr_written",
1180 "nr_kernel_misc_reclaimable",
1181 "nr_foll_pin_acquired",
1182 "nr_foll_pin_released",
1183 "nr_kernel_stack",
1184#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
1185 "nr_shadow_call_stack",
1186#endif
1187 "nr_page_table_pages",
1188#ifdef CONFIG_SWAP
1189 "nr_swapcached",
1190#endif
1191
1192 /* enum writeback_stat_item counters */
1193 "nr_dirty_threshold",
1194 "nr_dirty_background_threshold",
1195
1196#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
1197 /* enum vm_event_item counters */
1198 "pgpgin",
1199 "pgpgout",
1200 "pswpin",
1201 "pswpout",
1202
1203 TEXTS_FOR_ZONES("pgalloc")
1204 TEXTS_FOR_ZONES("allocstall")
1205 TEXTS_FOR_ZONES("pgskip")
1206
1207 "pgfree",
1208 "pgactivate",
1209 "pgdeactivate",
1210 "pglazyfree",
1211
1212 "pgfault",
1213 "pgmajfault",
1214 "pglazyfreed",
1215
1216 "pgrefill",
1217 "pgreuse",
1218 "pgsteal_kswapd",
1219 "pgsteal_direct",
1220 "pgscan_kswapd",
1221 "pgscan_direct",
1222 "pgscan_direct_throttle",
1223 "pgscan_anon",
1224 "pgscan_file",
1225 "pgsteal_anon",
1226 "pgsteal_file",
1227
1228#ifdef CONFIG_NUMA
1229 "zone_reclaim_failed",
1230#endif
1231 "pginodesteal",
1232 "slabs_scanned",
1233 "kswapd_inodesteal",
1234 "kswapd_low_wmark_hit_quickly",
1235 "kswapd_high_wmark_hit_quickly",
1236 "pageoutrun",
1237
1238 "pgrotated",
1239
1240 "drop_pagecache",
1241 "drop_slab",
1242 "oom_kill",
1243
1244#ifdef CONFIG_NUMA_BALANCING
1245 "numa_pte_updates",
1246 "numa_huge_pte_updates",
1247 "numa_hint_faults",
1248 "numa_hint_faults_local",
1249 "numa_pages_migrated",
1250#endif
1251#ifdef CONFIG_MIGRATION
1252 "pgmigrate_success",
1253 "pgmigrate_fail",
1254 "thp_migration_success",
1255 "thp_migration_fail",
1256 "thp_migration_split",
1257#endif
1258#ifdef CONFIG_COMPACTION
1259 "compact_migrate_scanned",
1260 "compact_free_scanned",
1261 "compact_isolated",
1262 "compact_stall",
1263 "compact_fail",
1264 "compact_success",
1265 "compact_daemon_wake",
1266 "compact_daemon_migrate_scanned",
1267 "compact_daemon_free_scanned",
1268#endif
1269
1270#ifdef CONFIG_HUGETLB_PAGE
1271 "htlb_buddy_alloc_success",
1272 "htlb_buddy_alloc_fail",
1273#endif
1274#ifdef CONFIG_CMA
1275 "cma_alloc_success",
1276 "cma_alloc_fail",
1277#endif
1278 "unevictable_pgs_culled",
1279 "unevictable_pgs_scanned",
1280 "unevictable_pgs_rescued",
1281 "unevictable_pgs_mlocked",
1282 "unevictable_pgs_munlocked",
1283 "unevictable_pgs_cleared",
1284 "unevictable_pgs_stranded",
1285
1286#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1287 "thp_fault_alloc",
1288 "thp_fault_fallback",
1289 "thp_fault_fallback_charge",
1290 "thp_collapse_alloc",
1291 "thp_collapse_alloc_failed",
1292 "thp_file_alloc",
1293 "thp_file_fallback",
1294 "thp_file_fallback_charge",
1295 "thp_file_mapped",
1296 "thp_split_page",
1297 "thp_split_page_failed",
1298 "thp_deferred_split_page",
1299 "thp_split_pmd",
1300#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1301 "thp_split_pud",
1302#endif
1303 "thp_zero_page_alloc",
1304 "thp_zero_page_alloc_failed",
1305 "thp_swpout",
1306 "thp_swpout_fallback",
1307#endif
1308#ifdef CONFIG_MEMORY_BALLOON
1309 "balloon_inflate",
1310 "balloon_deflate",
1311#ifdef CONFIG_BALLOON_COMPACTION
1312 "balloon_migrate",
1313#endif
1314#endif /* CONFIG_MEMORY_BALLOON */
1315#ifdef CONFIG_DEBUG_TLBFLUSH
1316 "nr_tlb_remote_flush",
1317 "nr_tlb_remote_flush_received",
1318 "nr_tlb_local_flush_all",
1319 "nr_tlb_local_flush_one",
1320#endif /* CONFIG_DEBUG_TLBFLUSH */
1321
1322#ifdef CONFIG_DEBUG_VM_VMACACHE
1323 "vmacache_find_calls",
1324 "vmacache_find_hits",
1325#endif
1326#ifdef CONFIG_SWAP
1327 "swap_ra",
1328 "swap_ra_hit",
1329#endif
1330#ifdef CONFIG_X86
1331 "direct_map_level2_splits",
1332 "direct_map_level3_splits",
1333#endif
1334#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
1335};
1336#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */
1337
1338#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
1339 defined(CONFIG_PROC_FS)
1340static void *frag_start(struct seq_file *m, loff_t *pos)
1341{
1342 pg_data_t *pgdat;
1343 loff_t node = *pos;
1344
1345 for (pgdat = first_online_pgdat();
1346 pgdat && node;
1347 pgdat = next_online_pgdat(pgdat))
1348 --node;
1349
1350 return pgdat;
1351}
1352
1353static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
1354{
1355 pg_data_t *pgdat = (pg_data_t *)arg;
1356
1357 (*pos)++;
1358 return next_online_pgdat(pgdat);
1359}
1360
1361static void frag_stop(struct seq_file *m, void *arg)
1362{
1363}
1364
1365/*
1366 * Walk zones in a node and print using a callback.
1367 * If @assert_populated is true, only use callback for zones that are populated.
1368 */
1369static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
1370 bool assert_populated, bool nolock,
1371 void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
1372{
1373 struct zone *zone;
1374 struct zone *node_zones = pgdat->node_zones;
1375 unsigned long flags;
1376
1377 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
1378 if (assert_populated && !populated_zone(zone))
1379 continue;
1380
1381 if (!nolock)
1382 spin_lock_irqsave(&zone->lock, flags);
1383 print(m, pgdat, zone);
1384 if (!nolock)
1385 spin_unlock_irqrestore(&zone->lock, flags);
1386 }
1387}
1388#endif
1389
1390#ifdef CONFIG_PROC_FS
1391static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
1392 struct zone *zone)
1393{
1394 int order;
1395
1396 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1397 for (order = 0; order < MAX_ORDER; ++order)
1398 seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
1399 seq_putc(m, '\n');
1400}
1401
1402/*
1403 * This walks the free areas for each zone.
1404 */
1405static int frag_show(struct seq_file *m, void *arg)
1406{
1407 pg_data_t *pgdat = (pg_data_t *)arg;
1408 walk_zones_in_node(m, pgdat, true, false, frag_show_print);
1409 return 0;
1410}
1411
1412static void pagetypeinfo_showfree_print(struct seq_file *m,
1413 pg_data_t *pgdat, struct zone *zone)
1414{
1415 int order, mtype;
1416
1417 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
1418 seq_printf(m, "Node %4d, zone %8s, type %12s ",
1419 pgdat->node_id,
1420 zone->name,
1421 migratetype_names[mtype]);
1422 for (order = 0; order < MAX_ORDER; ++order) {
1423 unsigned long freecount = 0;
1424 struct free_area *area;
1425 struct list_head *curr;
1426 bool overflow = false;
1427
1428 area = &(zone->free_area[order]);
1429
1430 list_for_each(curr, &area->free_list[mtype]) {
1431 /*
1432 * Cap the free_list iteration because it might
1433 * be really large and we are under a spinlock
1434 * so a long time spent here could trigger a
1435 * hard lockup detector. Anyway this is a
1436 * debugging tool so knowing there is a handful
1437 * of pages of this order should be more than
1438 * sufficient.
1439 */
1440 if (++freecount >= 100000) {
1441 overflow = true;
1442 break;
1443 }
1444 }
1445 seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
1446 spin_unlock_irq(&zone->lock);
1447 cond_resched();
1448 spin_lock_irq(&zone->lock);
1449 }
1450 seq_putc(m, '\n');
1451 }
1452}
1453
1454/* Print out the free pages at each order for each migatetype */
1455static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
1456{
1457 int order;
1458 pg_data_t *pgdat = (pg_data_t *)arg;
1459
1460 /* Print header */
1461 seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
1462 for (order = 0; order < MAX_ORDER; ++order)
1463 seq_printf(m, "%6d ", order);
1464 seq_putc(m, '\n');
1465
1466 walk_zones_in_node(m, pgdat, true, false, pagetypeinfo_showfree_print);
1467
1468 return 0;
1469}
1470
1471static void pagetypeinfo_showblockcount_print(struct seq_file *m,
1472 pg_data_t *pgdat, struct zone *zone)
1473{
1474 int mtype;
1475 unsigned long pfn;
1476 unsigned long start_pfn = zone->zone_start_pfn;
1477 unsigned long end_pfn = zone_end_pfn(zone);
1478 unsigned long count[MIGRATE_TYPES] = { 0, };
1479
1480 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
1481 struct page *page;
1482
1483 page = pfn_to_online_page(pfn);
1484 if (!page)
1485 continue;
1486
1487 if (page_zone(page) != zone)
1488 continue;
1489
1490 mtype = get_pageblock_migratetype(page);
1491
1492 if (mtype < MIGRATE_TYPES)
1493 count[mtype]++;
1494 }
1495
1496 /* Print counts */
1497 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1498 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1499 seq_printf(m, "%12lu ", count[mtype]);
1500 seq_putc(m, '\n');
1501}
1502
1503/* Print out the number of pageblocks for each migratetype */
1504static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
1505{
1506 int mtype;
1507 pg_data_t *pgdat = (pg_data_t *)arg;
1508
1509 seq_printf(m, "\n%-23s", "Number of blocks type ");
1510 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1511 seq_printf(m, "%12s ", migratetype_names[mtype]);
1512 seq_putc(m, '\n');
1513 walk_zones_in_node(m, pgdat, true, false,
1514 pagetypeinfo_showblockcount_print);
1515
1516 return 0;
1517}
1518
1519/*
1520 * Print out the number of pageblocks for each migratetype that contain pages
1521 * of other types. This gives an indication of how well fallbacks are being
1522 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
1523 * to determine what is going on
1524 */
1525static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
1526{
1527#ifdef CONFIG_PAGE_OWNER
1528 int mtype;
1529
1530 if (!static_branch_unlikely(&page_owner_inited))
1531 return;
1532
1533 drain_all_pages(NULL);
1534
1535 seq_printf(m, "\n%-23s", "Number of mixed blocks ");
1536 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1537 seq_printf(m, "%12s ", migratetype_names[mtype]);
1538 seq_putc(m, '\n');
1539
1540 walk_zones_in_node(m, pgdat, true, true,
1541 pagetypeinfo_showmixedcount_print);
1542#endif /* CONFIG_PAGE_OWNER */
1543}
1544
1545/*
1546 * This prints out statistics in relation to grouping pages by mobility.
1547 * It is expensive to collect so do not constantly read the file.
1548 */
1549static int pagetypeinfo_show(struct seq_file *m, void *arg)
1550{
1551 pg_data_t *pgdat = (pg_data_t *)arg;
1552
1553 /* check memoryless node */
1554 if (!node_state(pgdat->node_id, N_MEMORY))
1555 return 0;
1556
1557 seq_printf(m, "Page block order: %d\n", pageblock_order);
1558 seq_printf(m, "Pages per block: %lu\n", pageblock_nr_pages);
1559 seq_putc(m, '\n');
1560 pagetypeinfo_showfree(m, pgdat);
1561 pagetypeinfo_showblockcount(m, pgdat);
1562 pagetypeinfo_showmixedcount(m, pgdat);
1563
1564 return 0;
1565}
1566
1567static const struct seq_operations fragmentation_op = {
1568 .start = frag_start,
1569 .next = frag_next,
1570 .stop = frag_stop,
1571 .show = frag_show,
1572};
1573
1574static const struct seq_operations pagetypeinfo_op = {
1575 .start = frag_start,
1576 .next = frag_next,
1577 .stop = frag_stop,
1578 .show = pagetypeinfo_show,
1579};
1580
1581static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
1582{
1583 int zid;
1584
1585 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1586 struct zone *compare = &pgdat->node_zones[zid];
1587
1588 if (populated_zone(compare))
1589 return zone == compare;
1590 }
1591
1592 return false;
1593}
1594
1595static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1596 struct zone *zone)
1597{
1598 int i;
1599 seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1600 if (is_zone_first_populated(pgdat, zone)) {
1601 seq_printf(m, "\n per-node stats");
1602 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1603 unsigned long pages = node_page_state_pages(pgdat, i);
1604
1605 if (vmstat_item_print_in_thp(i))
1606 pages /= HPAGE_PMD_NR;
1607 seq_printf(m, "\n %-12s %lu", node_stat_name(i),
1608 pages);
1609 }
1610 }
1611 seq_printf(m,
1612 "\n pages free %lu"
1613 "\n min %lu"
1614 "\n low %lu"
1615 "\n high %lu"
1616 "\n spanned %lu"
1617 "\n present %lu"
1618 "\n managed %lu"
1619 "\n cma %lu",
1620 zone_page_state(zone, NR_FREE_PAGES),
1621 min_wmark_pages(zone),
1622 low_wmark_pages(zone),
1623 high_wmark_pages(zone),
1624 zone->spanned_pages,
1625 zone->present_pages,
1626 zone_managed_pages(zone),
1627 zone_cma_pages(zone));
1628
1629 seq_printf(m,
1630 "\n protection: (%ld",
1631 zone->lowmem_reserve[0]);
1632 for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
1633 seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
1634 seq_putc(m, ')');
1635
1636 /* If unpopulated, no other information is useful */
1637 if (!populated_zone(zone)) {
1638 seq_putc(m, '\n');
1639 return;
1640 }
1641
1642 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1643 seq_printf(m, "\n %-12s %lu", zone_stat_name(i),
1644 zone_page_state(zone, i));
1645
1646#ifdef CONFIG_NUMA
1647 for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
1648 seq_printf(m, "\n %-12s %lu", numa_stat_name(i),
1649 zone_numa_event_state(zone, i));
1650#endif
1651
1652 seq_printf(m, "\n pagesets");
1653 for_each_online_cpu(i) {
1654 struct per_cpu_pages *pcp;
1655 struct per_cpu_zonestat __maybe_unused *pzstats;
1656
1657 pcp = per_cpu_ptr(zone->per_cpu_pageset, i);
1658 seq_printf(m,
1659 "\n cpu: %i"
1660 "\n count: %i"
1661 "\n high: %i"
1662 "\n batch: %i",
1663 i,
1664 pcp->count,
1665 pcp->high,
1666 pcp->batch);
1667#ifdef CONFIG_SMP
1668 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, i);
1669 seq_printf(m, "\n vm stats threshold: %d",
1670 pzstats->stat_threshold);
1671#endif
1672 }
1673 seq_printf(m,
1674 "\n node_unreclaimable: %u"
1675 "\n start_pfn: %lu",
1676 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
1677 zone->zone_start_pfn);
1678 seq_putc(m, '\n');
1679}
1680
1681/*
1682 * Output information about zones in @pgdat. All zones are printed regardless
1683 * of whether they are populated or not: lowmem_reserve_ratio operates on the
1684 * set of all zones and userspace would not be aware of such zones if they are
1685 * suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio).
1686 */
1687static int zoneinfo_show(struct seq_file *m, void *arg)
1688{
1689 pg_data_t *pgdat = (pg_data_t *)arg;
1690 walk_zones_in_node(m, pgdat, false, false, zoneinfo_show_print);
1691 return 0;
1692}
1693
1694static const struct seq_operations zoneinfo_op = {
1695 .start = frag_start, /* iterate over all zones. The same as in
1696 * fragmentation. */
1697 .next = frag_next,
1698 .stop = frag_stop,
1699 .show = zoneinfo_show,
1700};
1701
1702#define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \
1703 NR_VM_NUMA_EVENT_ITEMS + \
1704 NR_VM_NODE_STAT_ITEMS + \
1705 NR_VM_WRITEBACK_STAT_ITEMS + \
1706 (IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \
1707 NR_VM_EVENT_ITEMS : 0))
1708
1709static void *vmstat_start(struct seq_file *m, loff_t *pos)
1710{
1711 unsigned long *v;
1712 int i;
1713
1714 if (*pos >= NR_VMSTAT_ITEMS)
1715 return NULL;
1716
1717 BUILD_BUG_ON(ARRAY_SIZE(vmstat_text) < NR_VMSTAT_ITEMS);
1718 fold_vm_numa_events();
1719 v = kmalloc_array(NR_VMSTAT_ITEMS, sizeof(unsigned long), GFP_KERNEL);
1720 m->private = v;
1721 if (!v)
1722 return ERR_PTR(-ENOMEM);
1723 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1724 v[i] = global_zone_page_state(i);
1725 v += NR_VM_ZONE_STAT_ITEMS;
1726
1727#ifdef CONFIG_NUMA
1728 for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
1729 v[i] = global_numa_event_state(i);
1730 v += NR_VM_NUMA_EVENT_ITEMS;
1731#endif
1732
1733 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1734 v[i] = global_node_page_state_pages(i);
1735 if (vmstat_item_print_in_thp(i))
1736 v[i] /= HPAGE_PMD_NR;
1737 }
1738 v += NR_VM_NODE_STAT_ITEMS;
1739
1740 global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1741 v + NR_DIRTY_THRESHOLD);
1742 v += NR_VM_WRITEBACK_STAT_ITEMS;
1743
1744#ifdef CONFIG_VM_EVENT_COUNTERS
1745 all_vm_events(v);
1746 v[PGPGIN] /= 2; /* sectors -> kbytes */
1747 v[PGPGOUT] /= 2;
1748#endif
1749 return (unsigned long *)m->private + *pos;
1750}
1751
1752static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1753{
1754 (*pos)++;
1755 if (*pos >= NR_VMSTAT_ITEMS)
1756 return NULL;
1757 return (unsigned long *)m->private + *pos;
1758}
1759
1760static int vmstat_show(struct seq_file *m, void *arg)
1761{
1762 unsigned long *l = arg;
1763 unsigned long off = l - (unsigned long *)m->private;
1764
1765 seq_puts(m, vmstat_text[off]);
1766 seq_put_decimal_ull(m, " ", *l);
1767 seq_putc(m, '\n');
1768
1769 if (off == NR_VMSTAT_ITEMS - 1) {
1770 /*
1771 * We've come to the end - add any deprecated counters to avoid
1772 * breaking userspace which might depend on them being present.
1773 */
1774 seq_puts(m, "nr_unstable 0\n");
1775 }
1776 return 0;
1777}
1778
1779static void vmstat_stop(struct seq_file *m, void *arg)
1780{
1781 kfree(m->private);
1782 m->private = NULL;
1783}
1784
1785static const struct seq_operations vmstat_op = {
1786 .start = vmstat_start,
1787 .next = vmstat_next,
1788 .stop = vmstat_stop,
1789 .show = vmstat_show,
1790};
1791#endif /* CONFIG_PROC_FS */
1792
1793#ifdef CONFIG_SMP
1794static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;

#ifdef CONFIG_PROC_FS
static void refresh_vm_stats(struct work_struct *work)
{
	refresh_cpu_vm_stats(true);
}

int vmstat_refresh(struct ctl_table *table, int write,
		   void *buffer, size_t *lenp, loff_t *ppos)
{
	long val;
	int err;
	int i;

	/*
	 * The regular update, every sysctl_stat_interval, may come later
	 * than expected: leaving a significant amount in per_cpu buckets.
	 * This is particularly misleading when checking a quantity of HUGE
	 * pages, immediately after running a test. /proc/sys/vm/stat_refresh,
	 * which can equally be echo'ed to or cat'ted from (by root),
	 * can be used to update the stats just before reading them.
	 *
	 * Oh, and since global_zone_page_state() etc. are so careful to hide
	 * transiently negative values, report an error here if any of
	 * the stats is negative, so we know to go looking for imbalance.
	 */
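	/*
	 * Typical use (illustrative, as root):
	 *
	 *	echo 1 >/proc/sys/vm/stat_refresh	# fold per-cpu diffs now
	 *	cat /proc/vmstat			# then read settled values
	 */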
	err = schedule_on_each_cpu(refresh_vm_stats);
	if (err)
		return err;
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
		/*
		 * Skip checking stats known to go negative occasionally.
		 */
		switch (i) {
		case NR_ZONE_WRITE_PENDING:
		case NR_FREE_CMA_PAGES:
			continue;
		}
		val = atomic_long_read(&vm_zone_stat[i]);
		if (val < 0) {
			pr_warn("%s: %s %ld\n",
				__func__, zone_stat_name(i), val);
		}
	}
	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
		/*
		 * Skip checking stats known to go negative occasionally.
		 */
		switch (i) {
		case NR_WRITEBACK:
			continue;
		}
		val = atomic_long_read(&vm_node_stat[i]);
		if (val < 0) {
			pr_warn("%s: %s %ld\n",
				__func__, node_stat_name(i), val);
		}
	}
	if (write)
		*ppos += *lenp;
	else
		*lenp = 0;
	return 0;
}
#endif /* CONFIG_PROC_FS */

static void vmstat_update(struct work_struct *w)
{
	if (refresh_cpu_vm_stats(true)) {
		/*
		 * Counters were updated so we expect more updates
		 * to occur in the future. Keep on running the
		 * update worker thread.
		 */
		queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
				this_cpu_ptr(&vmstat_work),
				round_jiffies_relative(sysctl_stat_interval));
	}
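	/*
	 * If nothing was folded, the work item is simply allowed to lapse
	 * here; vmstat_shepherd() below re-arms it once this CPU has
	 * pending diffs again.
	 */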
}

/*
 * Check if the diffs for a certain cpu indicate that
 * an update is needed.
 */
static bool need_update(int cpu)
{
	pg_data_t *last_pgdat = NULL;
	struct zone *zone;

	for_each_populated_zone(zone) {
		struct per_cpu_zonestat *pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
		struct per_cpu_nodestat *n;

		/*
		 * The fast way of checking if there are any vmstat diffs:
		 * memchr_inv() returns NULL only if every byte of the per-cpu
		 * diff array is zero, so a non-NULL result means some counter
		 * still needs folding.
		 */
		if (memchr_inv(pzstats->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS *
			       sizeof(pzstats->vm_stat_diff[0])))
			return true;

		if (last_pgdat == zone->zone_pgdat)
			continue;
		last_pgdat = zone->zone_pgdat;
		n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu);
		if (memchr_inv(n->vm_node_stat_diff, 0, NR_VM_NODE_STAT_ITEMS *
			       sizeof(n->vm_node_stat_diff[0])))
			return true;
	}
	return false;
}

/*
 * Switch off vmstat processing and then fold all the remaining differentials
 * until the diffs stay at zero. The function is used by NOHZ and can only be
 * invoked when tick processing is not active.
 */
void quiet_vmstat(void)
{
	if (system_state != SYSTEM_RUNNING)
		return;

	if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
		return;

	if (!need_update(smp_processor_id()))
		return;

	/*
	 * Just refresh the counters and do not bother with the pending delayed
	 * vmstat_update: it does not fire often enough to matter, and
	 * cancelling it would be too expensive from this path.
	 * vmstat_shepherd will take care of it for us.
	 */
	refresh_cpu_vm_stats(false);
}

/*
 * Shepherd worker thread that checks the differentials of processors that
 * have their worker threads for vm statistics updates disabled because of
 * inactivity.
 */
static void vmstat_shepherd(struct work_struct *w);

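/*
 * The shepherd uses deferrable work, so its timer does not wake an otherwise
 * idle CPU; the periodic check simply rides on the next natural wakeup.
 */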
static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);

static void vmstat_shepherd(struct work_struct *w)
{
	int cpu;

	get_online_cpus();
	/* Check processors whose vmstat worker threads have been disabled */
	for_each_online_cpu(cpu) {
		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);

		if (!delayed_work_pending(dw) && need_update(cpu))
			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);

		cond_resched();
	}
	put_online_cpus();

	schedule_delayed_work(&shepherd,
		round_jiffies_relative(sysctl_stat_interval));
}

static void __init start_shepherd_timer(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
			vmstat_update);

	schedule_delayed_work(&shepherd,
		round_jiffies_relative(sysctl_stat_interval));
}

static void __init init_cpu_node_state(void)
{
	int node;

	for_each_online_node(node) {
		if (cpumask_weight(cpumask_of_node(node)) > 0)
			node_set_state(node, N_CPU);
	}
}

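/*
 * CPU hotplug callbacks: a CPU coming online recomputes the stat thresholds
 * and marks its node N_CPU; before a CPU goes down its pending vmstat work is
 * cancelled; once it is dead, N_CPU is cleared if the node has no CPUs left.
 */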
static int vmstat_cpu_online(unsigned int cpu)
{
	refresh_zone_stat_thresholds();
	node_set_state(cpu_to_node(cpu), N_CPU);
	return 0;
}

static int vmstat_cpu_down_prep(unsigned int cpu)
{
	cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
	return 0;
}

static int vmstat_cpu_dead(unsigned int cpu)
{
	const struct cpumask *node_cpus;
	int node;

	node = cpu_to_node(cpu);

	refresh_zone_stat_thresholds();
	node_cpus = cpumask_of_node(node);
	if (cpumask_weight(node_cpus) > 0)
		return 0;

	node_clear_state(node, N_CPU);
	return 0;
}

#endif

struct workqueue_struct *mm_percpu_wq;

void __init init_mm_internals(void)
{
	int ret __maybe_unused;

	mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);

#ifdef CONFIG_SMP
	ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
					NULL, vmstat_cpu_dead);
	if (ret < 0)
		pr_err("vmstat: failed to register 'dead' hotplug state\n");

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
					vmstat_cpu_online,
					vmstat_cpu_down_prep);
	if (ret < 0)
		pr_err("vmstat: failed to register 'online' hotplug state\n");

	get_online_cpus();
	init_cpu_node_state();
	put_online_cpus();

	start_shepherd_timer();
#endif
#ifdef CONFIG_PROC_FS
	proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
	proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
	proc_create_seq("vmstat", 0444, NULL, &vmstat_op);
	proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op);
#endif
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)

/*
 * Return an index indicating how much of the available free memory is
 * unusable for an allocation of the requested size.
 */
static int unusable_free_index(unsigned int order,
				struct contig_page_info *info)
{
	/* No free memory is interpreted as all free memory is unusable */
	if (info->free_pages == 0)
		return 1000;

	/*
	 * Index should be a value between 0 and 1. Return a value to 3
	 * decimal places.
	 *
	 * 0 => no fragmentation
	 * 1 => high fragmentation
	 */
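	/*
	 * Worked example with illustrative numbers: for order = 4,
	 * free_pages = 1000 and free_blocks_suitable = 50, the suitable
	 * blocks cover 50 << 4 = 800 pages, so the index is
	 * (1000 - 800) * 1000 / 1000 = 200, displayed as 0.200.
	 */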
	return div_u64((info->free_pages -
			(info->free_blocks_suitable << order)) * 1000ULL,
			info->free_pages);
}

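/*
 * Each zone becomes one line of per-order indexes, e.g. (illustrative values):
 *
 *	Node 0, zone   Normal 0.000 0.005 0.012 ... 0.471
 */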
static void unusable_show_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	unsigned int order;
	int index;
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
				pgdat->node_id,
				zone->name);
	for (order = 0; order < MAX_ORDER; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = unusable_free_index(order, &info);
		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
	}

	seq_putc(m, '\n');
}

/*
 * Display unusable free space index
 *
 * The unusable free space index measures how much of the available free
 * memory cannot be used to satisfy an allocation of a given size; it is a
 * value between 0 and 1. The higher the value, the more of the free memory is
 * unusable and, by implication, the worse the external fragmentation. It can
 * be expressed as a percentage by multiplying by 100.
 */
static int unusable_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_MEMORY))
		return 0;

	walk_zones_in_node(m, pgdat, true, false, unusable_show_print);

	return 0;
}

static const struct seq_operations unusable_sops = {
	.start = frag_start,
	.next = frag_next,
	.stop = frag_stop,
	.show = unusable_show,
};

DEFINE_SEQ_ATTRIBUTE(unusable);

static void extfrag_show_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	unsigned int order;
	int index;

	/* Alloc on stack as interrupts are disabled for zone walk */
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
				pgdat->node_id,
				zone->name);
	for (order = 0; order < MAX_ORDER; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = __fragmentation_index(order, &info);
		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
	}

	seq_putc(m, '\n');
}

/*
 * Display fragmentation index for orders that allocations would fail for
 */
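/*
 * Orders for which the request could actually be satisfied show up as -1.000
 * (the index is defined as -1000 when a suitable free block exists), so only
 * non-negative values describe a genuine failure mode.
 */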
static int extfrag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	walk_zones_in_node(m, pgdat, true, false, extfrag_show_print);

	return 0;
}

static const struct seq_operations extfrag_sops = {
	.start = frag_start,
	.next = frag_next,
	.stop = frag_stop,
	.show = extfrag_show,
};

DEFINE_SEQ_ATTRIBUTE(extfrag);

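/*
 * Both files live under <debugfs>/extfrag/, typically
 * /sys/kernel/debug/extfrag/, and are read-only, e.g.:
 *	cat /sys/kernel/debug/extfrag/unusable_index
 */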
static int __init extfrag_debug_init(void)
{
	struct dentry *extfrag_debug_root;

	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);

	debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL,
			    &unusable_fops);

	debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL,
			    &extfrag_fops);

	return 0;
}

module_init(extfrag_debug_init);
#endif