// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/vmscan.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Swap reorganised 29.12.95, Stephen Tweedie.
 * kswapd added: 7.1.96 sct
 * Removed kswapd_ctl limits, and swap out as many pages as needed
 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 * Multiqueue VM started 5.8.00, Rik van Riel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmpressure.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/compaction.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>
#include <linux/oom.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/printk.h>
#include <linux/dax.h>
#include <linux/psi.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>
#include <linux/balloon_compaction.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>

struct scan_control {
	/* How many pages shrink_list() should reclaim */
	unsigned long nr_to_reclaim;

	/*
	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
	 * are scanned.
	 */
	nodemask_t *nodemask;

	/*
	 * The memory cgroup that hit its limit and as a result is the
	 * primary target of this reclaim invocation.
	 */
	struct mem_cgroup *target_mem_cgroup;

	/*
	 * Scan pressure balancing between anon and file LRUs
	 */
	unsigned long	anon_cost;
	unsigned long	file_cost;

	/* Can active pages be deactivated as part of reclaim? */
#define DEACTIVATE_ANON 1
#define DEACTIVATE_FILE 2
	unsigned int may_deactivate:2;
	unsigned int force_deactivate:1;
	unsigned int skipped_deactivate:1;

	/* Writepage batching in laptop mode; RECLAIM_WRITE */
	unsigned int may_writepage:1;

	/* Can mapped pages be reclaimed? */
	unsigned int may_unmap:1;

	/* Can pages be swapped as part of reclaim? */
	unsigned int may_swap:1;

	/*
	 * Cgroups are not reclaimed below their configured memory.low,
	 * unless we threaten to OOM. If any cgroups are skipped due to
	 * memory.low and nothing was reclaimed, go back for memory.low.
	 */
	unsigned int memcg_low_reclaim:1;
	unsigned int memcg_low_skipped:1;

	unsigned int hibernation_mode:1;

	/* One of the zones is ready for compaction */
	unsigned int compaction_ready:1;

	/* There is easily reclaimable cold cache in the current node */
	unsigned int cache_trim_mode:1;

	/* The file pages on the current node are dangerously low */
	unsigned int file_is_tiny:1;

	/* Allocation order */
	s8 order;

	/* Scan (total_size >> priority) pages at once */
	s8 priority;

	/* The highest zone to isolate pages for reclaim from */
	s8 reclaim_idx;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Number of pages freed so far during a call to shrink_zones() */
	unsigned long nr_reclaimed;

	struct {
		unsigned int dirty;
		unsigned int unqueued_dirty;
		unsigned int congested;
		unsigned int writeback;
		unsigned int immediate;
		unsigned int file_taken;
		unsigned int taken;
	} nr;

	/* Records the amount of slab reclaimed so far */
	struct reclaim_state reclaim_state;
};

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

/*
 * From 0 .. 200. Higher means more swappy.
 */
int vm_swappiness = 60;
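
/*
 * A rough sketch of the semantics (the precise weighting lives in
 * get_scan_count(), outside this excerpt, so treat this as orientation
 * rather than a contract): 100 treats the reclaim cost of anon and file
 * pages as roughly equal, 0 avoids swapping as long as file pages remain,
 * and values up to 200 bias reclaim increasingly toward anon pages.
 */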

static void set_task_reclaim_state(struct task_struct *task,
				   struct reclaim_state *rs)
{
	/* Check for an overwrite */
	WARN_ON_ONCE(rs && task->reclaim_state);

	/* Check for the nulling of an already-nulled member */
	WARN_ON_ONCE(!rs && !task->reclaim_state);

	task->reclaim_state = rs;
}

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

#ifdef CONFIG_MEMCG
/*
 * We allow subsystems to populate their shrinker-related
 * LRU lists before register_shrinker_prepared() is called
 * for the shrinker, since we don't want to impose
 * restrictions on their internal registration order.
 * In that case shrink_slab_memcg() may find the corresponding
 * bit set in the shrinker map.
 *
 * This value is used by the function to detect shrinkers that are
 * still registering and to skip do_shrink_slab() calls for them.
 */
#define SHRINKER_REGISTERING ((struct shrinker *)~0UL)

static DEFINE_IDR(shrinker_idr);
static int shrinker_nr_max;

static int prealloc_memcg_shrinker(struct shrinker *shrinker)
{
	int id, ret = -ENOMEM;

	down_write(&shrinker_rwsem);
	/* This may call shrinker, so it must use down_read_trylock() */
	id = idr_alloc(&shrinker_idr, SHRINKER_REGISTERING, 0, 0, GFP_KERNEL);
	if (id < 0)
		goto unlock;

	if (id >= shrinker_nr_max) {
		if (memcg_expand_shrinker_maps(id)) {
			idr_remove(&shrinker_idr, id);
			goto unlock;
		}

		shrinker_nr_max = id + 1;
	}
	shrinker->id = id;
	ret = 0;
unlock:
	up_write(&shrinker_rwsem);
	return ret;
}

static void unregister_memcg_shrinker(struct shrinker *shrinker)
{
	int id = shrinker->id;

	BUG_ON(id < 0);

	down_write(&shrinker_rwsem);
	idr_remove(&shrinker_idr, id);
	up_write(&shrinker_rwsem);
}

static bool cgroup_reclaim(struct scan_control *sc)
{
	return sc->target_mem_cgroup;
}

/**
 * writeback_throttling_sane - is the usual dirty throttling mechanism available?
 * @sc: scan_control in question
 *
 * The normal page dirty throttling mechanism in balance_dirty_pages() is
 * completely broken with the legacy memcg and direct stalling in
 * shrink_page_list() is used for throttling instead, which lacks all the
 * niceties such as fairness, adaptive pausing, bandwidth proportional
 * allocation and configurability.
 *
 * This function tests whether the vmscan currently in progress can assume
 * that the normal dirty throttling mechanism is operational.
 */
static bool writeback_throttling_sane(struct scan_control *sc)
{
	if (!cgroup_reclaim(sc))
		return true;
#ifdef CONFIG_CGROUP_WRITEBACK
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return true;
#endif
	return false;
}
#else
static int prealloc_memcg_shrinker(struct shrinker *shrinker)
{
	return 0;
}

static void unregister_memcg_shrinker(struct shrinker *shrinker)
{
}

static bool cgroup_reclaim(struct scan_control *sc)
{
	return false;
}

static bool writeback_throttling_sane(struct scan_control *sc)
{
	return true;
}
#endif

/*
 * This misses isolated pages which are not accounted for to save counters.
 * As the data only determines if reclaim or compaction continues, it is
 * not expected that isolated pages will be a dominating factor.
 */
unsigned long zone_reclaimable_pages(struct zone *zone)
{
	unsigned long nr;

	nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
		zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
	if (get_nr_swap_pages() > 0)
		nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
			zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);

	return nr;
}

/**
 * lruvec_lru_size - Returns the number of pages on the given LRU list.
 * @lruvec: lru vector
 * @lru: lru to use
 * @zone_idx: zones to consider (use MAX_NR_ZONES for the whole LRU list)
 */
unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
{
	unsigned long size = 0;
	int zid;

	for (zid = 0; zid <= zone_idx && zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];

		if (!managed_zone(zone))
			continue;

		if (!mem_cgroup_disabled())
			size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
		else
			size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
	}
	return size;
}

/*
 * Add a shrinker callback to be called from the vm.
 */
int prealloc_shrinker(struct shrinker *shrinker)
{
	unsigned int size = sizeof(*shrinker->nr_deferred);

	if (shrinker->flags & SHRINKER_NUMA_AWARE)
		size *= nr_node_ids;

	shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
	if (!shrinker->nr_deferred)
		return -ENOMEM;

	if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
		if (prealloc_memcg_shrinker(shrinker))
			goto free_deferred;
	}

	return 0;

free_deferred:
	kfree(shrinker->nr_deferred);
	shrinker->nr_deferred = NULL;
	return -ENOMEM;
}

void free_prealloced_shrinker(struct shrinker *shrinker)
{
	if (!shrinker->nr_deferred)
		return;

	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
		unregister_memcg_shrinker(shrinker);

	kfree(shrinker->nr_deferred);
	shrinker->nr_deferred = NULL;
}

void register_shrinker_prepared(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_add_tail(&shrinker->list, &shrinker_list);
#ifdef CONFIG_MEMCG
	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
		idr_replace(&shrinker_idr, shrinker, shrinker->id);
#endif
	up_write(&shrinker_rwsem);
}

int register_shrinker(struct shrinker *shrinker)
{
	int err = prealloc_shrinker(shrinker);

	if (err)
		return err;
	register_shrinker_prepared(shrinker);
	return 0;
}
EXPORT_SYMBOL(register_shrinker);
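
/*
 * A minimal sketch of the shrinker API from a caller's point of view.
 * Everything named demo_* below is hypothetical; only the registration
 * calls and the callback contract (count_objects() returning an estimate
 * or SHRINK_EMPTY, scan_objects() returning the number freed or
 * SHRINK_STOP) come from the code in this file:
 *
 *	static unsigned long demo_count(struct shrinker *s,
 *					struct shrink_control *sc)
 *	{
 *		return demo_nr_cached;		(estimate, may be 0)
 *	}
 *
 *	static unsigned long demo_scan(struct shrinker *s,
 *				       struct shrink_control *sc)
 *	{
 *		return demo_free(sc->nr_to_scan);
 *	}
 *
 *	static struct shrinker demo_shrinker = {
 *		.count_objects	= demo_count,
 *		.scan_objects	= demo_scan,
 *		.seeks		= DEFAULT_SEEKS,
 *	};
 *
 * register_shrinker(&demo_shrinker) at init time and
 * unregister_shrinker(&demo_shrinker) at teardown complete the pairing.
 */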

/*
 * Remove one
 */
void unregister_shrinker(struct shrinker *shrinker)
{
	if (!shrinker->nr_deferred)
		return;
	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
		unregister_memcg_shrinker(shrinker);
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
	kfree(shrinker->nr_deferred);
	shrinker->nr_deferred = NULL;
}
EXPORT_SYMBOL(unregister_shrinker);

#define SHRINK_BATCH 128

static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
				    struct shrinker *shrinker, int priority)
{
	unsigned long freed = 0;
	unsigned long long delta;
	long total_scan;
	long freeable;
	long nr;
	long new_nr;
	int nid = shrinkctl->nid;
	long batch_size = shrinker->batch ? shrinker->batch
					  : SHRINK_BATCH;
	long scanned = 0, next_deferred;

	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
		nid = 0;

	freeable = shrinker->count_objects(shrinker, shrinkctl);
	if (freeable == 0 || freeable == SHRINK_EMPTY)
		return freeable;

	/*
	 * copy the current shrinker scan count into a local variable
	 * and zero it so that other concurrent shrinker invocations
	 * don't also do this scanning work.
	 */
	nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);

	total_scan = nr;
	if (shrinker->seeks) {
		delta = freeable >> priority;
		delta *= 4;
		do_div(delta, shrinker->seeks);
	} else {
		/*
		 * These objects don't require any IO to create. Trim
		 * them aggressively under memory pressure to keep
		 * them from causing refetches in the IO caches.
		 */
		delta = freeable / 2;
	}
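
	/*
	 * Worked example with illustrative numbers (not from the source):
	 * at priority == 12 (DEF_PRIORITY) with freeable == 1 << 20 and
	 * shrinker->seeks == 2 (DEFAULT_SEEKS), the branch above yields
	 * delta = ((1 << 20) >> 12) * 4 / 2 = 512 objects for this call.
	 */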

	total_scan += delta;
	if (total_scan < 0) {
		pr_err("shrink_slab: %pS negative objects to delete nr=%ld\n",
		       shrinker->scan_objects, total_scan);
		total_scan = freeable;
		next_deferred = nr;
	} else
		next_deferred = total_scan;

	/*
	 * We need to avoid excessive windup on filesystem shrinkers
	 * due to large numbers of GFP_NOFS allocations causing the
	 * shrinkers to return -1 all the time. This results in a large
	 * nr being built up so when a shrink that can do some work
	 * comes along it empties the entire cache due to nr >>>
	 * freeable. This is bad for sustaining a working set in
	 * memory.
	 *
	 * Hence only allow the shrinker to scan the entire cache when
	 * a large delta change is calculated directly.
	 */
	if (delta < freeable / 4)
		total_scan = min(total_scan, freeable / 2);

	/*
	 * Avoid risking looping forever due to too large nr value:
	 * never try to free more than twice the estimated number of
	 * freeable entries.
	 */
	if (total_scan > freeable * 2)
		total_scan = freeable * 2;

	trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
				   freeable, delta, total_scan, priority);

	/*
	 * Normally, we should not scan less than batch_size objects in one
	 * pass to avoid too frequent shrinker calls, but if the slab has less
	 * than batch_size objects in total and we are really tight on memory,
	 * we will try to reclaim all available objects, otherwise we can end
	 * up failing allocations although there are plenty of reclaimable
	 * objects spread over several slabs with usage less than the
	 * batch_size.
	 *
	 * We detect the "tight on memory" situations by looking at the total
	 * number of objects we want to scan (total_scan). If it is greater
	 * than the total number of objects on slab (freeable), we must be
	 * scanning at high prio and therefore should try to reclaim as much as
	 * possible.
	 */
	while (total_scan >= batch_size ||
	       total_scan >= freeable) {
		unsigned long ret;
		unsigned long nr_to_scan = min(batch_size, total_scan);

		shrinkctl->nr_to_scan = nr_to_scan;
		shrinkctl->nr_scanned = nr_to_scan;
		ret = shrinker->scan_objects(shrinker, shrinkctl);
		if (ret == SHRINK_STOP)
			break;
		freed += ret;

		count_vm_events(SLABS_SCANNED, shrinkctl->nr_scanned);
		total_scan -= shrinkctl->nr_scanned;
		scanned += shrinkctl->nr_scanned;

		cond_resched();
	}

	if (next_deferred >= scanned)
		next_deferred -= scanned;
	else
		next_deferred = 0;
	/*
	 * move the unused scan count back into the shrinker in a
	 * manner that handles concurrent updates. If we exhausted the
	 * scan, there is no need to do an update.
	 */
	if (next_deferred > 0)
		new_nr = atomic_long_add_return(next_deferred,
						&shrinker->nr_deferred[nid]);
	else
		new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);

	trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
	return freed;
}

#ifdef CONFIG_MEMCG
static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
				       struct mem_cgroup *memcg, int priority)
{
	struct memcg_shrinker_map *map;
	unsigned long ret, freed = 0;
	int i;

	if (!mem_cgroup_online(memcg))
		return 0;

	if (!down_read_trylock(&shrinker_rwsem))
		return 0;

	map = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_map,
					true);
	if (unlikely(!map))
		goto unlock;

	for_each_set_bit(i, map->map, shrinker_nr_max) {
		struct shrink_control sc = {
			.gfp_mask = gfp_mask,
			.nid = nid,
			.memcg = memcg,
		};
		struct shrinker *shrinker;

		shrinker = idr_find(&shrinker_idr, i);
		if (unlikely(!shrinker || shrinker == SHRINKER_REGISTERING)) {
			if (!shrinker)
				clear_bit(i, map->map);
			continue;
		}

		/* Call non-slab shrinkers even though kmem is disabled */
		if (!memcg_kmem_enabled() &&
		    !(shrinker->flags & SHRINKER_NONSLAB))
			continue;

		ret = do_shrink_slab(&sc, shrinker, priority);
		if (ret == SHRINK_EMPTY) {
			clear_bit(i, map->map);
			/*
			 * After the shrinker reported that it had no objects to
			 * free, but before we cleared the corresponding bit in
			 * the memcg shrinker map, a new object might have been
			 * added. To make sure the bit is set in this case, we
			 * invoke the shrinker one more time and reset the bit
			 * if it reports that it is not empty anymore.
			 * The memory barrier here pairs with the barrier in
			 * memcg_set_shrinker_bit():
			 *
			 * list_lru_add()     shrink_slab_memcg()
			 *   list_add_tail()    clear_bit()
			 *   <MB>               <MB>
			 *   set_bit()          do_shrink_slab()
			 */
			smp_mb__after_atomic();
			ret = do_shrink_slab(&sc, shrinker, priority);
			if (ret == SHRINK_EMPTY)
				ret = 0;
			else
				memcg_set_shrinker_bit(memcg, nid, i);
		}
		freed += ret;

		if (rwsem_is_contended(&shrinker_rwsem)) {
			freed = freed ? : 1;
			break;
		}
	}
unlock:
	up_read(&shrinker_rwsem);
	return freed;
}
#else /* CONFIG_MEMCG */
static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
				       struct mem_cgroup *memcg, int priority)
{
	return 0;
}
#endif /* CONFIG_MEMCG */

/**
 * shrink_slab - shrink slab caches
 * @gfp_mask: allocation context
 * @nid: node whose slab caches to target
 * @memcg: memory cgroup whose slab caches to target
 * @priority: the reclaim priority
 *
 * Call the shrink functions to age shrinkable caches.
 *
 * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set,
 * unaware shrinkers will receive a node id of 0 instead.
 *
 * @memcg specifies the memory cgroup to target. Unaware shrinkers
 * are called only if it is the root cgroup.
 *
 * @priority is sc->priority: the number of objects is shifted right by
 * @priority to obtain the scan target.
 *
 * Returns the number of reclaimed slab objects.
 */
static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
				 struct mem_cgroup *memcg,
				 int priority)
{
	unsigned long ret, freed = 0;
	struct shrinker *shrinker;

	/*
	 * The root memcg might be allocated even though memcg is disabled
	 * via "cgroup_disable=memory" boot parameter. This could make
	 * mem_cgroup_is_root() return false, then just run memcg slab
	 * shrink, but skip global shrink. This may result in premature
	 * oom.
	 */
	if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
		return shrink_slab_memcg(gfp_mask, nid, memcg, priority);

	if (!down_read_trylock(&shrinker_rwsem))
		goto out;

	list_for_each_entry(shrinker, &shrinker_list, list) {
		struct shrink_control sc = {
			.gfp_mask = gfp_mask,
			.nid = nid,
			.memcg = memcg,
		};

		ret = do_shrink_slab(&sc, shrinker, priority);
		if (ret == SHRINK_EMPTY)
			ret = 0;
		freed += ret;
		/*
		 * Bail out if someone wants to register a new shrinker to
		 * prevent the registration from being stalled for long periods
		 * by parallel ongoing shrinking.
		 */
		if (rwsem_is_contended(&shrinker_rwsem)) {
			freed = freed ? : 1;
			break;
		}
	}

	up_read(&shrinker_rwsem);
out:
	cond_resched();
	return freed;
}

void drop_slab_node(int nid)
{
	unsigned long freed;

	do {
		struct mem_cgroup *memcg = NULL;

		freed = 0;
		memcg = mem_cgroup_iter(NULL, NULL, NULL);
		do {
			freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
		} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
	} while (freed > 10);
}

void drop_slab(void)
{
	int nid;

	for_each_online_node(nid)
		drop_slab_node(nid);
}
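
/*
 * A note on callers (they live outside this file): writing 2 or 3 to
 * /proc/sys/vm/drop_caches is the usual path into drop_slab(), via the
 * drop_caches sysctl handler in fs/drop_caches.c.
 */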

static inline int is_page_cache_freeable(struct page *page)
{
	/*
	 * A freeable page cache page is referenced only by the caller
	 * that isolated the page, the page cache and optional buffer
	 * heads at page->private.
	 */
	int page_cache_pins = PageTransHuge(page) && PageSwapCache(page) ?
		HPAGE_PMD_NR : 1;
	return page_count(page) - page_has_private(page) == 1 + page_cache_pins;
}
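
/*
 * Worked example for the check above (illustrative): a base page isolated
 * by the caller (one ref) that sits in the page cache (one ref) and has
 * buffer heads attached (one more ref, with page_has_private() == 1) has
 * page_count() == 3, so 3 - 1 == 1 + 1 and the page counts as freeable.
 */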

static int may_write_to_inode(struct inode *inode)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!inode_write_congested(inode))
		return 1;
	if (inode_to_bdi(inode) == current->backing_dev_info)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out. Probably
 * -ENOSPC. We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up. But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
			       struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping)
		mapping_set_error(mapping, error);
	unlock_page(page);
}

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking, to prevent this allocation from being
	 * stalled by pagecache activity. But note that there may be
	 * stalls if we need to run get_block(). We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in __generic_file_write_iter() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling. This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs. Easy to fix, if needed.
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (page_has_private(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				pr_info("%s: orphaned page\n", __func__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_inode(mapping->host))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}

		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}
		trace_mm_vmscan_writepage(page);
		inc_node_page_state(page, NR_VMSCAN_WRITE);
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}

/*
 * Same as remove_mapping, but if the page is removed from the mapping, it
 * gets returned with a refcount of 0.
 */
static int __remove_mapping(struct address_space *mapping, struct page *page,
			    bool reclaimed, struct mem_cgroup *target_memcg)
{
	unsigned long flags;
	int refcount;
	void *shadow = NULL;

	BUG_ON(!PageLocked(page));
	BUG_ON(mapping != page_mapping(page));

	xa_lock_irqsave(&mapping->i_pages, flags);
	/*
	 * The non-racy check for a busy page.
	 *
	 * Must be careful with the order of the tests. When someone has
	 * a ref to the page, it may be possible that they dirty it then
	 * drop the reference. So if PageDirty is tested before page_count
	 * here, then the following race may occur:
	 *
	 * get_user_pages(&page);
	 * [user mapping goes away]
	 * write_to(page);
	 *				!PageDirty(page)    [good]
	 * SetPageDirty(page);
	 * put_page(page);
	 *				!page_count(page)   [good, discard it]
	 *
	 * [oops, our write_to data is lost]
	 *
	 * Reversing the order of the tests ensures such a situation cannot
	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
	 * load is not satisfied before that of page->_refcount.
	 *
	 * Note that if SetPageDirty is always performed via set_page_dirty,
	 * and thus under the i_pages lock, then this ordering is not required.
	 */
	refcount = 1 + compound_nr(page);
	if (!page_ref_freeze(page, refcount))
		goto cannot_free;
	/* note: atomic_cmpxchg in page_ref_freeze provides the smp_rmb */
	if (unlikely(PageDirty(page))) {
		page_ref_unfreeze(page, refcount);
		goto cannot_free;
	}

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		mem_cgroup_swapout(page, swap);
		if (reclaimed && !mapping_exiting(mapping))
			shadow = workingset_eviction(page, target_memcg);
		__delete_from_swap_cache(page, swap, shadow);
		xa_unlock_irqrestore(&mapping->i_pages, flags);
		put_swap_page(page, swap);
	} else {
		void (*freepage)(struct page *);

		freepage = mapping->a_ops->freepage;
		/*
		 * Remember a shadow entry for reclaimed file cache in
		 * order to detect refaults, thus thrashing, later on.
		 *
		 * But don't store shadows in an address space that is
		 * already exiting. This is not just an optimization,
		 * inode reclaim needs to empty out the radix tree or
		 * the nodes are lost. Don't plant shadows behind its
		 * back.
		 *
		 * We also don't store shadows for DAX mappings because the
		 * only page cache pages found in these are zero pages
		 * covering holes, and because we don't want to mix DAX
		 * exceptional entries and shadow exceptional entries in the
		 * same address_space.
		 */
		if (reclaimed && page_is_file_lru(page) &&
		    !mapping_exiting(mapping) && !dax_mapping(mapping))
			shadow = workingset_eviction(page, target_memcg);
		__delete_from_page_cache(page, shadow);
		xa_unlock_irqrestore(&mapping->i_pages, flags);

		if (freepage != NULL)
			freepage(page);
	}

	return 1;

cannot_free:
	xa_unlock_irqrestore(&mapping->i_pages, flags);
	return 0;
}

/*
 * Attempt to detach a locked page from its ->mapping. If it is dirty or if
 * someone else has a ref on the page, abort and return 0. If it was
 * successfully detached, return 1. Assumes the caller has a single ref on
 * this page.
 */
int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (__remove_mapping(mapping, page, false, NULL)) {
		/*
		 * Unfreezing the refcount with 1 rather than 2 effectively
		 * drops the pagecache ref for us without requiring another
		 * atomic operation.
		 */
		page_ref_unfreeze(page, 1);
		return 1;
	}
	return 0;
}

/**
 * putback_lru_page - put previously isolated page onto appropriate LRU list
 * @page: page to be put back to appropriate lru list
 *
 * Add previously isolated @page to appropriate LRU list.
 * Page may still be unevictable for other reasons.
 *
 * lru_lock must not be held, interrupts must be enabled.
 */
void putback_lru_page(struct page *page)
{
	lru_cache_add(page);
	put_page(page);		/* drop ref from isolate */
}

enum page_references {
	PAGEREF_RECLAIM,
	PAGEREF_RECLAIM_CLEAN,
	PAGEREF_KEEP,
	PAGEREF_ACTIVATE,
};

static enum page_references page_check_references(struct page *page,
						  struct scan_control *sc)
{
	int referenced_ptes, referenced_page;
	unsigned long vm_flags;

	referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
					  &vm_flags);
	referenced_page = TestClearPageReferenced(page);

	/*
	 * Mlock lost the isolation race with us. Let try_to_unmap()
	 * move the page to the unevictable list.
	 */
	if (vm_flags & VM_LOCKED)
		return PAGEREF_RECLAIM;

	if (referenced_ptes) {
		/*
		 * All mapped pages start out with page table
		 * references from the instantiating fault, so we need
		 * to look twice if a mapped file page is used more
		 * than once.
		 *
		 * Mark it and spare it for another trip around the
		 * inactive list. Another page table reference will
		 * lead to its activation.
		 *
		 * Note: the mark is set for activated pages as well
		 * so that recently deactivated but used pages are
		 * quickly recovered.
		 */
		SetPageReferenced(page);

		if (referenced_page || referenced_ptes > 1)
			return PAGEREF_ACTIVATE;

		/*
		 * Activate file-backed executable pages after first usage.
		 */
		if ((vm_flags & VM_EXEC) && !PageSwapBacked(page))
			return PAGEREF_ACTIVATE;

		return PAGEREF_KEEP;
	}

	/* Reclaim if clean, defer dirty pages to writeback */
	if (referenced_page && !PageSwapBacked(page))
		return PAGEREF_RECLAIM_CLEAN;

	return PAGEREF_RECLAIM;
}
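
/*
 * Summary of the decisions above (a paraphrase, not additional policy):
 * a VM_LOCKED page is left to try_to_unmap() to move to the unevictable
 * list; a page referenced by several ptes, re-referenced since its last
 * deactivation, or an executable file page is activated; any other
 * pte-referenced page is kept for one more trip; a clean, previously
 * referenced, non-swapbacked page is PAGEREF_RECLAIM_CLEAN; everything
 * else is plain PAGEREF_RECLAIM.
 */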

/* Check if a page is dirty or under writeback */
static void page_check_dirty_writeback(struct page *page,
				       bool *dirty, bool *writeback)
{
	struct address_space *mapping;

	/*
	 * Anonymous pages are not handled by flushers and must be written
	 * from reclaim context. Do not stall reclaim based on them
	 */
	if (!page_is_file_lru(page) ||
	    (PageAnon(page) && !PageSwapBacked(page))) {
		*dirty = false;
		*writeback = false;
		return;
	}

	/* By default assume that the page flags are accurate */
	*dirty = PageDirty(page);
	*writeback = PageWriteback(page);

	/* Verify dirty/writeback state if the filesystem supports it */
	if (!page_has_private(page))
		return;

	mapping = page_mapping(page);
	if (mapping && mapping->a_ops->is_dirty_writeback)
		mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
}

/*
 * shrink_page_list() returns the number of reclaimed pages
 */
static unsigned int shrink_page_list(struct list_head *page_list,
				     struct pglist_data *pgdat,
				     struct scan_control *sc,
				     enum ttu_flags ttu_flags,
				     struct reclaim_stat *stat,
				     bool ignore_references)
{
	LIST_HEAD(ret_pages);
	LIST_HEAD(free_pages);
	unsigned int nr_reclaimed = 0;
	unsigned int pgactivate = 0;

	memset(stat, 0, sizeof(*stat));
	cond_resched();

	while (!list_empty(page_list)) {
		struct address_space *mapping;
		struct page *page;
		enum page_references references = PAGEREF_RECLAIM;
		bool dirty, writeback, may_enter_fs;
		unsigned int nr_pages;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (!trylock_page(page))
			goto keep;

		VM_BUG_ON_PAGE(PageActive(page), page);

		nr_pages = compound_nr(page);

		/* Account the number of base pages even though THP */
		sc->nr_scanned += nr_pages;

		if (unlikely(!page_evictable(page)))
			goto activate_locked;

		if (!sc->may_unmap && page_mapped(page))
			goto keep_locked;

		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		/*
		 * The number of dirty pages determines if a node is marked
		 * reclaim_congested which affects wait_iff_congested. kswapd
		 * will stall and start writing pages if the tail of the LRU
		 * is all dirty unqueued pages.
		 */
		page_check_dirty_writeback(page, &dirty, &writeback);
		if (dirty || writeback)
			stat->nr_dirty++;

		if (dirty && !writeback)
			stat->nr_unqueued_dirty++;

		/*
		 * Treat this page as congested if the underlying BDI is or if
		 * pages are cycling through the LRU so quickly that the
		 * pages marked for immediate reclaim are making it to the
		 * end of the LRU a second time.
		 */
		mapping = page_mapping(page);
		if (((dirty || writeback) && mapping &&
		     inode_write_congested(mapping->host)) ||
		    (writeback && PageReclaim(page)))
			stat->nr_congested++;

		/*
		 * If a page at the tail of the LRU is under writeback, there
		 * are three cases to consider.
		 *
		 * 1) If reclaim is encountering an excessive number of pages
		 *    under writeback and this page is both under writeback and
		 *    PageReclaim then it indicates that pages are being queued
		 *    for IO but are being recycled through the LRU before the
		 *    IO can complete. Waiting on the page itself risks an
		 *    indefinite stall if it is impossible to writeback the
		 *    page due to IO error or disconnected storage so instead
		 *    note that the LRU is being scanned too quickly and the
		 *    caller can stall after page list has been processed.
		 *
		 * 2) Global or new memcg reclaim encounters a page that is
		 *    not marked for immediate reclaim, or the caller does not
		 *    have __GFP_FS (or __GFP_IO if it's simply going to swap,
		 *    not to fs). In this case mark the page for immediate
		 *    reclaim and continue scanning.
		 *
		 *    Require may_enter_fs because we would wait on fs, which
		 *    may not have submitted IO yet. And the loop driver might
		 *    enter reclaim, and deadlock if it waits on a page for
		 *    which it is needed to do the write (loop masks off
		 *    __GFP_IO|__GFP_FS for this reason); but more thought
		 *    would probably show more reasons.
		 *
		 * 3) Legacy memcg encounters a page that is already marked
		 *    PageReclaim. memcg does not have any dirty pages
		 *    throttling so we could easily OOM just because too many
		 *    pages are in writeback and there is nothing else to
		 *    reclaim. Wait for the writeback to complete.
		 *
		 * In cases 1) and 2) we activate the pages to get them out of
		 * the way while we continue scanning for clean pages on the
		 * inactive list and refilling from the active list. The
		 * observation here is that waiting for disk writes is more
		 * expensive than potentially causing reloads down the line.
		 * Since they're marked for immediate reclaim, they won't put
		 * memory pressure on the cache working set any longer than it
		 * takes to write them to disk.
		 */
		if (PageWriteback(page)) {
			/* Case 1 above */
			if (current_is_kswapd() &&
			    PageReclaim(page) &&
			    test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
				stat->nr_immediate++;
				goto activate_locked;

			/* Case 2 above */
			} else if (writeback_throttling_sane(sc) ||
			    !PageReclaim(page) || !may_enter_fs) {
				/*
				 * This is slightly racy - end_page_writeback()
				 * might have just cleared PageReclaim, then
				 * setting PageReclaim here end up interpreted
				 * as PageReadahead - but that does not matter
				 * enough to care. What we do want is for this
				 * page to have PageReclaim set next time memcg
				 * reclaim reaches the tests above, so it will
				 * then wait_on_page_writeback() to avoid OOM;
				 * and it's also appropriate in global reclaim.
				 */
				SetPageReclaim(page);
				stat->nr_writeback++;
				goto activate_locked;

			/* Case 3 above */
			} else {
				unlock_page(page);
				wait_on_page_writeback(page);
				/* then go back and try same page again */
				list_add_tail(&page->lru, page_list);
				continue;
			}
		}

		if (!ignore_references)
			references = page_check_references(page, sc);

		switch (references) {
		case PAGEREF_ACTIVATE:
			goto activate_locked;
		case PAGEREF_KEEP:
			stat->nr_ref_keep += nr_pages;
			goto keep_locked;
		case PAGEREF_RECLAIM:
		case PAGEREF_RECLAIM_CLEAN:
			; /* try to reclaim the page below */
		}

		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 * Lazyfree page could be freed directly
		 */
		if (PageAnon(page) && PageSwapBacked(page)) {
			if (!PageSwapCache(page)) {
				if (!(sc->gfp_mask & __GFP_IO))
					goto keep_locked;
				if (PageTransHuge(page)) {
					/* cannot split THP, skip it */
					if (!can_split_huge_page(page, NULL))
						goto activate_locked;
					/*
					 * Split pages without a PMD map right
					 * away. Chances are some or all of the
					 * tail pages can be freed without IO.
					 */
					if (!compound_mapcount(page) &&
					    split_huge_page_to_list(page,
								    page_list))
						goto activate_locked;
				}
				if (!add_to_swap(page)) {
					if (!PageTransHuge(page))
						goto activate_locked_split;
					/* Fallback to swap normal pages */
					if (split_huge_page_to_list(page,
								    page_list))
						goto activate_locked;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
					count_vm_event(THP_SWPOUT_FALLBACK);
#endif
					if (!add_to_swap(page))
						goto activate_locked_split;
				}

				may_enter_fs = true;

				/* Adding to swap updated mapping */
				mapping = page_mapping(page);
			}
		} else if (unlikely(PageTransHuge(page))) {
			/* Split file THP */
			if (split_huge_page_to_list(page, page_list))
				goto keep_locked;
		}

		/*
		 * THP may get split above; subtract the tail pages and update
		 * nr_pages to avoid accounting tail pages twice.
		 *
		 * Tail pages that were successfully added to the swap cache
		 * reach here.
		 */
		if ((nr_pages > 1) && !PageTransHuge(page)) {
			sc->nr_scanned -= (nr_pages - 1);
			nr_pages = 1;
		}

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page)) {
			enum ttu_flags flags = ttu_flags | TTU_BATCH_FLUSH;
			bool was_swapbacked = PageSwapBacked(page);

			if (unlikely(PageTransHuge(page)))
				flags |= TTU_SPLIT_HUGE_PMD;

			if (!try_to_unmap(page, flags)) {
				stat->nr_unmap_fail += nr_pages;
				if (!was_swapbacked && PageSwapBacked(page))
					stat->nr_lazyfree_fail += nr_pages;
				goto activate_locked;
			}
		}

		if (PageDirty(page)) {
			/*
			 * Only kswapd can writeback filesystem pages
			 * to avoid risk of stack overflow. But avoid
			 * injecting inefficient single-page IO into
			 * flusher writeback as much as possible: only
			 * write pages when we've encountered many
			 * dirty pages, and when we've already scanned
			 * the rest of the LRU for clean pages and see
			 * the same dirty pages again (PageReclaim).
			 */
			if (page_is_file_lru(page) &&
			    (!current_is_kswapd() || !PageReclaim(page) ||
			     !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
				/*
				 * Immediately reclaim when written back.
				 * Similar in principle to deactivate_page()
				 * except we already have the page isolated
				 * and know it's dirty
				 */
				inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
				SetPageReclaim(page);

				goto activate_locked;
			}

			if (references == PAGEREF_RECLAIM_CLEAN)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/*
			 * Page is dirty. Flush the TLB if a writable entry
			 * potentially exists to avoid CPU writes after IO
			 * starts and then write it out here.
			 */
			try_to_unmap_flush_dirty();
			switch (pageout(page, mapping)) {
			case PAGE_KEEP:
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				stat->nr_pageout += thp_nr_pages(page);

				if (PageWriteback(page))
					goto keep;
				if (PageDirty(page))
					goto keep;

				/*
				 * A synchronous write - probably a ramdisk. Go
				 * ahead and try to reclaim the page.
				 */
				if (!trylock_page(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean). This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
		 * will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping. These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page(). We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (page_has_private(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1) {
				unlock_page(page);
				if (put_page_testzero(page))
					goto free_it;
				else {
					/*
					 * rare race with speculative reference.
					 * the speculative reference will free
					 * this page shortly, so we may
					 * increment nr_reclaimed here (and
					 * leave it off the LRU).
					 */
					nr_reclaimed++;
					continue;
				}
			}
		}

		if (PageAnon(page) && !PageSwapBacked(page)) {
			/* follow __remove_mapping for reference */
			if (!page_ref_freeze(page, 1))
				goto keep_locked;
			if (PageDirty(page)) {
				page_ref_unfreeze(page, 1);
				goto keep_locked;
			}

			count_vm_event(PGLAZYFREED);
			count_memcg_page_event(page, PGLAZYFREED);
		} else if (!mapping || !__remove_mapping(mapping, page, true,
							 sc->target_mem_cgroup))
			goto keep_locked;

		unlock_page(page);
free_it:
		/*
		 * THP may get swapped out as a whole; account for all the
		 * base pages.
		 */
		nr_reclaimed += nr_pages;

		/*
		 * Is there a need to periodically free the page list? It
		 * would appear not, as the counts should be low.
		 */
		if (unlikely(PageTransHuge(page)))
			destroy_compound_page(page);
		else
			list_add(&page->lru, &free_pages);
		continue;

activate_locked_split:
		/*
		 * Tail pages that failed to be added to the swap cache
		 * reach here. Fix up nr_scanned and nr_pages.
		 */
		if (nr_pages > 1) {
			sc->nr_scanned -= (nr_pages - 1);
			nr_pages = 1;
		}
activate_locked:
		/* Not a candidate for swapping, so reclaim swap space. */
		if (PageSwapCache(page) && (mem_cgroup_swap_full(page) ||
						PageMlocked(page)))
			try_to_free_swap(page);
		VM_BUG_ON_PAGE(PageActive(page), page);
		if (!PageMlocked(page)) {
			int type = page_is_file_lru(page);
			SetPageActive(page);
			stat->nr_activate[type] += nr_pages;
			count_memcg_page_event(page, PGACTIVATE);
		}
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
	}

	pgactivate = stat->nr_activate[0] + stat->nr_activate[1];

	mem_cgroup_uncharge_list(&free_pages);
	try_to_unmap_flush();
	free_unref_page_list(&free_pages);

	list_splice(&ret_pages, page_list);
	count_vm_events(PGACTIVATE, pgactivate);

	return nr_reclaimed;
}

unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					   struct list_head *page_list)
{
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.priority = DEF_PRIORITY,
		.may_unmap = 1,
	};
	struct reclaim_stat stat;
	unsigned int nr_reclaimed;
	struct page *page, *next;
	LIST_HEAD(clean_pages);

	list_for_each_entry_safe(page, next, page_list, lru) {
		if (page_is_file_lru(page) && !PageDirty(page) &&
		    !__PageMovable(page) && !PageUnevictable(page)) {
			ClearPageActive(page);
			list_move(&page->lru, &clean_pages);
		}
	}

	nr_reclaimed = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
					TTU_IGNORE_ACCESS, &stat, true);
	list_splice(&clean_pages, page_list);
	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -nr_reclaimed);
	/*
	 * Since lazyfree pages are isolated from file LRU from the beginning,
	 * they will rotate back to the anonymous LRU in the end if they
	 * failed to be discarded, so the isolated count will be mismatched.
	 * Compensate the isolated count for both LRU lists.
	 */
	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON,
			    stat.nr_lazyfree_fail);
	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
			    -stat.nr_lazyfree_fail);
	return nr_reclaimed;
}

/*
 * Attempt to remove the specified page from its LRU. Only take this page
 * if it is of the appropriate PageActive status. Pages which are being
 * freed elsewhere are also ignored.
 *
 * page: page to consider
 * mode: one of the LRU isolation modes defined above
 *
 * returns 0 on success, -ve errno on failure.
 */
int __isolate_lru_page(struct page *page, isolate_mode_t mode)
{
	int ret = -EINVAL;

	/* Only take pages on the LRU. */
	if (!PageLRU(page))
		return ret;

	/* Compaction should not handle unevictable pages but CMA can do so */
	if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
		return ret;

	ret = -EBUSY;

	/*
	 * To minimise LRU disruption, the caller can indicate that it only
	 * wants to isolate pages it will be able to operate on without
	 * blocking - clean pages for the most part.
	 *
	 * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants to
	 * isolate pages that can be migrated without blocking
	 */
	if (mode & ISOLATE_ASYNC_MIGRATE) {
		/* All the caller can do on PageWriteback is block */
		if (PageWriteback(page))
			return ret;

		if (PageDirty(page)) {
			struct address_space *mapping;
			bool migrate_dirty;

			/*
			 * Only pages without mappings or that have a
			 * ->migratepage callback are possible to migrate
			 * without blocking. However, we can be racing with
			 * truncation so it's necessary to lock the page
			 * to stabilise the mapping as truncation holds
			 * the page lock until after the page is removed
			 * from the page cache.
			 */
			if (!trylock_page(page))
				return ret;

			mapping = page_mapping(page);
			migrate_dirty = !mapping || mapping->a_ops->migratepage;
			unlock_page(page);
			if (!migrate_dirty)
				return ret;
		}
	}

	if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
		return ret;

	if (likely(get_page_unless_zero(page))) {
		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		ClearPageLRU(page);
		ret = 0;
	}

	return ret;
}


/*
 * Update LRU sizes after isolating pages. The LRU size updates must
 * be complete before mem_cgroup_update_lru_size due to a sanity check.
 */
static __always_inline void update_lru_sizes(struct lruvec *lruvec,
			enum lru_list lru, unsigned long *nr_zone_taken)
{
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		if (!nr_zone_taken[zid])
			continue;

		update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
	}

}

/**
 * pgdat->lru_lock is heavily contended. Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of eligible pages to look through on the list.
 * @lruvec:	The LRU vector to pull pages from.
 * @dst:	The temp list to put pages on to.
 * @nr_scanned:	The number of pages that were scanned.
 * @sc:		The scan_control struct for this reclaim session
 * @lru:	LRU list id for isolating
 *
 * returns how many pages were moved onto *@dst.
 */
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
		struct lruvec *lruvec, struct list_head *dst,
		unsigned long *nr_scanned, struct scan_control *sc,
		enum lru_list lru)
{
	struct list_head *src = &lruvec->lists[lru];
	unsigned long nr_taken = 0;
	unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
	unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
	unsigned long skipped = 0;
	unsigned long scan, total_scan, nr_pages;
	LIST_HEAD(pages_skipped);
	isolate_mode_t mode = (sc->may_unmap ? 0 : ISOLATE_UNMAPPED);

	total_scan = 0;
	scan = 0;
	while (scan < nr_to_scan && !list_empty(src)) {
		struct page *page;

		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		VM_BUG_ON_PAGE(!PageLRU(page), page);

		nr_pages = compound_nr(page);
		total_scan += nr_pages;

		if (page_zonenum(page) > sc->reclaim_idx) {
			list_move(&page->lru, &pages_skipped);
			nr_skipped[page_zonenum(page)] += nr_pages;
			continue;
		}

		/*
		 * Do not count skipped pages because that makes the function
		 * return with no isolated pages if the LRU mostly contains
		 * ineligible pages. This causes the VM to not reclaim any
		 * pages, triggering a premature OOM.
		 *
		 * Account all tail pages of THP. This would not cause
		 * premature OOM since __isolate_lru_page() returns -EBUSY
		 * only when the page is being freed somewhere else.
		 */
		scan += nr_pages;
		switch (__isolate_lru_page(page, mode)) {
		case 0:
			nr_taken += nr_pages;
			nr_zone_taken[page_zonenum(page)] += nr_pages;
			list_move(&page->lru, dst);
			break;

		case -EBUSY:
			/* else it is being freed elsewhere */
			list_move(&page->lru, src);
			continue;

		default:
			BUG();
		}
	}

	/*
	 * Splice any skipped pages to the start of the LRU list. Note that
	 * this disrupts the LRU order when reclaiming for lower zones but
	 * we cannot splice to the tail. If we did, the next SWAP_CLUSTER_MAX
	 * scan would soon rescan the same pages to skip, putting the system
	 * at risk of premature OOM.
	 */
	if (!list_empty(&pages_skipped)) {
		int zid;

		list_splice(&pages_skipped, src);
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			if (!nr_skipped[zid])
				continue;

			__count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
			skipped += nr_skipped[zid];
		}
	}
	*nr_scanned = total_scan;
	trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
				    total_scan, skipped, nr_taken, mode, lru);
	update_lru_sizes(lruvec, lru, nr_zone_taken);
	return nr_taken;
}

/**
 * isolate_lru_page - tries to isolate a page from its LRU list
 * @page: page to isolate from its LRU list
 *
 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
 * vmstat statistic corresponding to whatever LRU list the page was on.
 *
 * Returns 0 if the page was removed from an LRU list.
 * Returns -EBUSY if the page was not on an LRU list.
 *
 * The returned page will have PageLRU() cleared. If it was found on
 * the active list, it will have PageActive set. If it was found on
 * the unevictable list, it will have the PageUnevictable bit set. That flag
 * may need to be cleared by the caller before letting the page go.
 *
 * The vmstat statistic corresponding to the list on which the page was
 * found will be decremented.
 *
 * Restrictions:
 *
 * (1) Must be called with an elevated refcount on the page. This is a
 *     fundamental difference from isolate_lru_pages (which is called
 *     without a stable reference).
 * (2) the lru_lock must not be held.
 * (3) interrupts must be enabled.
 */
int isolate_lru_page(struct page *page)
{
	int ret = -EBUSY;

	VM_BUG_ON_PAGE(!page_count(page), page);
	WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");

	if (PageLRU(page)) {
		pg_data_t *pgdat = page_pgdat(page);
		struct lruvec *lruvec;

		spin_lock_irq(&pgdat->lru_lock);
		lruvec = mem_cgroup_page_lruvec(page, pgdat);
		if (PageLRU(page)) {
			int lru = page_lru(page);
			get_page(page);
			ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, lru);
			ret = 0;
		}
		spin_unlock_irq(&pgdat->lru_lock);
	}
	return ret;
}
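
/*
 * A minimal usage sketch (the caller is hypothetical; the pattern mirrors
 * migration-style callers): take a reference first, then isolate, and hand
 * the page back when done:
 *
 *	get_page(page);
 *	if (!isolate_lru_page(page)) {
 *		... operate on the page while it is off the LRU ...
 *		putback_lru_page(page);	(drops the isolation ref)
 *	}
 *	put_page(page);
 */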

/*
 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
 * then get rescheduled. When there is a massive number of tasks doing page
 * allocation, such sleeping direct reclaimers may keep piling up on each CPU;
 * the LRU list will go small and be scanned faster than necessary, leading to
 * unnecessary swapping, thrashing and OOM.
 */
static int too_many_isolated(struct pglist_data *pgdat, int file,
			     struct scan_control *sc)
{
	unsigned long inactive, isolated;

	if (current_is_kswapd())
		return 0;

	if (!writeback_throttling_sane(sc))
		return 0;

	if (file) {
		inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
		isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
	} else {
		inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
		isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
	}

	/*
	 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
	 * won't get blocked by normal direct-reclaimers, forming a circular
	 * deadlock.
	 */
	if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
		inactive >>= 3;

	return isolated > inactive;
}
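
/*
 * Worked example for the shift above (illustrative numbers): with 80000
 * inactive file pages, a GFP_KERNEL direct reclaimer (which carries both
 * __GFP_IO and __GFP_FS) is throttled once more than 80000 / 8 = 10000
 * file pages are isolated, while a GFP_NOFS caller is throttled only past
 * 80000 and therefore cannot deadlock behind the normal reclaimers.
 */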

/*
 * This moves pages from @list to corresponding LRU list.
 *
 * We move them the other way if the page is referenced by one or more
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold pgdat->lru_lock across the whole operation. But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop pgdat->lru_lock around each page. It's impossible to balance
1831 * this, so instead we remove the pages from the LRU while processing them.
1832 * It is safe to rely on PG_active against the non-LRU pages in here because
1833 * nobody will play with that bit on a non-LRU page.
1834 *
1835 * The downside is that we have to touch page->_refcount against each page.
1836 * But we had to alter page->flags anyway.
1837 *
1838 * Returns the number of pages moved to the given lruvec.
1839 */
1840
1841static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
1842 struct list_head *list)
1843{
1844 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1845 int nr_pages, nr_moved = 0;
1846 LIST_HEAD(pages_to_free);
1847 struct page *page;
1848 enum lru_list lru;
1849
1850 while (!list_empty(list)) {
1851 page = lru_to_page(list);
1852 VM_BUG_ON_PAGE(PageLRU(page), page);
1853 if (unlikely(!page_evictable(page))) {
1854 list_del(&page->lru);
1855 spin_unlock_irq(&pgdat->lru_lock);
1856 putback_lru_page(page);
1857 spin_lock_irq(&pgdat->lru_lock);
1858 continue;
1859 }
1860 lruvec = mem_cgroup_page_lruvec(page, pgdat);
1861
1862 SetPageLRU(page);
1863 lru = page_lru(page);
1864
1865 nr_pages = thp_nr_pages(page);
1866 update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
1867 list_move(&page->lru, &lruvec->lists[lru]);
1868
1869 if (put_page_testzero(page)) {
1870 __ClearPageLRU(page);
1871 __ClearPageActive(page);
1872 del_page_from_lru_list(page, lruvec, lru);
1873
1874 if (unlikely(PageCompound(page))) {
1875 spin_unlock_irq(&pgdat->lru_lock);
1876 destroy_compound_page(page);
1877 spin_lock_irq(&pgdat->lru_lock);
1878 } else
1879 list_add(&page->lru, &pages_to_free);
1880 } else {
1881 nr_moved += nr_pages;
1882 if (PageActive(page))
1883 workingset_age_nonresident(lruvec, nr_pages);
1884 }
1885 }
1886
1887 /*
1888 * To save our caller's stack, now use input list for pages to free.
1889 */
1890 list_splice(&pages_to_free, list);
1891
1892 return nr_moved;
1893}
1894
1895/*
1896 * If a kernel thread (such as nfsd for loop-back mounts) services
1897 * a backing device by writing to the page cache it sets PF_LOCAL_THROTTLE.
1898 * In that case we should only throttle if the backing device it is
1899 * writing to is congested. In other cases it is safe to throttle.
1900 */
1901static int current_may_throttle(void)
1902{
1903 return !(current->flags & PF_LOCAL_THROTTLE) ||
1904 current->backing_dev_info == NULL ||
1905 bdi_write_congested(current->backing_dev_info);
1906}
1907
1908/*
1909 * shrink_inactive_list() is a helper for shrink_node(). It returns the number
 * of reclaimed pages.
1911 */
1912static noinline_for_stack unsigned long
1913shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
1914 struct scan_control *sc, enum lru_list lru)
1915{
1916 LIST_HEAD(page_list);
1917 unsigned long nr_scanned;
1918 unsigned int nr_reclaimed = 0;
1919 unsigned long nr_taken;
1920 struct reclaim_stat stat;
1921 bool file = is_file_lru(lru);
1922 enum vm_event_item item;
1923 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1924 bool stalled = false;
1925
1926 while (unlikely(too_many_isolated(pgdat, file, sc))) {
1927 if (stalled)
1928 return 0;
1929
1930 /* wait a bit for the reclaimer. */
1931 msleep(100);
1932 stalled = true;
1933
1934 /* We are about to die and free our memory. Return now. */
1935 if (fatal_signal_pending(current))
1936 return SWAP_CLUSTER_MAX;
1937 }
1938
1939 lru_add_drain();
1940
1941 spin_lock_irq(&pgdat->lru_lock);
1942
1943 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
1944 &nr_scanned, sc, lru);
1945
1946 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
1947 item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
1948 if (!cgroup_reclaim(sc))
1949 __count_vm_events(item, nr_scanned);
1950 __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
1951 __count_vm_events(PGSCAN_ANON + file, nr_scanned);
1952
1953 spin_unlock_irq(&pgdat->lru_lock);
1954
1955 if (nr_taken == 0)
1956 return 0;
1957
1958 nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, 0,
1959 &stat, false);
1960
1961 spin_lock_irq(&pgdat->lru_lock);
1962
1963 move_pages_to_lru(lruvec, &page_list);
1964
1965 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
1966 lru_note_cost(lruvec, file, stat.nr_pageout);
1967 item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
1968 if (!cgroup_reclaim(sc))
1969 __count_vm_events(item, nr_reclaimed);
1970 __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
1971 __count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
1972
1973 spin_unlock_irq(&pgdat->lru_lock);
1974
1975 mem_cgroup_uncharge_list(&page_list);
1976 free_unref_page_list(&page_list);
1977
1978 /*
1979 * If dirty pages are scanned that are not queued for IO, it
1980 * implies that flushers are not doing their job. This can
1981 * happen when memory pressure pushes dirty pages to the end of
1982 * the LRU before the dirty limits are breached and the dirty
1983 * data has expired. It can also happen when the proportion of
1984 * dirty pages grows not through writes but through memory
1985 * pressure reclaiming all the clean cache. And in some cases,
1986 * the flushers simply cannot keep up with the allocation
1987 * rate. Nudge the flusher threads in case they are asleep.
1988 */
1989 if (stat.nr_unqueued_dirty == nr_taken)
1990 wakeup_flusher_threads(WB_REASON_VMSCAN);
1991
1992 sc->nr.dirty += stat.nr_dirty;
1993 sc->nr.congested += stat.nr_congested;
1994 sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
1995 sc->nr.writeback += stat.nr_writeback;
1996 sc->nr.immediate += stat.nr_immediate;
1997 sc->nr.taken += nr_taken;
1998 if (file)
1999 sc->nr.file_taken += nr_taken;
2000
2001 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
2002 nr_scanned, nr_reclaimed, &stat, sc->priority, file);
2003 return nr_reclaimed;
2004}
2005
2006static void shrink_active_list(unsigned long nr_to_scan,
2007 struct lruvec *lruvec,
2008 struct scan_control *sc,
2009 enum lru_list lru)
2010{
2011 unsigned long nr_taken;
2012 unsigned long nr_scanned;
2013 unsigned long vm_flags;
2014 LIST_HEAD(l_hold); /* The pages which were snipped off */
2015 LIST_HEAD(l_active);
2016 LIST_HEAD(l_inactive);
2017 struct page *page;
2018 unsigned nr_deactivate, nr_activate;
2019 unsigned nr_rotated = 0;
2020 int file = is_file_lru(lru);
2021 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2022
2023 lru_add_drain();
2024
2025 spin_lock_irq(&pgdat->lru_lock);
2026
2027 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
2028 &nr_scanned, sc, lru);
2029
2030 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
2031
2032 if (!cgroup_reclaim(sc))
2033 __count_vm_events(PGREFILL, nr_scanned);
2034 __count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
2035
2036 spin_unlock_irq(&pgdat->lru_lock);
2037
2038 while (!list_empty(&l_hold)) {
2039 cond_resched();
2040 page = lru_to_page(&l_hold);
2041 list_del(&page->lru);
2042
2043 if (unlikely(!page_evictable(page))) {
2044 putback_lru_page(page);
2045 continue;
2046 }
2047
2048 if (unlikely(buffer_heads_over_limit)) {
2049 if (page_has_private(page) && trylock_page(page)) {
2050 if (page_has_private(page))
2051 try_to_release_page(page, 0);
2052 unlock_page(page);
2053 }
2054 }
2055
2056 if (page_referenced(page, 0, sc->target_mem_cgroup,
2057 &vm_flags)) {
2058 /*
			 * Identify referenced, file-backed active pages and
			 * give them one more trip around the active list, so
			 * that executable code gets a better chance to stay in
			 * memory under moderate memory pressure. Anon pages
2063 * are not likely to be evicted by use-once streaming
2064 * IO, plus JVM can create lots of anon VM_EXEC pages,
2065 * so we ignore them here.
2066 */
2067 if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) {
2068 nr_rotated += thp_nr_pages(page);
2069 list_add(&page->lru, &l_active);
2070 continue;
2071 }
2072 }
2073
2074 ClearPageActive(page); /* we are de-activating */
2075 SetPageWorkingset(page);
2076 list_add(&page->lru, &l_inactive);
2077 }
2078
2079 /*
2080 * Move pages back to the lru list.
2081 */
2082 spin_lock_irq(&pgdat->lru_lock);
2083
2084 nr_activate = move_pages_to_lru(lruvec, &l_active);
2085 nr_deactivate = move_pages_to_lru(lruvec, &l_inactive);
2086 /* Keep all free pages in l_active list */
2087 list_splice(&l_inactive, &l_active);
2088
2089 __count_vm_events(PGDEACTIVATE, nr_deactivate);
2090 __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
2091
2092 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
2093 spin_unlock_irq(&pgdat->lru_lock);
2094
2095 mem_cgroup_uncharge_list(&l_active);
2096 free_unref_page_list(&l_active);
2097 trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
2098 nr_deactivate, nr_rotated, sc->priority, file);
2099}
2100
2101unsigned long reclaim_pages(struct list_head *page_list)
2102{
2103 int nid = NUMA_NO_NODE;
2104 unsigned int nr_reclaimed = 0;
2105 LIST_HEAD(node_page_list);
2106 struct reclaim_stat dummy_stat;
2107 struct page *page;
2108 struct scan_control sc = {
2109 .gfp_mask = GFP_KERNEL,
2110 .priority = DEF_PRIORITY,
2111 .may_writepage = 1,
2112 .may_unmap = 1,
2113 .may_swap = 1,
2114 };
2115
2116 while (!list_empty(page_list)) {
2117 page = lru_to_page(page_list);
2118 if (nid == NUMA_NO_NODE) {
2119 nid = page_to_nid(page);
2120 INIT_LIST_HEAD(&node_page_list);
2121 }
2122
2123 if (nid == page_to_nid(page)) {
2124 ClearPageActive(page);
2125 list_move(&page->lru, &node_page_list);
2126 continue;
2127 }
2128
2129 nr_reclaimed += shrink_page_list(&node_page_list,
2130 NODE_DATA(nid),
2131 &sc, 0,
2132 &dummy_stat, false);
2133 while (!list_empty(&node_page_list)) {
2134 page = lru_to_page(&node_page_list);
2135 list_del(&page->lru);
2136 putback_lru_page(page);
2137 }
2138
2139 nid = NUMA_NO_NODE;
2140 }
2141
2142 if (!list_empty(&node_page_list)) {
2143 nr_reclaimed += shrink_page_list(&node_page_list,
2144 NODE_DATA(nid),
2145 &sc, 0,
2146 &dummy_stat, false);
2147 while (!list_empty(&node_page_list)) {
2148 page = lru_to_page(&node_page_list);
2149 list_del(&page->lru);
2150 putback_lru_page(page);
2151 }
2152 }
2153
2154 return nr_reclaimed;
2155}
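/*
 * Note: reclaim_pages() expects a private list of already-isolated pages,
 * possibly spanning several nodes; madvise(MADV_PAGEOUT) is one such
 * caller. The loop above merely cuts the list at node boundaries so that
 * shrink_page_list() sees one node at a time.
 */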
2156
2157static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
2158 struct lruvec *lruvec, struct scan_control *sc)
2159{
2160 if (is_active_lru(lru)) {
2161 if (sc->may_deactivate & (1 << is_file_lru(lru)))
2162 shrink_active_list(nr_to_scan, lruvec, sc, lru);
2163 else
2164 sc->skipped_deactivate = 1;
2165 return 0;
2166 }
2167
2168 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
2169}
2170
2171/*
2172 * The inactive anon list should be small enough that the VM never has
2173 * to do too much work.
2174 *
2175 * The inactive file list should be small enough to leave most memory
2176 * to the established workingset on the scan-resistant active list,
2177 * but large enough to avoid thrashing the aggregate readahead window.
2178 *
2179 * Both inactive lists should also be large enough that each inactive
2180 * page has a chance to be referenced again before it is reclaimed.
2181 *
2182 * If that fails and refaulting is observed, the inactive list grows.
2183 *
2184 * The inactive_ratio is the target ratio of ACTIVE to INACTIVE pages
2185 * on this LRU, maintained by the pageout code. An inactive_ratio
2186 * of 3 means 3:1 or 25% of the pages are kept on the inactive list.
2187 *
2188 * total target max
2189 * memory ratio inactive
2190 * -------------------------------------
2191 * 10MB 1 5MB
2192 * 100MB 1 50MB
2193 * 1GB 3 250MB
2194 * 10GB 10 0.9GB
2195 * 100GB 31 3GB
2196 * 1TB 101 10GB
2197 * 10TB 320 32GB
2198 */
2199static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru)
2200{
2201 enum lru_list active_lru = inactive_lru + LRU_ACTIVE;
2202 unsigned long inactive, active;
2203 unsigned long inactive_ratio;
2204 unsigned long gb;
2205
2206 inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru);
2207 active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru);
2208
2209 gb = (inactive + active) >> (30 - PAGE_SHIFT);
2210 if (gb)
2211 inactive_ratio = int_sqrt(10 * gb);
2212 else
2213 inactive_ratio = 1;
2214
2215 return inactive * inactive_ratio < active;
2216}
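/*
 * Worked example matching the table above: with 100GB on an LRU pair,
 * gb = 100 and inactive_ratio = int_sqrt(1000) = 31, so the inactive
 * list is considered low once it holds less than ~1/32nd of the total,
 * i.e. roughly 3GB inactive against 97GB active.
 */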
2217
2218enum scan_balance {
2219 SCAN_EQUAL,
2220 SCAN_FRACT,
2221 SCAN_ANON,
2222 SCAN_FILE,
2223};
2224
2225/*
2226 * Determine how aggressively the anon and file LRU lists should be
2227 * scanned. The relative value of each set of LRU lists is determined
2228 * by looking at the fraction of the pages scanned we did rotate back
2229 * onto the active list instead of evict.
2230 *
2231 * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
2232 * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
2233 */
2234static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
2235 unsigned long *nr)
2236{
2237 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
2238 unsigned long anon_cost, file_cost, total_cost;
2239 int swappiness = mem_cgroup_swappiness(memcg);
2240 u64 fraction[2];
2241 u64 denominator = 0; /* gcc */
2242 enum scan_balance scan_balance;
2243 unsigned long ap, fp;
2244 enum lru_list lru;
2245
2246 /* If we have no swap space, do not bother scanning anon pages. */
2247 if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
2248 scan_balance = SCAN_FILE;
2249 goto out;
2250 }
2251
2252 /*
2253 * Global reclaim will swap to prevent OOM even with no
2254 * swappiness, but memcg users want to use this knob to
2255 * disable swapping for individual groups completely when
2256 * using the memory controller's swap limit feature would be
2257 * too expensive.
2258 */
2259 if (cgroup_reclaim(sc) && !swappiness) {
2260 scan_balance = SCAN_FILE;
2261 goto out;
2262 }
2263
2264 /*
2265 * Do not apply any pressure balancing cleverness when the
2266 * system is close to OOM, scan both anon and file equally
2267 * (unless the swappiness setting disagrees with swapping).
2268 */
2269 if (!sc->priority && swappiness) {
2270 scan_balance = SCAN_EQUAL;
2271 goto out;
2272 }
2273
2274 /*
2275 * If the system is almost out of file pages, force-scan anon.
2276 */
2277 if (sc->file_is_tiny) {
2278 scan_balance = SCAN_ANON;
2279 goto out;
2280 }
2281
2282 /*
2283 * If there is enough inactive page cache, we do not reclaim
 * anything from the anonymous working set right now.
2285 */
2286 if (sc->cache_trim_mode) {
2287 scan_balance = SCAN_FILE;
2288 goto out;
2289 }
2290
2291 scan_balance = SCAN_FRACT;
2292 /*
2293 * Calculate the pressure balance between anon and file pages.
2294 *
2295 * The amount of pressure we put on each LRU is inversely
2296 * proportional to the cost of reclaiming each list, as
2297 * determined by the share of pages that are refaulting, times
2298 * the relative IO cost of bringing back a swapped out
2299 * anonymous page vs reloading a filesystem page (swappiness).
2300 *
2301 * Although we limit that influence to ensure no list gets
2302 * left behind completely: at least a third of the pressure is
2303 * applied, before swappiness.
2304 *
2305 * With swappiness at 100, anon and file have equal IO cost.
2306 */
2307 total_cost = sc->anon_cost + sc->file_cost;
2308 anon_cost = total_cost + sc->anon_cost;
2309 file_cost = total_cost + sc->file_cost;
2310 total_cost = anon_cost + file_cost;
2311
2312 ap = swappiness * (total_cost + 1);
2313 ap /= anon_cost + 1;
2314
2315 fp = (200 - swappiness) * (total_cost + 1);
2316 fp /= file_cost + 1;
2317
2318 fraction[0] = ap;
2319 fraction[1] = fp;
2320 denominator = ap + fp;
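	/*
	 * Worked example with illustrative numbers: swappiness = 100 and
	 * all recent cost on file, say sc->anon_cost = 0 and
	 * sc->file_cost = 300. Then anon_cost = 300, file_cost = 600 and
	 * total_cost = 900, giving ap = 100 * 901 / 301 = 299 and
	 * fp = 100 * 901 / 601 = 149: anon takes ~2/3 of the pressure,
	 * while file keeps its guaranteed third despite bearing all of
	 * the recent refault cost.
	 */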
2321out:
2322 for_each_evictable_lru(lru) {
2323 int file = is_file_lru(lru);
2324 unsigned long lruvec_size;
2325 unsigned long scan;
2326 unsigned long protection;
2327
2328 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
2329 protection = mem_cgroup_protection(sc->target_mem_cgroup,
2330 memcg,
2331 sc->memcg_low_reclaim);
2332
2333 if (protection) {
2334 /*
2335 * Scale a cgroup's reclaim pressure by proportioning
2336 * its current usage to its memory.low or memory.min
2337 * setting.
2338 *
2339 * This is important, as otherwise scanning aggression
2340 * becomes extremely binary -- from nothing as we
2341 * approach the memory protection threshold, to totally
2342 * nominal as we exceed it. This results in requiring
2343 * setting extremely liberal protection thresholds. It
2344 * also means we simply get no protection at all if we
2345 * set it too low, which is not ideal.
2346 *
2347 * If there is any protection in place, we reduce scan
2348 * pressure by how much of the total memory used is
2349 * within protection thresholds.
2350 *
2351 * There is one special case: in the first reclaim pass,
2352 * we skip over all groups that are within their low
2353 * protection. If that fails to reclaim enough pages to
2354 * satisfy the reclaim goal, we come back and override
2355 * the best-effort low protection. However, we still
2356 * ideally want to honor how well-behaved groups are in
2357 * that case instead of simply punishing them all
2358 * equally. As such, we reclaim them based on how much
2359 * memory they are using, reducing the scan pressure
2360 * again by how much of the total memory used is under
2361 * hard protection.
2362 */
2363 unsigned long cgroup_size = mem_cgroup_size(memcg);
2364
2365 /* Avoid TOCTOU with earlier protection check */
2366 cgroup_size = max(cgroup_size, protection);
2367
2368 scan = lruvec_size - lruvec_size * protection /
2369 cgroup_size;
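			/*
			 * E.g., with illustrative numbers: a cgroup using
			 * 1G with 512M of memory.low protection gets
			 * protection / cgroup_size = 1/2, so only half of
			 * lruvec_size feeds into the scan target.
			 */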
2370
2371 /*
2372 * Minimally target SWAP_CLUSTER_MAX pages to keep
2373 * reclaim moving forwards, avoiding decrementing
2374 * sc->priority further than desirable.
2375 */
2376 scan = max(scan, SWAP_CLUSTER_MAX);
2377 } else {
2378 scan = lruvec_size;
2379 }
2380
2381 scan >>= sc->priority;
2382
2383 /*
2384 * If the cgroup's already been deleted, make sure to
2385 * scrape out the remaining cache.
2386 */
2387 if (!scan && !mem_cgroup_online(memcg))
2388 scan = min(lruvec_size, SWAP_CLUSTER_MAX);
2389
2390 switch (scan_balance) {
2391 case SCAN_EQUAL:
2392 /* Scan lists relative to size */
2393 break;
2394 case SCAN_FRACT:
2395 /*
2396 * Scan types proportional to swappiness and
2397 * their relative recent reclaim efficiency.
2398 * Make sure we don't miss the last page on
2399 * the offlined memory cgroups because of a
2400 * round-off error.
2401 */
2402 scan = mem_cgroup_online(memcg) ?
2403 div64_u64(scan * fraction[file], denominator) :
2404 DIV64_U64_ROUND_UP(scan * fraction[file],
2405 denominator);
2406 break;
2407 case SCAN_FILE:
2408 case SCAN_ANON:
2409 /* Scan one type exclusively */
2410 if ((scan_balance == SCAN_FILE) != file)
2411 scan = 0;
2412 break;
2413 default:
2414 /* Look ma, no brain */
2415 BUG();
2416 }
2417
2418 nr[lru] = scan;
2419 }
2420}
2421
2422static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
2423{
2424 unsigned long nr[NR_LRU_LISTS];
2425 unsigned long targets[NR_LRU_LISTS];
2426 unsigned long nr_to_scan;
2427 enum lru_list lru;
2428 unsigned long nr_reclaimed = 0;
2429 unsigned long nr_to_reclaim = sc->nr_to_reclaim;
2430 struct blk_plug plug;
2431 bool scan_adjusted;
2432
2433 get_scan_count(lruvec, sc, nr);
2434
2435 /* Record the original scan target for proportional adjustments later */
2436 memcpy(targets, nr, sizeof(nr));
2437
2438 /*
2439 * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
2440 * event that can occur when there is little memory pressure e.g.
2441 * multiple streaming readers/writers. Hence, we do not abort scanning
	 * if the requested number of pages has been reclaimed while scanning
	 * at DEF_PRIORITY, on the assumption that the fact we are direct
2444 * reclaiming implies that kswapd is not keeping up and it is best to
2445 * do a batch of work at once. For memcg reclaim one check is made to
2446 * abort proportional reclaim if either the file or anon lru has already
2447 * dropped to zero at the first pass.
2448 */
2449 scan_adjusted = (!cgroup_reclaim(sc) && !current_is_kswapd() &&
2450 sc->priority == DEF_PRIORITY);
2451
2452 blk_start_plug(&plug);
2453 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
2454 nr[LRU_INACTIVE_FILE]) {
2455 unsigned long nr_anon, nr_file, percentage;
2456 unsigned long nr_scanned;
2457
2458 for_each_evictable_lru(lru) {
2459 if (nr[lru]) {
2460 nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
2461 nr[lru] -= nr_to_scan;
2462
2463 nr_reclaimed += shrink_list(lru, nr_to_scan,
2464 lruvec, sc);
2465 }
2466 }
2467
2468 cond_resched();
2469
2470 if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
2471 continue;
2472
2473 /*
2474 * For kswapd and memcg, reclaim at least the number of pages
2475 * requested. Ensure that the anon and file LRUs are scanned
2476 * proportionally what was requested by get_scan_count(). We
2477 * stop reclaiming one LRU and reduce the amount scanning
2478 * proportional to the original scan target.
2479 */
2480 nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
2481 nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
2482
2483 /*
2484 * It's just vindictive to attack the larger once the smaller
2485 * has gone to zero. And given the way we stop scanning the
2486 * smaller below, this makes sure that we only make one nudge
2487 * towards proportionality once we've got nr_to_reclaim.
2488 */
2489 if (!nr_file || !nr_anon)
2490 break;
2491
2492 if (nr_file > nr_anon) {
2493 unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
2494 targets[LRU_ACTIVE_ANON] + 1;
2495 lru = LRU_BASE;
2496 percentage = nr_anon * 100 / scan_target;
2497 } else {
2498 unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
2499 targets[LRU_ACTIVE_FILE] + 1;
2500 lru = LRU_FILE;
2501 percentage = nr_file * 100 / scan_target;
2502 }
2503
2504 /* Stop scanning the smaller of the LRU */
2505 nr[lru] = 0;
2506 nr[lru + LRU_ACTIVE] = 0;
2507
2508 /*
2509 * Recalculate the other LRU scan count based on its original
2510 * scan target and the percentage scanning already complete
2511 */
2512 lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
2513 nr_scanned = targets[lru] - nr[lru];
2514 nr[lru] = targets[lru] * (100 - percentage) / 100;
2515 nr[lru] -= min(nr[lru], nr_scanned);
2516
2517 lru += LRU_ACTIVE;
2518 nr_scanned = targets[lru] - nr[lru];
2519 nr[lru] = targets[lru] * (100 - percentage) / 100;
2520 nr[lru] -= min(nr[lru], nr_scanned);
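		/*
		 * Net effect, with illustrative numbers: if 20% of the
		 * smaller LRU's target was still unscanned when the goal
		 * was met, the larger LRU's remaining scan is trimmed so
		 * that it too finishes at ~80% of its original target,
		 * preserving the balance chosen by get_scan_count().
		 */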
2521
2522 scan_adjusted = true;
2523 }
2524 blk_finish_plug(&plug);
2525 sc->nr_reclaimed += nr_reclaimed;
2526
2527 /*
2528 * Even if we did not try to evict anon pages at all, we want to
2529 * rebalance the anon lru active/inactive ratio.
2530 */
2531 if (total_swap_pages && inactive_is_low(lruvec, LRU_INACTIVE_ANON))
2532 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
2533 sc, LRU_ACTIVE_ANON);
2534}
2535
2536/* Use reclaim/compaction for costly allocs or under memory pressure */
2537static bool in_reclaim_compaction(struct scan_control *sc)
2538{
2539 if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
2540 (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
2541 sc->priority < DEF_PRIORITY - 2))
2542 return true;
2543
2544 return false;
2545}
2546
2547/*
2548 * Reclaim/compaction is used for high-order allocation requests. It reclaims
2549 * order-0 pages before compacting the zone. should_continue_reclaim() returns
2550 * true if more pages should be reclaimed such that when the page allocator
2551 * calls try_to_compact_pages() that it will have enough free pages to succeed.
2552 * It will give up earlier than that if there is difficulty reclaiming pages.
2553 */
2554static inline bool should_continue_reclaim(struct pglist_data *pgdat,
2555 unsigned long nr_reclaimed,
2556 struct scan_control *sc)
2557{
2558 unsigned long pages_for_compaction;
2559 unsigned long inactive_lru_pages;
2560 int z;
2561
2562 /* If not in reclaim/compaction mode, stop */
2563 if (!in_reclaim_compaction(sc))
2564 return false;
2565
2566 /*
2567 * Stop if we failed to reclaim any pages from the last SWAP_CLUSTER_MAX
	 * number of pages that were scanned. This will return to the caller
	 * with the risk that reclaim/compaction and the resulting allocation
	 * attempt fail. In the past we have tried harder for __GFP_RETRY_MAYFAIL
2571 * allocations through requiring that the full LRU list has been scanned
2572 * first, by assuming that zero delta of sc->nr_scanned means full LRU
2573 * scan, but that approximation was wrong, and there were corner cases
2574 * where always a non-zero amount of pages were scanned.
2575 */
2576 if (!nr_reclaimed)
2577 return false;
2578
2579 /* If compaction would go ahead or the allocation would succeed, stop */
2580 for (z = 0; z <= sc->reclaim_idx; z++) {
2581 struct zone *zone = &pgdat->node_zones[z];
2582 if (!managed_zone(zone))
2583 continue;
2584
2585 switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
2586 case COMPACT_SUCCESS:
2587 case COMPACT_CONTINUE:
2588 return false;
2589 default:
2590 /* check next zone */
2591 ;
2592 }
2593 }
2594
2595 /*
2596 * If we have not reclaimed enough pages for compaction and the
2597 * inactive lists are large enough, continue reclaiming
2598 */
2599 pages_for_compaction = compact_gap(sc->order);
2600 inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
2601 if (get_nr_swap_pages() > 0)
2602 inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
2603
2604 return inactive_lru_pages > pages_for_compaction;
2605}
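/*
 * For scale, assuming compact_gap() is still 2UL << order: an order-9 THP
 * request keeps reclaim going while the eligible inactive lists hold more
 * than 1024 pages (4MB with 4K pages), i.e. twice the allocation size.
 */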
2606
2607static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
2608{
2609 struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
2610 struct mem_cgroup *memcg;
2611
2612 memcg = mem_cgroup_iter(target_memcg, NULL, NULL);
2613 do {
2614 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
2615 unsigned long reclaimed;
2616 unsigned long scanned;
2617
2618 /*
2619 * This loop can become CPU-bound when target memcgs
2620 * aren't eligible for reclaim - either because they
2621 * don't have any reclaimable pages, or because their
2622 * memory is explicitly protected. Avoid soft lockups.
2623 */
2624 cond_resched();
2625
2626 mem_cgroup_calculate_protection(target_memcg, memcg);
2627
2628 if (mem_cgroup_below_min(memcg)) {
2629 /*
2630 * Hard protection.
2631 * If there is no reclaimable memory, OOM.
2632 */
2633 continue;
2634 } else if (mem_cgroup_below_low(memcg)) {
2635 /*
2636 * Soft protection.
2637 * Respect the protection only as long as
2638 * there is an unprotected supply
2639 * of reclaimable memory from other cgroups.
2640 */
2641 if (!sc->memcg_low_reclaim) {
2642 sc->memcg_low_skipped = 1;
2643 continue;
2644 }
2645 memcg_memory_event(memcg, MEMCG_LOW);
2646 }
2647
2648 reclaimed = sc->nr_reclaimed;
2649 scanned = sc->nr_scanned;
2650
2651 shrink_lruvec(lruvec, sc);
2652
2653 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
2654 sc->priority);
2655
2656 /* Record the group's reclaim efficiency */
2657 vmpressure(sc->gfp_mask, memcg, false,
2658 sc->nr_scanned - scanned,
2659 sc->nr_reclaimed - reclaimed);
2660
2661 } while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL)));
2662}
2663
2664static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
2665{
2666 struct reclaim_state *reclaim_state = current->reclaim_state;
2667 unsigned long nr_reclaimed, nr_scanned;
2668 struct lruvec *target_lruvec;
2669 bool reclaimable = false;
2670 unsigned long file;
2671
2672 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
2673
2674again:
2675 memset(&sc->nr, 0, sizeof(sc->nr));
2676
2677 nr_reclaimed = sc->nr_reclaimed;
2678 nr_scanned = sc->nr_scanned;
2679
2680 /*
2681 * Determine the scan balance between anon and file LRUs.
2682 */
2683 spin_lock_irq(&pgdat->lru_lock);
2684 sc->anon_cost = target_lruvec->anon_cost;
2685 sc->file_cost = target_lruvec->file_cost;
2686 spin_unlock_irq(&pgdat->lru_lock);
2687
2688 /*
2689 * Target desirable inactive:active list ratios for the anon
2690 * and file LRU lists.
2691 */
2692 if (!sc->force_deactivate) {
2693 unsigned long refaults;
2694
2695 refaults = lruvec_page_state(target_lruvec,
2696 WORKINGSET_ACTIVATE_ANON);
2697 if (refaults != target_lruvec->refaults[0] ||
2698 inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
2699 sc->may_deactivate |= DEACTIVATE_ANON;
2700 else
2701 sc->may_deactivate &= ~DEACTIVATE_ANON;
2702
2703 /*
2704 * When refaults are being observed, it means a new
2705 * workingset is being established. Deactivate to get
2706 * rid of any stale active pages quickly.
2707 */
2708 refaults = lruvec_page_state(target_lruvec,
2709 WORKINGSET_ACTIVATE_FILE);
2710 if (refaults != target_lruvec->refaults[1] ||
2711 inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
2712 sc->may_deactivate |= DEACTIVATE_FILE;
2713 else
2714 sc->may_deactivate &= ~DEACTIVATE_FILE;
2715 } else
2716 sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
2717
2718 /*
2719 * If we have plenty of inactive file pages that aren't
2720 * thrashing, try to reclaim those first before touching
2721 * anonymous pages.
2722 */
2723 file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
2724 if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
2725 sc->cache_trim_mode = 1;
2726 else
2727 sc->cache_trim_mode = 0;
2728
2729 /*
2730 * Prevent the reclaimer from falling into the cache trap: as
2731 * cache pages start out inactive, every cache fault will tip
2732 * the scan balance towards the file LRU. And as the file LRU
2733 * shrinks, so does the window for rotation from references.
2734 * This means we have a runaway feedback loop where a tiny
2735 * thrashing file LRU becomes infinitely more attractive than
2736 * anon pages. Try to detect this based on file LRU size.
2737 */
2738 if (!cgroup_reclaim(sc)) {
2739 unsigned long total_high_wmark = 0;
2740 unsigned long free, anon;
2741 int z;
2742
2743 free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
2744 file = node_page_state(pgdat, NR_ACTIVE_FILE) +
2745 node_page_state(pgdat, NR_INACTIVE_FILE);
2746
2747 for (z = 0; z < MAX_NR_ZONES; z++) {
2748 struct zone *zone = &pgdat->node_zones[z];
2749 if (!managed_zone(zone))
2750 continue;
2751
2752 total_high_wmark += high_wmark_pages(zone);
2753 }
2754
2755 /*
2756 * Consider anon: if that's low too, this isn't a
2757 * runaway file reclaim problem, but rather just
2758 * extreme pressure. Reclaim as per usual then.
2759 */
2760 anon = node_page_state(pgdat, NR_INACTIVE_ANON);
2761
2762 sc->file_is_tiny =
2763 file + free <= total_high_wmark &&
2764 !(sc->may_deactivate & DEACTIVATE_ANON) &&
2765 anon >> sc->priority;
2766 }
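	/*
	 * E.g., with illustrative numbers: a node with 200MB of file pages,
	 * 100MB free and 400MB of high watermarks in total is deemed
	 * file_is_tiny, provided the anon lists need no deactivation and
	 * anon >> sc->priority is non-zero.
	 */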
2767
2768 shrink_node_memcgs(pgdat, sc);
2769
2770 if (reclaim_state) {
2771 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
2772 reclaim_state->reclaimed_slab = 0;
2773 }
2774
2775 /* Record the subtree's reclaim efficiency */
2776 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
2777 sc->nr_scanned - nr_scanned,
2778 sc->nr_reclaimed - nr_reclaimed);
2779
2780 if (sc->nr_reclaimed - nr_reclaimed)
2781 reclaimable = true;
2782
2783 if (current_is_kswapd()) {
2784 /*
2785 * If reclaim is isolating dirty pages under writeback,
2786 * it implies that the long-lived page allocation rate
2787 * is exceeding the page laundering rate. Either the
2788 * global limits are not being effective at throttling
2789 * processes due to the page distribution throughout
2790 * zones or there is heavy usage of a slow backing
2791 * device. The only option is to throttle from reclaim
2792 * context which is not ideal as there is no guarantee
2793 * the dirtying process is throttled in the same way
2794 * balance_dirty_pages() manages.
2795 *
2796 * Once a node is flagged PGDAT_WRITEBACK, kswapd will
		 * count the number of pages under writeback flagged for
2798 * immediate reclaim and stall if any are encountered
2799 * in the nr_immediate check below.
2800 */
2801 if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken)
2802 set_bit(PGDAT_WRITEBACK, &pgdat->flags);
2803
2804 /* Allow kswapd to start writing pages during reclaim.*/
2805 if (sc->nr.unqueued_dirty == sc->nr.file_taken)
2806 set_bit(PGDAT_DIRTY, &pgdat->flags);
2807
2808 /*
2809 * If kswapd scans pages marked for immediate
2810 * reclaim and under writeback (nr_immediate), it
2811 * implies that pages are cycling through the LRU
2812 * faster than they are written so also forcibly stall.
2813 */
2814 if (sc->nr.immediate)
2815 congestion_wait(BLK_RW_ASYNC, HZ/10);
2816 }
2817
2818 /*
2819 * Tag a node/memcg as congested if all the dirty pages
2820 * scanned were backed by a congested BDI and
2821 * wait_iff_congested will stall.
2822 *
2823 * Legacy memcg will stall in page writeback so avoid forcibly
2824 * stalling in wait_iff_congested().
2825 */
2826 if ((current_is_kswapd() ||
2827 (cgroup_reclaim(sc) && writeback_throttling_sane(sc))) &&
2828 sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
2829 set_bit(LRUVEC_CONGESTED, &target_lruvec->flags);
2830
2831 /*
2832 * Stall direct reclaim for IO completions if underlying BDIs
2833 * and node is congested. Allow kswapd to continue until it
2834 * starts encountering unqueued dirty pages or cycling through
2835 * the LRU too quickly.
2836 */
2837 if (!current_is_kswapd() && current_may_throttle() &&
2838 !sc->hibernation_mode &&
2839 test_bit(LRUVEC_CONGESTED, &target_lruvec->flags))
2840 wait_iff_congested(BLK_RW_ASYNC, HZ/10);
2841
2842 if (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
2843 sc))
2844 goto again;
2845
2846 /*
2847 * Kswapd gives up on balancing particular nodes after too
2848 * many failures to reclaim anything from them and goes to
2849 * sleep. On reclaim progress, reset the failure counter. A
2850 * successful direct reclaim run will revive a dormant kswapd.
2851 */
2852 if (reclaimable)
2853 pgdat->kswapd_failures = 0;
2854}
2855
2856/*
2857 * Returns true if compaction should go ahead for a costly-order request, or
2858 * the allocation would already succeed without compaction. Return false if we
2859 * should reclaim first.
2860 */
2861static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
2862{
2863 unsigned long watermark;
2864 enum compact_result suitable;
2865
2866 suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx);
2867 if (suitable == COMPACT_SUCCESS)
2868 /* Allocation should succeed already. Don't reclaim. */
2869 return true;
2870 if (suitable == COMPACT_SKIPPED)
2871 /* Compaction cannot yet proceed. Do reclaim. */
2872 return false;
2873
2874 /*
2875 * Compaction is already possible, but it takes time to run and there
2876 * are potentially other callers using the pages just freed. So proceed
2877 * with reclaim to make a buffer of free pages available to give
2878 * compaction a reasonable chance of completing and allocating the page.
2879 * Note that we won't actually reclaim the whole buffer in one attempt
2880 * as the target watermark in should_continue_reclaim() is lower. But if
2881 * we are already above the high+gap watermark, don't reclaim at all.
2882 */
2883 watermark = high_wmark_pages(zone) + compact_gap(sc->order);
2884
2885 return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
2886}
2887
2888/*
2889 * This is the direct reclaim path, for page-allocating processes. We only
2890 * try to reclaim pages from zones which will satisfy the caller's allocation
2891 * request.
2892 *
2893 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan and then give up on it.
2895 */
2896static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
2897{
2898 struct zoneref *z;
2899 struct zone *zone;
2900 unsigned long nr_soft_reclaimed;
2901 unsigned long nr_soft_scanned;
2902 gfp_t orig_mask;
2903 pg_data_t *last_pgdat = NULL;
2904
2905 /*
2906 * If the number of buffer_heads in the machine exceeds the maximum
2907 * allowed level, force direct reclaim to scan the highmem zone as
2908 * highmem pages could be pinning lowmem pages storing buffer_heads
2909 */
2910 orig_mask = sc->gfp_mask;
2911 if (buffer_heads_over_limit) {
2912 sc->gfp_mask |= __GFP_HIGHMEM;
2913 sc->reclaim_idx = gfp_zone(sc->gfp_mask);
2914 }
2915
2916 for_each_zone_zonelist_nodemask(zone, z, zonelist,
2917 sc->reclaim_idx, sc->nodemask) {
2918 /*
		 * Take care that memory controller reclaim has only a small
		 * influence on the global LRU.
2921 */
2922 if (!cgroup_reclaim(sc)) {
2923 if (!cpuset_zone_allowed(zone,
2924 GFP_KERNEL | __GFP_HARDWALL))
2925 continue;
2926
2927 /*
2928 * If we already have plenty of memory free for
2929 * compaction in this zone, don't free any more.
2930 * Even though compaction is invoked for any
2931 * non-zero order, only frequent costly order
2932 * reclamation is disruptive enough to become a
2933 * noticeable problem, like transparent huge
2934 * page allocations.
2935 */
2936 if (IS_ENABLED(CONFIG_COMPACTION) &&
2937 sc->order > PAGE_ALLOC_COSTLY_ORDER &&
2938 compaction_ready(zone, sc)) {
2939 sc->compaction_ready = true;
2940 continue;
2941 }
2942
2943 /*
2944 * Shrink each node in the zonelist once. If the
2945 * zonelist is ordered by zone (not the default) then a
2946 * node may be shrunk multiple times but in that case
2947 * the user prefers lower zones being preserved.
2948 */
2949 if (zone->zone_pgdat == last_pgdat)
2950 continue;
2951
2952 /*
2953 * This steals pages from memory cgroups over softlimit
2954 * and returns the number of reclaimed pages and
2955 * scanned pages. This works for global memory pressure
2956 * and balancing, not for a memcg's limit.
2957 */
2958 nr_soft_scanned = 0;
2959 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat,
2960 sc->order, sc->gfp_mask,
2961 &nr_soft_scanned);
2962 sc->nr_reclaimed += nr_soft_reclaimed;
2963 sc->nr_scanned += nr_soft_scanned;
			/* need some check to avoid more shrink_node() calls */
2965 }
2966
2967 /* See comment about same check for global reclaim above */
2968 if (zone->zone_pgdat == last_pgdat)
2969 continue;
2970 last_pgdat = zone->zone_pgdat;
2971 shrink_node(zone->zone_pgdat, sc);
2972 }
2973
2974 /*
2975 * Restore to original mask to avoid the impact on the caller if we
2976 * promoted it to __GFP_HIGHMEM.
2977 */
2978 sc->gfp_mask = orig_mask;
2979}
2980
2981static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
2982{
2983 struct lruvec *target_lruvec;
2984 unsigned long refaults;
2985
2986 target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
2987 refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON);
2988 target_lruvec->refaults[0] = refaults;
2989 refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_FILE);
2990 target_lruvec->refaults[1] = refaults;
2991}
2992
2993/*
2994 * This is the main entry point to direct page reclaim.
2995 *
2996 * If a full scan of the inactive list fails to free enough memory then we
2997 * are "out of memory" and something needs to be killed.
2998 *
2999 * If the caller is !__GFP_FS then the probability of a failure is reasonably
3000 * high - the zone may be full of dirty or under-writeback pages, which this
3001 * caller can't do much about. We kick the writeback threads and take explicit
3002 * naps in the hope that some of these pages can be written. But if the
3003 * allocating task holds filesystem locks which prevent writeout this might not
3004 * work, and the allocation attempt will fail.
3005 *
3006 * returns: 0, if no pages reclaimed
3007 * else, the number of pages reclaimed
3008 */
3009static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
3010 struct scan_control *sc)
3011{
3012 int initial_priority = sc->priority;
3013 pg_data_t *last_pgdat;
3014 struct zoneref *z;
3015 struct zone *zone;
3016retry:
3017 delayacct_freepages_start();
3018
3019 if (!cgroup_reclaim(sc))
3020 __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);
3021
3022 do {
3023 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
3024 sc->priority);
3025 sc->nr_scanned = 0;
3026 shrink_zones(zonelist, sc);
3027
3028 if (sc->nr_reclaimed >= sc->nr_to_reclaim)
3029 break;
3030
3031 if (sc->compaction_ready)
3032 break;
3033
3034 /*
		 * If we're having trouble reclaiming, start doing
3036 * writepage even in laptop mode.
3037 */
3038 if (sc->priority < DEF_PRIORITY - 2)
3039 sc->may_writepage = 1;
3040 } while (--sc->priority >= 0);
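	/*
	 * For scale: with DEF_PRIORITY == 12, the first pass of the loop
	 * above scans lruvec_size >> 12 (~0.02%) of each list, and every
	 * subsequent pass doubles the scan window as sc->priority drops.
	 */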
3041
3042 last_pgdat = NULL;
3043 for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx,
3044 sc->nodemask) {
3045 if (zone->zone_pgdat == last_pgdat)
3046 continue;
3047 last_pgdat = zone->zone_pgdat;
3048
3049 snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat);
3050
3051 if (cgroup_reclaim(sc)) {
3052 struct lruvec *lruvec;
3053
3054 lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup,
3055 zone->zone_pgdat);
3056 clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
3057 }
3058 }
3059
3060 delayacct_freepages_end();
3061
3062 if (sc->nr_reclaimed)
3063 return sc->nr_reclaimed;
3064
3065 /* Aborted reclaim to try compaction? don't OOM, then */
3066 if (sc->compaction_ready)
3067 return 1;
3068
3069 /*
3070 * We make inactive:active ratio decisions based on the node's
3071 * composition of memory, but a restrictive reclaim_idx or a
3072 * memory.low cgroup setting can exempt large amounts of
3073 * memory from reclaim. Neither of which are very common, so
3074 * instead of doing costly eligibility calculations of the
3075 * entire cgroup subtree up front, we assume the estimates are
3076 * good, and retry with forcible deactivation if that fails.
3077 */
3078 if (sc->skipped_deactivate) {
3079 sc->priority = initial_priority;
3080 sc->force_deactivate = 1;
3081 sc->skipped_deactivate = 0;
3082 goto retry;
3083 }
3084
3085 /* Untapped cgroup reserves? Don't OOM, retry. */
3086 if (sc->memcg_low_skipped) {
3087 sc->priority = initial_priority;
3088 sc->force_deactivate = 0;
3089 sc->memcg_low_reclaim = 1;
3090 sc->memcg_low_skipped = 0;
3091 goto retry;
3092 }
3093
3094 return 0;
3095}
3096
3097static bool allow_direct_reclaim(pg_data_t *pgdat)
3098{
3099 struct zone *zone;
3100 unsigned long pfmemalloc_reserve = 0;
3101 unsigned long free_pages = 0;
3102 int i;
3103 bool wmark_ok;
3104
3105 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
3106 return true;
3107
3108 for (i = 0; i <= ZONE_NORMAL; i++) {
3109 zone = &pgdat->node_zones[i];
3110 if (!managed_zone(zone))
3111 continue;
3112
3113 if (!zone_reclaimable_pages(zone))
3114 continue;
3115
3116 pfmemalloc_reserve += min_wmark_pages(zone);
3117 free_pages += zone_page_state(zone, NR_FREE_PAGES);
3118 }
3119
3120 /* If there are no reserves (unexpected config) then do not throttle */
3121 if (!pfmemalloc_reserve)
3122 return true;
3123
3124 wmark_ok = free_pages > pfmemalloc_reserve / 2;
3125
3126 /* kswapd must be awake if processes are being throttled */
3127 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
3128 if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL)
3129 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL);
3130
3131 wake_up_interruptible(&pgdat->kswapd_wait);
3132 }
3133
3134 return wmark_ok;
3135}
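/*
 * Worked example with illustrative numbers: if the min watermarks of the
 * usable zones (<= ZONE_NORMAL) sum to 16384 pages (64MB with 4K pages),
 * direct reclaimers start throttling once free pages fall to 8192 pages,
 * half of the pfmemalloc reserve.
 */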
3136
3137/*
3138 * Throttle direct reclaimers if backing storage is backed by the network
3139 * and the PFMEMALLOC reserve for the preferred node is getting dangerously
3140 * depleted. kswapd will continue to make progress and wake the processes
3141 * when the low watermark is reached.
3142 *
3143 * Returns true if a fatal signal was delivered during throttling. If this
3144 * happens, the page allocator should not consider triggering the OOM killer.
3145 */
3146static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
3147 nodemask_t *nodemask)
3148{
3149 struct zoneref *z;
3150 struct zone *zone;
3151 pg_data_t *pgdat = NULL;
3152
3153 /*
3154 * Kernel threads should not be throttled as they may be indirectly
3155 * responsible for cleaning pages necessary for reclaim to make forward
3156 * progress. kjournald for example may enter direct reclaim while
	 * committing a transaction where throttling it could force other
3158 * processes to block on log_wait_commit().
3159 */
3160 if (current->flags & PF_KTHREAD)
3161 goto out;
3162
3163 /*
3164 * If a fatal signal is pending, this process should not throttle.
	 * It should return quickly so it can exit and free its memory.
3166 */
3167 if (fatal_signal_pending(current))
3168 goto out;
3169
3170 /*
3171 * Check if the pfmemalloc reserves are ok by finding the first node
3172 * with a usable ZONE_NORMAL or lower zone. The expectation is that
3173 * GFP_KERNEL will be required for allocating network buffers when
3174 * swapping over the network so ZONE_HIGHMEM is unusable.
3175 *
3176 * Throttling is based on the first usable node and throttled processes
3177 * wait on a queue until kswapd makes progress and wakes them. There
3178 * is an affinity then between processes waking up and where reclaim
3179 * progress has been made assuming the process wakes on the same node.
3180 * More importantly, processes running on remote nodes will not compete
3181 * for remote pfmemalloc reserves and processes on different nodes
3182 * should make reasonable progress.
3183 */
3184 for_each_zone_zonelist_nodemask(zone, z, zonelist,
3185 gfp_zone(gfp_mask), nodemask) {
3186 if (zone_idx(zone) > ZONE_NORMAL)
3187 continue;
3188
3189 /* Throttle based on the first usable node */
3190 pgdat = zone->zone_pgdat;
3191 if (allow_direct_reclaim(pgdat))
3192 goto out;
3193 break;
3194 }
3195
3196 /* If no zone was usable by the allocation flags then do not throttle */
3197 if (!pgdat)
3198 goto out;
3199
3200 /* Account for the throttling */
3201 count_vm_event(PGSCAN_DIRECT_THROTTLE);
3202
3203 /*
3204 * If the caller cannot enter the filesystem, it's possible that it
3205 * is due to the caller holding an FS lock or performing a journal
3206 * transaction in the case of a filesystem like ext[3|4]. In this case,
3207 * it is not safe to block on pfmemalloc_wait as kswapd could be
3208 * blocked waiting on the same lock. Instead, throttle for up to a
3209 * second before continuing.
3210 */
3211 if (!(gfp_mask & __GFP_FS)) {
3212 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
3213 allow_direct_reclaim(pgdat), HZ);
3214
3215 goto check_pending;
3216 }
3217
3218 /* Throttle until kswapd wakes the process */
3219 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
3220 allow_direct_reclaim(pgdat));
3221
3222check_pending:
3223 if (fatal_signal_pending(current))
3224 return true;
3225
3226out:
3227 return false;
3228}
3229
3230unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
3231 gfp_t gfp_mask, nodemask_t *nodemask)
3232{
3233 unsigned long nr_reclaimed;
3234 struct scan_control sc = {
3235 .nr_to_reclaim = SWAP_CLUSTER_MAX,
3236 .gfp_mask = current_gfp_context(gfp_mask),
3237 .reclaim_idx = gfp_zone(gfp_mask),
3238 .order = order,
3239 .nodemask = nodemask,
3240 .priority = DEF_PRIORITY,
3241 .may_writepage = !laptop_mode,
3242 .may_unmap = 1,
3243 .may_swap = 1,
3244 };
3245
3246 /*
3247 * scan_control uses s8 fields for order, priority, and reclaim_idx.
3248 * Confirm they are large enough for max values.
3249 */
3250 BUILD_BUG_ON(MAX_ORDER > S8_MAX);
3251 BUILD_BUG_ON(DEF_PRIORITY > S8_MAX);
3252 BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX);
3253
3254 /*
3255 * Do not enter reclaim if fatal signal was delivered while throttled.
3256 * 1 is returned so that the page allocator does not OOM kill at this
3257 * point.
3258 */
3259 if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
3260 return 1;
3261
3262 set_task_reclaim_state(current, &sc.reclaim_state);
3263 trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask);
3264
3265 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
3266
3267 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
3268 set_task_reclaim_state(current, NULL);
3269
3270 return nr_reclaimed;
3271}
3272
3273#ifdef CONFIG_MEMCG
3274
3275/* Only used by soft limit reclaim. Do not reuse for anything else. */
3276unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
3277 gfp_t gfp_mask, bool noswap,
3278 pg_data_t *pgdat,
3279 unsigned long *nr_scanned)
3280{
3281 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
3282 struct scan_control sc = {
3283 .nr_to_reclaim = SWAP_CLUSTER_MAX,
3284 .target_mem_cgroup = memcg,
3285 .may_writepage = !laptop_mode,
3286 .may_unmap = 1,
3287 .reclaim_idx = MAX_NR_ZONES - 1,
3288 .may_swap = !noswap,
3289 };
3290
3291 WARN_ON_ONCE(!current->reclaim_state);
3292
3293 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
3294 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
3295
3296 trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
3297 sc.gfp_mask);
3298
3299 /*
3300 * NOTE: Although we can get the priority field, using it
3301 * here is not a good idea, since it limits the pages we can scan.
	 * If we don't reclaim here, the shrink_node from balance_pgdat
	 * will pick up pages from other mem cgroups as well. We hack
	 * the priority and make it zero.
3305 */
3306 shrink_lruvec(lruvec, &sc);
3307
3308 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
3309
3310 *nr_scanned = sc.nr_scanned;
3311
3312 return sc.nr_reclaimed;
3313}
3314
3315unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
3316 unsigned long nr_pages,
3317 gfp_t gfp_mask,
3318 bool may_swap)
3319{
3320 unsigned long nr_reclaimed;
3321 unsigned int noreclaim_flag;
3322 struct scan_control sc = {
3323 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
3324 .gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) |
3325 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
3326 .reclaim_idx = MAX_NR_ZONES - 1,
3327 .target_mem_cgroup = memcg,
3328 .priority = DEF_PRIORITY,
3329 .may_writepage = !laptop_mode,
3330 .may_unmap = 1,
3331 .may_swap = may_swap,
3332 };
3333 /*
3334 * Traverse the ZONELIST_FALLBACK zonelist of the current node to put
3335 * equal pressure on all the nodes. This is based on the assumption that
3336 * the reclaim does not bail out early.
3337 */
3338 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
3339
3340 set_task_reclaim_state(current, &sc.reclaim_state);
3341 trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask);
3342 noreclaim_flag = memalloc_noreclaim_save();
3343
3344 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
3345
3346 memalloc_noreclaim_restore(noreclaim_flag);
3347 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
3348 set_task_reclaim_state(current, NULL);
3349
3350 return nr_reclaimed;
3351}
3352#endif
3353
3354static void age_active_anon(struct pglist_data *pgdat,
3355 struct scan_control *sc)
3356{
3357 struct mem_cgroup *memcg;
3358 struct lruvec *lruvec;
3359
3360 if (!total_swap_pages)
3361 return;
3362
3363 lruvec = mem_cgroup_lruvec(NULL, pgdat);
3364 if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON))
3365 return;
3366
3367 memcg = mem_cgroup_iter(NULL, NULL, NULL);
3368 do {
3369 lruvec = mem_cgroup_lruvec(memcg, pgdat);
3370 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
3371 sc, LRU_ACTIVE_ANON);
3372 memcg = mem_cgroup_iter(NULL, memcg, NULL);
3373 } while (memcg);
3374}
3375
3376static bool pgdat_watermark_boosted(pg_data_t *pgdat, int highest_zoneidx)
3377{
3378 int i;
3379 struct zone *zone;
3380
3381 /*
3382 * Check for watermark boosts top-down as the higher zones
3383 * are more likely to be boosted. Both watermarks and boosts
3384 * should not be checked at the same time as reclaim would
3385 * start prematurely when there is no boosting and a lower
3386 * zone is balanced.
3387 */
3388 for (i = highest_zoneidx; i >= 0; i--) {
3389 zone = pgdat->node_zones + i;
3390 if (!managed_zone(zone))
3391 continue;
3392
3393 if (zone->watermark_boost)
3394 return true;
3395 }
3396
3397 return false;
3398}
3399
3400/*
3401 * Returns true if there is an eligible zone balanced for the request order
3402 * and highest_zoneidx
3403 */
3404static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx)
3405{
3406 int i;
3407 unsigned long mark = -1;
3408 struct zone *zone;
3409
3410 /*
3411 * Check watermarks bottom-up as lower zones are more likely to
3412 * meet watermarks.
3413 */
3414 for (i = 0; i <= highest_zoneidx; i++) {
3415 zone = pgdat->node_zones + i;
3416
3417 if (!managed_zone(zone))
3418 continue;
3419
3420 mark = high_wmark_pages(zone);
3421 if (zone_watermark_ok_safe(zone, order, mark, highest_zoneidx))
3422 return true;
3423 }
3424
3425 /*
3426 * If a node has no populated zone within highest_zoneidx, it does not
3427 * need balancing by definition. This can happen if a zone-restricted
3428 * allocation tries to wake a remote kswapd.
3429 */
3430 if (mark == -1)
3431 return true;
3432
3433 return false;
3434}
3435
3436/* Clear pgdat state for congested, dirty or under writeback. */
3437static void clear_pgdat_congested(pg_data_t *pgdat)
3438{
3439 struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat);
3440
3441 clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
3442 clear_bit(PGDAT_DIRTY, &pgdat->flags);
3443 clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
3444}
3445
3446/*
3447 * Prepare kswapd for sleeping. This verifies that there are no processes
3448 * waiting in throttle_direct_reclaim() and that watermarks have been met.
3449 *
3450 * Returns true if kswapd is ready to sleep
3451 */
3452static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order,
3453 int highest_zoneidx)
3454{
3455 /*
3456 * The throttled processes are normally woken up in balance_pgdat() as
3457 * soon as allow_direct_reclaim() is true. But there is a potential
3458 * race between when kswapd checks the watermarks and a process gets
3459 * throttled. There is also a potential race if processes get
3460 * throttled, kswapd wakes, a large process exits thereby balancing the
3461 * zones, which causes kswapd to exit balance_pgdat() before reaching
3462 * the wake up checks. If kswapd is going to sleep, no process should
3463 * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
3464 * the wake up is premature, processes will wake kswapd and get
3465 * throttled again. The difference from wake ups in balance_pgdat() is
3466 * that here we are under prepare_to_wait().
3467 */
3468 if (waitqueue_active(&pgdat->pfmemalloc_wait))
3469 wake_up_all(&pgdat->pfmemalloc_wait);
3470
3471 /* Hopeless node, leave it to direct reclaim */
3472 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
3473 return true;
3474
3475 if (pgdat_balanced(pgdat, order, highest_zoneidx)) {
3476 clear_pgdat_congested(pgdat);
3477 return true;
3478 }
3479
3480 return false;
3481}
3482
3483/*
3484 * kswapd shrinks a node of pages that are at or below the highest usable
3485 * zone that is currently unbalanced.
3486 *
3487 * Returns true if kswapd scanned at least the requested number of pages to
3488 * reclaim or if the lack of progress was due to pages under writeback.
3489 * This is used to determine if the scanning priority needs to be raised.
3490 */
3491static bool kswapd_shrink_node(pg_data_t *pgdat,
3492 struct scan_control *sc)
3493{
3494 struct zone *zone;
3495 int z;
3496
3497 /* Reclaim a number of pages proportional to the number of zones */
3498 sc->nr_to_reclaim = 0;
3499 for (z = 0; z <= sc->reclaim_idx; z++) {
3500 zone = pgdat->node_zones + z;
3501 if (!managed_zone(zone))
3502 continue;
3503
3504 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
3505 }
3506
3507 /*
3508 * Historically care was taken to put equal pressure on all zones but
3509 * now pressure is applied based on node LRU order.
3510 */
3511 shrink_node(pgdat, sc);
3512
3513 /*
3514 * Fragmentation may mean that the system cannot be rebalanced for
3515 * high-order allocations. If twice the allocation size has been
3516 * reclaimed then recheck watermarks only at order-0 to prevent
	 * excessive reclaim. Assume that a process that requested a
	 * high-order allocation can direct reclaim/compact.
3519 */
3520 if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
3521 sc->order = 0;
3522
3523 return sc->nr_scanned >= sc->nr_to_reclaim;
3524}
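/*
 * E.g., with illustrative zone sizes: high watermarks of 4096 pages in
 * ZONE_DMA32 and 32768 pages in ZONE_NORMAL give kswapd a target of
 * 36864 pages per pass; the SWAP_CLUSTER_MAX floor only matters for
 * very small zones.
 */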
3525
3526/*
3527 * For kswapd, balance_pgdat() will reclaim pages across a node from zones
3528 * that are eligible for use by the caller until at least one zone is
3529 * balanced.
3530 *
3531 * Returns the order kswapd finished reclaiming at.
3532 *
3533 * kswapd scans the zones in the highmem->normal->dma direction. It skips
3534 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
3535 * found to have free_pages <= high_wmark_pages(zone), any page in that zone
3536 * or lower is eligible for reclaim until at least one usable zone is
3537 * balanced.
3538 */
3539static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
3540{
3541 int i;
3542 unsigned long nr_soft_reclaimed;
3543 unsigned long nr_soft_scanned;
3544 unsigned long pflags;
3545 unsigned long nr_boost_reclaim;
3546 unsigned long zone_boosts[MAX_NR_ZONES] = { 0, };
3547 bool boosted;
3548 struct zone *zone;
3549 struct scan_control sc = {
3550 .gfp_mask = GFP_KERNEL,
3551 .order = order,
3552 .may_unmap = 1,
3553 };
3554
3555 set_task_reclaim_state(current, &sc.reclaim_state);
3556 psi_memstall_enter(&pflags);
3557 __fs_reclaim_acquire();
3558
3559 count_vm_event(PAGEOUTRUN);
3560
	/*
	 * Account for the reclaim boost. Note that the zone boost is left in
	 * place so that parallel allocations that are near the watermark will
	 * stall or direct reclaim until kswapd is finished.
	 */
	nr_boost_reclaim = 0;
	for (i = 0; i <= highest_zoneidx; i++) {
		zone = pgdat->node_zones + i;
		if (!managed_zone(zone))
			continue;

		nr_boost_reclaim += zone->watermark_boost;
		zone_boosts[i] = zone->watermark_boost;
	}
	boosted = nr_boost_reclaim;
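	/*
	 * Editorial example: if two zones each had their watermarks boosted
	 * by 1024 pages, nr_boost_reclaim starts at 2048 and "boosted" is
	 * true, so the loop below begins in boost mode and the boost is
	 * accounted back at the "out" label.
	 */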

restart:
	sc.priority = DEF_PRIORITY;
	do {
		unsigned long nr_reclaimed = sc.nr_reclaimed;
		bool raise_priority = true;
		bool balanced;
		bool ret;

		sc.reclaim_idx = highest_zoneidx;

		/*
		 * If the number of buffer_heads exceeds the maximum allowed
		 * then consider reclaiming from all zones. This has a dual
		 * purpose -- on 64-bit systems it is expected that
		 * buffer_heads are stripped during active rotation. On 32-bit
		 * systems, highmem pages can pin lowmem memory and shrinking
		 * buffers can relieve lowmem pressure. Reclaim may still not
		 * go ahead if all eligible zones for the original allocation
		 * request are balanced to avoid excessive reclaim from kswapd.
		 */
		if (buffer_heads_over_limit) {
			for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
				zone = pgdat->node_zones + i;
				if (!managed_zone(zone))
					continue;

				sc.reclaim_idx = i;
				break;
			}
		}

		/*
		 * If the pgdat is imbalanced then ignore boosting and preserve
		 * the watermarks for a later time and restart. Note that the
		 * zone watermarks will still be reset at the end of balancing
		 * on the grounds that the normal reclaim should be enough to
		 * re-evaluate if boosting is required when kswapd next wakes.
		 */
		balanced = pgdat_balanced(pgdat, sc.order, highest_zoneidx);
		if (!balanced && nr_boost_reclaim) {
			nr_boost_reclaim = 0;
			goto restart;
		}

		/*
		 * If boosting is not active then only reclaim if there are no
		 * eligible zones. Note that sc.reclaim_idx is not used as
		 * buffer_heads_over_limit may have adjusted it.
		 */
		if (!nr_boost_reclaim && balanced)
			goto out;

		/* Limit the priority of boosting to avoid reclaim writeback */
		if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2)
			raise_priority = false;
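		/*
		 * Editorial note: DEF_PRIORITY is 12, so boost-mode reclaim
		 * never scans more aggressively than priority 10, i.e. at
		 * most total_size >> 10 pages per pass.
		 */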

		/*
		 * Do not writeback or swap pages for boosted reclaim. The
		 * intent is to relieve pressure, not to issue sub-optimal IO
		 * from reclaim context. If no pages are reclaimed, the
		 * reclaim will be aborted.
		 */
		sc.may_writepage = !laptop_mode && !nr_boost_reclaim;
		sc.may_swap = !nr_boost_reclaim;

		/*
		 * Do some background aging of the anon list, to give
		 * pages a chance to be referenced before reclaiming. All
		 * pages are rotated regardless of classzone as this is
		 * about consistent aging.
		 */
		age_active_anon(pgdat, &sc);

		/*
		 * If we're having trouble reclaiming, start doing writepage
		 * even in laptop mode.
		 */
		if (sc.priority < DEF_PRIORITY - 2)
			sc.may_writepage = 1;

		/* Call soft limit reclaim before calling shrink_node. */
		sc.nr_scanned = 0;
		nr_soft_scanned = 0;
		nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order,
						sc.gfp_mask, &nr_soft_scanned);
		sc.nr_reclaimed += nr_soft_reclaimed;

		/*
		 * There should be no need to raise the scanning priority if
		 * enough pages are already being scanned that the high
		 * watermark would be met at 100% efficiency.
		 */
		if (kswapd_shrink_node(pgdat, &sc))
			raise_priority = false;

		/*
		 * If the low watermark is met there is no need for processes
		 * to be throttled on pfmemalloc_wait as they should now be
		 * able to safely make forward progress. Wake them.
		 */
		if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
				allow_direct_reclaim(pgdat))
			wake_up_all(&pgdat->pfmemalloc_wait);

		/* Check if kswapd should be suspending */
		__fs_reclaim_release();
		ret = try_to_freeze();
		__fs_reclaim_acquire();
		if (ret || kthread_should_stop())
			break;

		/*
		 * Raise priority if the scanning rate is too low or there was
		 * no progress in reclaiming pages.
		 */
		nr_reclaimed = sc.nr_reclaimed - nr_reclaimed;
		nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed);

		/*
		 * If reclaim made no progress for a boost, stop reclaim as
		 * IO cannot be queued and it could be an infinite loop in
		 * extreme circumstances.
		 */
		if (nr_boost_reclaim && !nr_reclaimed)
			break;

		if (raise_priority || !nr_reclaimed)
			sc.priority--;
	} while (sc.priority >= 1);
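	/*
	 * Editorial note: with DEF_PRIORITY at 12, the loop above sweeps
	 * priorities 12..1, and each drop in priority doubles the scan
	 * window of roughly total_size >> priority pages.
	 */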

	if (!sc.nr_reclaimed)
		pgdat->kswapd_failures++;

out:
	/* If reclaim was boosted, account for the reclaim done in this pass */
	if (boosted) {
		unsigned long flags;

		for (i = 0; i <= highest_zoneidx; i++) {
			if (!zone_boosts[i])
				continue;

			/* Increments are under the zone lock */
			zone = pgdat->node_zones + i;
			spin_lock_irqsave(&zone->lock, flags);
			zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]);
			spin_unlock_irqrestore(&zone->lock, flags);
		}

		/*
		 * As there is now likely space, wakeup kcompactd to defragment
		 * pageblocks.
		 */
		wakeup_kcompactd(pgdat, pageblock_order, highest_zoneidx);
	}

	snapshot_refaults(NULL, pgdat);
	__fs_reclaim_release();
	psi_memstall_leave(&pflags);
	set_task_reclaim_state(current, NULL);

	/*
	 * Return the order kswapd stopped reclaiming at as
	 * prepare_kswapd_sleep() takes it into account. If another caller
	 * entered the allocator slow path while kswapd was awake, order will
	 * remain at the higher level.
	 */
	return sc.order;
}

/*
 * The pgdat->kswapd_highest_zoneidx is used to pass the highest zone index
 * to be reclaimed by kswapd from the waker. If the value is MAX_NR_ZONES,
 * which is not a valid index, then either kswapd is running for the first
 * time or kswapd couldn't sleep after the previous reclaim attempt (the
 * node is still unbalanced). In that case return the zone index of the
 * previous kswapd reclaim cycle.
 */
static enum zone_type kswapd_highest_zoneidx(pg_data_t *pgdat,
					     enum zone_type prev_highest_zoneidx)
{
	enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx);

	return curr_idx == MAX_NR_ZONES ? prev_highest_zoneidx : curr_idx;
}

static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
				unsigned int highest_zoneidx)
{
	long remaining = 0;
	DEFINE_WAIT(wait);

	if (freezing(current) || kthread_should_stop())
		return;

	prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);

	/*
	 * Try to sleep for a short interval. Note that kcompactd will only be
	 * woken if it is possible to sleep for a short interval. This is
	 * deliberate on the assumption that if reclaim cannot keep an
	 * eligible zone balanced that it's also unlikely that compaction will
	 * succeed.
	 */
	if (prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) {
		/*
		 * Compaction records what page blocks it recently failed to
		 * isolate pages from and skips them in future scanning.
		 * When kswapd is going to sleep, it is reasonable to assume
		 * that page isolation and compaction may succeed, so reset
		 * the cache.
		 */
		reset_isolation_suitable(pgdat);

		/*
		 * We have freed the memory, now we should compact it to make
		 * allocation of the requested order possible.
		 */
		wakeup_kcompactd(pgdat, alloc_order, highest_zoneidx);

		remaining = schedule_timeout(HZ/10);
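		/* Editorial note: HZ/10 is a ~100ms nap, independent of CONFIG_HZ. */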

		/*
		 * If woken prematurely then reset kswapd_highest_zoneidx and
		 * order. The values will either be from a wakeup request or
		 * the previous request that slept prematurely.
		 */
		if (remaining) {
			WRITE_ONCE(pgdat->kswapd_highest_zoneidx,
					kswapd_highest_zoneidx(pgdat,
							highest_zoneidx));

			if (READ_ONCE(pgdat->kswapd_order) < reclaim_order)
				WRITE_ONCE(pgdat->kswapd_order, reclaim_order);
		}

		finish_wait(&pgdat->kswapd_wait, &wait);
		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
	}

	/*
	 * After a short sleep, check if it was a premature sleep. If not, then
	 * go fully to sleep until explicitly woken up.
	 */
	if (!remaining &&
	    prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) {
		trace_mm_vmscan_kswapd_sleep(pgdat->node_id);

		/*
		 * vmstat counters are not perfectly accurate and the estimated
		 * value for counters such as NR_FREE_PAGES can deviate from the
		 * true value by nr_online_cpus * threshold. To avoid the zone
		 * watermarks being breached while under pressure, we reduce the
		 * per-cpu vmstat threshold while kswapd is awake and restore
		 * them before going back to sleep.
		 */
		set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
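		/*
		 * Editorial example: with 64 online CPUs and the maximum
		 * per-cpu threshold of 125, NR_FREE_PAGES could be off by up
		 * to 64 * 125 = 8000 pages (~31MB with 4K pages), hence the
		 * tighter threshold while kswapd works.
		 */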

		if (!kthread_should_stop())
			schedule();

		set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
	} else {
		if (remaining)
			count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
		else
			count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
	}
	finish_wait(&pgdat->kswapd_wait, &wait);
}

/*
 * The background pageout daemon, started as a kernel thread
 * from the init process.
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up. This is needed for things like routing
 * etc, where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */
static int kswapd(void *p)
{
	unsigned int alloc_order, reclaim_order;
	unsigned int highest_zoneidx = MAX_NR_ZONES - 1;
	pg_data_t *pgdat = (pg_data_t *)p;
	struct task_struct *tsk = current;
	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(tsk, cpumask);

	/*
	 * Tell the memory management that we're a "memory allocator",
	 * and that if we need more memory we should get access to it
	 * regardless (see "__alloc_pages()"). "kswapd" should
	 * never get caught in the normal page freeing logic.
	 *
	 * (Kswapd normally doesn't need memory anyway, but sometimes
	 * you need a small amount of memory in order to be able to
	 * page out something else, and this flag essentially protects
	 * us from recursively trying to free more memory as we're
	 * trying to free the first piece of memory in the first place).
	 */
	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
	set_freezable();

	WRITE_ONCE(pgdat->kswapd_order, 0);
	WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);
	for ( ; ; ) {
		bool ret;

		alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
		highest_zoneidx = kswapd_highest_zoneidx(pgdat,
							 highest_zoneidx);

kswapd_try_sleep:
		kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
				    highest_zoneidx);

		/* Read the new order and highest_zoneidx */
		alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
		highest_zoneidx = kswapd_highest_zoneidx(pgdat,
							 highest_zoneidx);
		WRITE_ONCE(pgdat->kswapd_order, 0);
		WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);

		ret = try_to_freeze();
		if (kthread_should_stop())
			break;

		/*
		 * We can speed up thawing tasks if we don't call balance_pgdat
		 * after returning from the refrigerator.
		 */
		if (ret)
			continue;

		/*
		 * Reclaim begins at the requested order but if a high-order
		 * reclaim fails then kswapd falls back to reclaiming for
		 * order-0. If that happens, kswapd will consider sleeping
		 * for the order it finished reclaiming at (reclaim_order)
		 * but kcompactd is woken to compact for the original
		 * request (alloc_order).
		 */
		trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx,
					    alloc_order);
		reclaim_order = balance_pgdat(pgdat, alloc_order,
					      highest_zoneidx);
		if (reclaim_order < alloc_order)
			goto kswapd_try_sleep;
	}

	tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);

	return 0;
}

/*
 * A zone is low on free memory or too fragmented for high-order memory. If
 * kswapd should reclaim (direct reclaim is deferred), wake it up for the
 * zone's pgdat. It will wake up kcompactd after reclaiming memory. If kswapd
 * reclaim has failed or is not needed, still wake up kcompactd if only
 * compaction is needed.
 */
void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
		   enum zone_type highest_zoneidx)
{
	pg_data_t *pgdat;
	enum zone_type curr_idx;

	if (!managed_zone(zone))
		return;

	if (!cpuset_zone_allowed(zone, gfp_flags))
		return;

	pgdat = zone->zone_pgdat;
	curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx);

	if (curr_idx == MAX_NR_ZONES || curr_idx < highest_zoneidx)
		WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx);

	if (READ_ONCE(pgdat->kswapd_order) < order)
		WRITE_ONCE(pgdat->kswapd_order, order);

	if (!waitqueue_active(&pgdat->kswapd_wait))
		return;

	/* Hopeless node, leave it to direct reclaim if possible */
	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ||
	    (pgdat_balanced(pgdat, order, highest_zoneidx) &&
	     !pgdat_watermark_boosted(pgdat, highest_zoneidx))) {
		/*
		 * There may be plenty of free memory available, but it's too
		 * fragmented for high-order allocations. Wake up kcompactd
		 * and rely on compaction_suitable() to determine if it's
		 * needed. If it fails, it will defer subsequent attempts to
		 * ratelimit its work.
		 */
		if (!(gfp_flags & __GFP_DIRECT_RECLAIM))
			wakeup_kcompactd(pgdat, order, highest_zoneidx);
		return;
	}

	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order,
				      gfp_flags);
	wake_up_interruptible(&pgdat->kswapd_wait);
}

#ifdef CONFIG_HIBERNATION
/*
 * Try to free `nr_to_reclaim' pages of memory, system-wide, and return the
 * number of freed pages.
 *
 * Rather than trying to age LRUs the aim is to preserve the overall
 * LRU order by reclaiming preferentially
 * inactive > active > active referenced > active mapped
 */
unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
{
	struct scan_control sc = {
		.nr_to_reclaim = nr_to_reclaim,
		.gfp_mask = GFP_HIGHUSER_MOVABLE,
		.reclaim_idx = MAX_NR_ZONES - 1,
		.priority = DEF_PRIORITY,
		.may_writepage = 1,
		.may_unmap = 1,
		.may_swap = 1,
		.hibernation_mode = 1,
	};
	struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
	unsigned long nr_reclaimed;
	unsigned int noreclaim_flag;

	fs_reclaim_acquire(sc.gfp_mask);
	noreclaim_flag = memalloc_noreclaim_save();
	set_task_reclaim_state(current, &sc.reclaim_state);

	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);

	set_task_reclaim_state(current, NULL);
	memalloc_noreclaim_restore(noreclaim_flag);
	fs_reclaim_release(sc.gfp_mask);

	return nr_reclaimed;
}
#endif /* CONFIG_HIBERNATION */

/*
 * This kswapd start function will be called by init and node-hot-add.
 * On node-hot-add, kswapd will be moved to the proper cpus if cpus are
 * hot-added.
 */
int kswapd_run(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int ret = 0;

	if (pgdat->kswapd)
		return 0;

	pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
	if (IS_ERR(pgdat->kswapd)) {
		/* failure at boot is fatal */
		BUG_ON(system_state < SYSTEM_RUNNING);
		pr_err("Failed to start kswapd on node %d\n", nid);
		ret = PTR_ERR(pgdat->kswapd);
		pgdat->kswapd = NULL;
	}
	return ret;
}

/*
 * Called by memory hotplug when all memory in a node is offlined. Caller must
 * hold mem_hotplug_begin/end().
 */
void kswapd_stop(int nid)
{
	struct task_struct *kswapd = NODE_DATA(nid)->kswapd;

	if (kswapd) {
		kthread_stop(kswapd);
		NODE_DATA(nid)->kswapd = NULL;
	}
}

static int __init kswapd_init(void)
{
	int nid;

	swap_setup();
	for_each_node_state(nid, N_MEMORY)
		kswapd_run(nid);
	return 0;
}

module_init(kswapd_init)

#ifdef CONFIG_NUMA
/*
 * Node reclaim mode
 *
 * If non-zero call node_reclaim when the number of free pages falls below
 * the watermarks.
 */
int node_reclaim_mode __read_mostly;

#define RECLAIM_WRITE (1<<0)	/* Writeout pages during reclaim */
#define RECLAIM_UNMAP (1<<1)	/* Unmap pages during reclaim */

/*
 * Priority for NODE_RECLAIM. This determines the fraction of pages
 * of a node considered for each zone_reclaim. 4 scans 1/16th of
 * a zone.
 */
#define NODE_RECLAIM_PRIORITY 4

/*
 * Percentage of pages in a zone that must be unmapped for node_reclaim to
 * occur.
 */
int sysctl_min_unmapped_ratio = 1;
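/*
 * Editorial note: with the default of 1, node reclaim only proceeds while
 * more than 1% of the node's pages are unmapped file pages (see
 * min_unmapped_ratio in Documentation/admin-guide/sysctl/vm.rst).
 */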

/*
 * If the number of slab pages in a zone grows beyond this percentage then
 * slab reclaim needs to occur.
 */
int sysctl_min_slab_ratio = 5;

static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
{
	unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
	unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
		node_page_state(pgdat, NR_ACTIVE_FILE);

	/*
	 * It's possible for there to be more file mapped pages than
	 * accounted for by the pages on the file LRU lists because
	 * tmpfs pages accounted for as ANON can also be FILE_MAPPED.
	 */
	return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
}
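/*
 * Editorial example: with 100000 pages on the file LRUs and 30000 pages
 * counted in NR_FILE_MAPPED, roughly 70000 pages are treated as unmapped
 * and therefore cheaply reclaimable by node reclaim.
 */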

/* Work out how many page cache pages we can reclaim in this reclaim_mode */
static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
{
	unsigned long nr_pagecache_reclaimable;
	unsigned long delta = 0;

	/*
	 * If RECLAIM_UNMAP is set, then all file pages are considered
	 * potentially reclaimable. Otherwise, we have to worry about
	 * pages like swapcache and node_unmapped_file_pages() provides
	 * a better estimate.
	 */
	if (node_reclaim_mode & RECLAIM_UNMAP)
		nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
	else
		nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);

	/* If we can't clean pages, remove dirty pages from consideration */
	if (!(node_reclaim_mode & RECLAIM_WRITE))
		delta += node_page_state(pgdat, NR_FILE_DIRTY);

	/* Watch for any possible underflows due to delta */
	if (unlikely(delta > nr_pagecache_reclaimable))
		delta = nr_pagecache_reclaimable;

	return nr_pagecache_reclaimable - delta;
}

/*
 * Try to free up some pages from this node through reclaim.
 */
static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
{
	/* Minimum pages needed in order to stay on node */
	const unsigned long nr_pages = 1 << order;
	struct task_struct *p = current;
	unsigned int noreclaim_flag;
	struct scan_control sc = {
		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
		.gfp_mask = current_gfp_context(gfp_mask),
		.order = order,
		.priority = NODE_RECLAIM_PRIORITY,
		.may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
		.may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
		.may_swap = 1,
		.reclaim_idx = gfp_zone(gfp_mask),
	};

	trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
					   sc.gfp_mask);

	cond_resched();
	fs_reclaim_acquire(sc.gfp_mask);
	/*
	 * We need to be able to allocate from the reserves for RECLAIM_UNMAP
	 * and we also need to be able to write out pages for RECLAIM_WRITE
	 * and RECLAIM_UNMAP.
	 */
	noreclaim_flag = memalloc_noreclaim_save();
	p->flags |= PF_SWAPWRITE;
	set_task_reclaim_state(p, &sc.reclaim_state);

	if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) {
		/*
		 * Free memory by calling shrink_node with increasing
		 * priorities until we have enough memory freed.
		 */
		do {
			shrink_node(pgdat, &sc);
		} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
	}

	set_task_reclaim_state(p, NULL);
	current->flags &= ~PF_SWAPWRITE;
	memalloc_noreclaim_restore(noreclaim_flag);
	fs_reclaim_release(sc.gfp_mask);

	trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed);

	return sc.nr_reclaimed >= nr_pages;
}

int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
{
	int ret;

	/*
	 * Node reclaim reclaims unmapped file backed pages and
	 * slab pages if we are over the defined limits.
	 *
	 * A small portion of unmapped file backed pages is needed for
	 * file I/O otherwise pages read by file I/O will be immediately
	 * thrown out if the node is overallocated. So we do not reclaim
	 * if less than a specified percentage of the node is used by
	 * unmapped file backed pages.
	 */
	if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
	    node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <=
	    pgdat->min_slab_pages)
		return NODE_RECLAIM_FULL;

	/*
	 * Do not scan if the allocation should not be delayed.
	 */
	if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
		return NODE_RECLAIM_NOSCAN;

	/*
	 * Only run node reclaim on the local node or on nodes that do not
	 * have associated processors. This will favor the local processor
	 * over remote processors and spread off node memory allocations
	 * as wide as possible.
	 */
	if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
		return NODE_RECLAIM_NOSCAN;

	if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
		return NODE_RECLAIM_NOSCAN;

	ret = __node_reclaim(pgdat, gfp_mask, order);
	clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);

	if (!ret)
		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);

	return ret;
}
#endif

/**
 * check_move_unevictable_pages - check pages for evictability and move to
 * appropriate zone lru list
 * @pvec: pagevec with lru pages to check
 *
 * Checks pages for evictability; if an evictable page is on the unevictable
 * LRU list, it is moved to the appropriate evictable LRU list. This function
 * should only be used for LRU pages.
 */
void check_move_unevictable_pages(struct pagevec *pvec)
{
	struct lruvec *lruvec;
	struct pglist_data *pgdat = NULL;
	int pgscanned = 0;
	int pgrescued = 0;
	int i;

	for (i = 0; i < pvec->nr; i++) {
		struct page *page = pvec->pages[i];
		struct pglist_data *pagepgdat = page_pgdat(page);
		int nr_pages;

		if (PageTransTail(page))
			continue;

		nr_pages = thp_nr_pages(page);
		pgscanned += nr_pages;

		if (pagepgdat != pgdat) {
			if (pgdat)
				spin_unlock_irq(&pgdat->lru_lock);
			pgdat = pagepgdat;
			spin_lock_irq(&pgdat->lru_lock);
		}
		lruvec = mem_cgroup_page_lruvec(page, pgdat);

		if (!PageLRU(page) || !PageUnevictable(page))
			continue;

		if (page_evictable(page)) {
			enum lru_list lru = page_lru_base_type(page);

			VM_BUG_ON_PAGE(PageActive(page), page);
			ClearPageUnevictable(page);
			del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
			add_page_to_lru_list(page, lruvec, lru);
			pgrescued += nr_pages;
		}
	}

	if (pgdat) {
		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
		spin_unlock_irq(&pgdat->lru_lock);
	}
}
EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
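
/*
 * Editorial usage note: callers include shmem_unlock_mapping(), which
 * rescues pages onto the normal LRU lists after SysV SHM_UNLOCK; the GPL
 * export also allows modular users such as the i915 driver.
 */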
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/mm/vmscan.c
4 *
5 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
6 *
7 * Swap reorganised 29.12.95, Stephen Tweedie.
8 * kswapd added: 7.1.96 sct
9 * Removed kswapd_ctl limits, and swap out as many pages as needed
10 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
11 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
12 * Multiqueue VM started 5.8.00, Rik van Riel.
13 */
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/mm.h>
18#include <linux/sched/mm.h>
19#include <linux/module.h>
20#include <linux/gfp.h>
21#include <linux/kernel_stat.h>
22#include <linux/swap.h>
23#include <linux/pagemap.h>
24#include <linux/init.h>
25#include <linux/highmem.h>
26#include <linux/vmpressure.h>
27#include <linux/vmstat.h>
28#include <linux/file.h>
29#include <linux/writeback.h>
30#include <linux/blkdev.h>
31#include <linux/buffer_head.h> /* for try_to_release_page(),
32 buffer_heads_over_limit */
33#include <linux/mm_inline.h>
34#include <linux/backing-dev.h>
35#include <linux/rmap.h>
36#include <linux/topology.h>
37#include <linux/cpu.h>
38#include <linux/cpuset.h>
39#include <linux/compaction.h>
40#include <linux/notifier.h>
41#include <linux/rwsem.h>
42#include <linux/delay.h>
43#include <linux/kthread.h>
44#include <linux/freezer.h>
45#include <linux/memcontrol.h>
46#include <linux/delayacct.h>
47#include <linux/sysctl.h>
48#include <linux/oom.h>
49#include <linux/pagevec.h>
50#include <linux/prefetch.h>
51#include <linux/printk.h>
52#include <linux/dax.h>
53#include <linux/psi.h>
54
55#include <asm/tlbflush.h>
56#include <asm/div64.h>
57
58#include <linux/swapops.h>
59#include <linux/balloon_compaction.h>
60
61#include "internal.h"
62
63#define CREATE_TRACE_POINTS
64#include <trace/events/vmscan.h>
65
66struct scan_control {
67 /* How many pages shrink_list() should reclaim */
68 unsigned long nr_to_reclaim;
69
70 /*
71 * Nodemask of nodes allowed by the caller. If NULL, all nodes
72 * are scanned.
73 */
74 nodemask_t *nodemask;
75
76 /*
77 * The memory cgroup that hit its limit and as a result is the
78 * primary target of this reclaim invocation.
79 */
80 struct mem_cgroup *target_mem_cgroup;
81
82 /* Writepage batching in laptop mode; RECLAIM_WRITE */
83 unsigned int may_writepage:1;
84
85 /* Can mapped pages be reclaimed? */
86 unsigned int may_unmap:1;
87
88 /* Can pages be swapped as part of reclaim? */
89 unsigned int may_swap:1;
90
91 /*
92 * Cgroups are not reclaimed below their configured memory.low,
93 * unless we threaten to OOM. If any cgroups are skipped due to
94 * memory.low and nothing was reclaimed, go back for memory.low.
95 */
96 unsigned int memcg_low_reclaim:1;
97 unsigned int memcg_low_skipped:1;
98
99 unsigned int hibernation_mode:1;
100
101 /* One of the zones is ready for compaction */
102 unsigned int compaction_ready:1;
103
104 /* Allocation order */
105 s8 order;
106
107 /* Scan (total_size >> priority) pages at once */
108 s8 priority;
109
110 /* The highest zone to isolate pages for reclaim from */
111 s8 reclaim_idx;
112
113 /* This context's GFP mask */
114 gfp_t gfp_mask;
115
116 /* Incremented by the number of inactive pages that were scanned */
117 unsigned long nr_scanned;
118
119 /* Number of pages freed so far during a call to shrink_zones() */
120 unsigned long nr_reclaimed;
121
122 struct {
123 unsigned int dirty;
124 unsigned int unqueued_dirty;
125 unsigned int congested;
126 unsigned int writeback;
127 unsigned int immediate;
128 unsigned int file_taken;
129 unsigned int taken;
130 } nr;
131
132 /* for recording the reclaimed slab by now */
133 struct reclaim_state reclaim_state;
134};
135
136#ifdef ARCH_HAS_PREFETCH
137#define prefetch_prev_lru_page(_page, _base, _field) \
138 do { \
139 if ((_page)->lru.prev != _base) { \
140 struct page *prev; \
141 \
142 prev = lru_to_page(&(_page->lru)); \
143 prefetch(&prev->_field); \
144 } \
145 } while (0)
146#else
147#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
148#endif
149
150#ifdef ARCH_HAS_PREFETCHW
151#define prefetchw_prev_lru_page(_page, _base, _field) \
152 do { \
153 if ((_page)->lru.prev != _base) { \
154 struct page *prev; \
155 \
156 prev = lru_to_page(&(_page->lru)); \
157 prefetchw(&prev->_field); \
158 } \
159 } while (0)
160#else
161#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
162#endif
163
164/*
165 * From 0 .. 100. Higher means more swappy.
166 */
167int vm_swappiness = 60;
168/*
169 * The total number of pages which are beyond the high watermark within all
170 * zones.
171 */
172unsigned long vm_total_pages;
173
174static void set_task_reclaim_state(struct task_struct *task,
175 struct reclaim_state *rs)
176{
177 /* Check for an overwrite */
178 WARN_ON_ONCE(rs && task->reclaim_state);
179
180 /* Check for the nulling of an already-nulled member */
181 WARN_ON_ONCE(!rs && !task->reclaim_state);
182
183 task->reclaim_state = rs;
184}
185
186static LIST_HEAD(shrinker_list);
187static DECLARE_RWSEM(shrinker_rwsem);
188
189#ifdef CONFIG_MEMCG
190/*
191 * We allow subsystems to populate their shrinker-related
192 * LRU lists before register_shrinker_prepared() is called
193 * for the shrinker, since we don't want to impose
194 * restrictions on their internal registration order.
195 * In this case shrink_slab_memcg() may find corresponding
196 * bit is set in the shrinkers map.
197 *
198 * This value is used by the function to detect registering
199 * shrinkers and to skip do_shrink_slab() calls for them.
200 */
201#define SHRINKER_REGISTERING ((struct shrinker *)~0UL)
202
203static DEFINE_IDR(shrinker_idr);
204static int shrinker_nr_max;
205
206static int prealloc_memcg_shrinker(struct shrinker *shrinker)
207{
208 int id, ret = -ENOMEM;
209
210 down_write(&shrinker_rwsem);
211 /* This may call shrinker, so it must use down_read_trylock() */
212 id = idr_alloc(&shrinker_idr, SHRINKER_REGISTERING, 0, 0, GFP_KERNEL);
213 if (id < 0)
214 goto unlock;
215
216 if (id >= shrinker_nr_max) {
217 if (memcg_expand_shrinker_maps(id)) {
218 idr_remove(&shrinker_idr, id);
219 goto unlock;
220 }
221
222 shrinker_nr_max = id + 1;
223 }
224 shrinker->id = id;
225 ret = 0;
226unlock:
227 up_write(&shrinker_rwsem);
228 return ret;
229}
230
231static void unregister_memcg_shrinker(struct shrinker *shrinker)
232{
233 int id = shrinker->id;
234
235 BUG_ON(id < 0);
236
237 down_write(&shrinker_rwsem);
238 idr_remove(&shrinker_idr, id);
239 up_write(&shrinker_rwsem);
240}
241
242static bool global_reclaim(struct scan_control *sc)
243{
244 return !sc->target_mem_cgroup;
245}
246
247/**
248 * sane_reclaim - is the usual dirty throttling mechanism operational?
249 * @sc: scan_control in question
250 *
251 * The normal page dirty throttling mechanism in balance_dirty_pages() is
252 * completely broken with the legacy memcg and direct stalling in
253 * shrink_page_list() is used for throttling instead, which lacks all the
254 * niceties such as fairness, adaptive pausing, bandwidth proportional
255 * allocation and configurability.
256 *
257 * This function tests whether the vmscan currently in progress can assume
258 * that the normal dirty throttling mechanism is operational.
259 */
260static bool sane_reclaim(struct scan_control *sc)
261{
262 struct mem_cgroup *memcg = sc->target_mem_cgroup;
263
264 if (!memcg)
265 return true;
266#ifdef CONFIG_CGROUP_WRITEBACK
267 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
268 return true;
269#endif
270 return false;
271}
272
273static void set_memcg_congestion(pg_data_t *pgdat,
274 struct mem_cgroup *memcg,
275 bool congested)
276{
277 struct mem_cgroup_per_node *mn;
278
279 if (!memcg)
280 return;
281
282 mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
283 WRITE_ONCE(mn->congested, congested);
284}
285
286static bool memcg_congested(pg_data_t *pgdat,
287 struct mem_cgroup *memcg)
288{
289 struct mem_cgroup_per_node *mn;
290
291 mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
292 return READ_ONCE(mn->congested);
293
294}
295#else
296static int prealloc_memcg_shrinker(struct shrinker *shrinker)
297{
298 return 0;
299}
300
301static void unregister_memcg_shrinker(struct shrinker *shrinker)
302{
303}
304
305static bool global_reclaim(struct scan_control *sc)
306{
307 return true;
308}
309
310static bool sane_reclaim(struct scan_control *sc)
311{
312 return true;
313}
314
315static inline void set_memcg_congestion(struct pglist_data *pgdat,
316 struct mem_cgroup *memcg, bool congested)
317{
318}
319
320static inline bool memcg_congested(struct pglist_data *pgdat,
321 struct mem_cgroup *memcg)
322{
323 return false;
324
325}
326#endif
327
328/*
329 * This misses isolated pages which are not accounted for to save counters.
330 * As the data only determines if reclaim or compaction continues, it is
331 * not expected that isolated pages will be a dominating factor.
332 */
333unsigned long zone_reclaimable_pages(struct zone *zone)
334{
335 unsigned long nr;
336
337 nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
338 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
339 if (get_nr_swap_pages() > 0)
340 nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
341 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
342
343 return nr;
344}
345
346/**
347 * lruvec_lru_size - Returns the number of pages on the given LRU list.
348 * @lruvec: lru vector
349 * @lru: lru to use
350 * @zone_idx: zones to consider (use MAX_NR_ZONES for the whole LRU list)
351 */
352unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
353{
354 unsigned long lru_size = 0;
355 int zid;
356
357 if (!mem_cgroup_disabled()) {
358 for (zid = 0; zid < MAX_NR_ZONES; zid++)
359 lru_size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
360 } else
361 lru_size = node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
362
363 for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) {
364 struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
365 unsigned long size;
366
367 if (!managed_zone(zone))
368 continue;
369
370 if (!mem_cgroup_disabled())
371 size = mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
372 else
373 size = zone_page_state(&lruvec_pgdat(lruvec)->node_zones[zid],
374 NR_ZONE_LRU_BASE + lru);
375 lru_size -= min(size, lru_size);
376 }
377
378 return lru_size;
379
380}
381
382/*
383 * Add a shrinker callback to be called from the vm.
384 */
385int prealloc_shrinker(struct shrinker *shrinker)
386{
387 unsigned int size = sizeof(*shrinker->nr_deferred);
388
389 if (shrinker->flags & SHRINKER_NUMA_AWARE)
390 size *= nr_node_ids;
391
392 shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
393 if (!shrinker->nr_deferred)
394 return -ENOMEM;
395
396 if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
397 if (prealloc_memcg_shrinker(shrinker))
398 goto free_deferred;
399 }
400
401 return 0;
402
403free_deferred:
404 kfree(shrinker->nr_deferred);
405 shrinker->nr_deferred = NULL;
406 return -ENOMEM;
407}
408
409void free_prealloced_shrinker(struct shrinker *shrinker)
410{
411 if (!shrinker->nr_deferred)
412 return;
413
414 if (shrinker->flags & SHRINKER_MEMCG_AWARE)
415 unregister_memcg_shrinker(shrinker);
416
417 kfree(shrinker->nr_deferred);
418 shrinker->nr_deferred = NULL;
419}
420
421void register_shrinker_prepared(struct shrinker *shrinker)
422{
423 down_write(&shrinker_rwsem);
424 list_add_tail(&shrinker->list, &shrinker_list);
425#ifdef CONFIG_MEMCG_KMEM
426 if (shrinker->flags & SHRINKER_MEMCG_AWARE)
427 idr_replace(&shrinker_idr, shrinker, shrinker->id);
428#endif
429 up_write(&shrinker_rwsem);
430}
431
432int register_shrinker(struct shrinker *shrinker)
433{
434 int err = prealloc_shrinker(shrinker);
435
436 if (err)
437 return err;
438 register_shrinker_prepared(shrinker);
439 return 0;
440}
441EXPORT_SYMBOL(register_shrinker);
442
443/*
444 * Remove one
445 */
446void unregister_shrinker(struct shrinker *shrinker)
447{
448 if (!shrinker->nr_deferred)
449 return;
450 if (shrinker->flags & SHRINKER_MEMCG_AWARE)
451 unregister_memcg_shrinker(shrinker);
452 down_write(&shrinker_rwsem);
453 list_del(&shrinker->list);
454 up_write(&shrinker_rwsem);
455 kfree(shrinker->nr_deferred);
456 shrinker->nr_deferred = NULL;
457}
458EXPORT_SYMBOL(unregister_shrinker);
459
460#define SHRINK_BATCH 128
461
462static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
463 struct shrinker *shrinker, int priority)
464{
465 unsigned long freed = 0;
466 unsigned long long delta;
467 long total_scan;
468 long freeable;
469 long nr;
470 long new_nr;
471 int nid = shrinkctl->nid;
472 long batch_size = shrinker->batch ? shrinker->batch
473 : SHRINK_BATCH;
474 long scanned = 0, next_deferred;
475
476 if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
477 nid = 0;
478
479 freeable = shrinker->count_objects(shrinker, shrinkctl);
480 if (freeable == 0 || freeable == SHRINK_EMPTY)
481 return freeable;
482
483 /*
484 * copy the current shrinker scan count into a local variable
485 * and zero it so that other concurrent shrinker invocations
486 * don't also do this scanning work.
487 */
488 nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
489
490 total_scan = nr;
491 if (shrinker->seeks) {
492 delta = freeable >> priority;
493 delta *= 4;
494 do_div(delta, shrinker->seeks);
495 } else {
496 /*
497 * These objects don't require any IO to create. Trim
498 * them aggressively under memory pressure to keep
499 * them from causing refetches in the IO caches.
500 */
501 delta = freeable / 2;
502 }
503
504 total_scan += delta;
505 if (total_scan < 0) {
506 pr_err("shrink_slab: %pS negative objects to delete nr=%ld\n",
507 shrinker->scan_objects, total_scan);
508 total_scan = freeable;
509 next_deferred = nr;
510 } else
511 next_deferred = total_scan;
512
513 /*
514 * We need to avoid excessive windup on filesystem shrinkers
515 * due to large numbers of GFP_NOFS allocations causing the
516 * shrinkers to return -1 all the time. This results in a large
517 * nr being built up so when a shrink that can do some work
518 * comes along it empties the entire cache due to nr >>>
519 * freeable. This is bad for sustaining a working set in
520 * memory.
521 *
522 * Hence only allow the shrinker to scan the entire cache when
523 * a large delta change is calculated directly.
524 */
525 if (delta < freeable / 4)
526 total_scan = min(total_scan, freeable / 2);
527
528 /*
529 * Avoid risking looping forever due to too large nr value:
530 * never try to free more than twice the estimate number of
531 * freeable entries.
532 */
533 if (total_scan > freeable * 2)
534 total_scan = freeable * 2;
535
536 trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
537 freeable, delta, total_scan, priority);
538
539 /*
540 * Normally, we should not scan less than batch_size objects in one
541 * pass to avoid too frequent shrinker calls, but if the slab has less
542 * than batch_size objects in total and we are really tight on memory,
543 * we will try to reclaim all available objects, otherwise we can end
544 * up failing allocations although there are plenty of reclaimable
545 * objects spread over several slabs with usage less than the
546 * batch_size.
547 *
548 * We detect the "tight on memory" situations by looking at the total
549 * number of objects we want to scan (total_scan). If it is greater
550 * than the total number of objects on slab (freeable), we must be
551 * scanning at high prio and therefore should try to reclaim as much as
552 * possible.
553 */
554 while (total_scan >= batch_size ||
555 total_scan >= freeable) {
556 unsigned long ret;
557 unsigned long nr_to_scan = min(batch_size, total_scan);
558
559 shrinkctl->nr_to_scan = nr_to_scan;
560 shrinkctl->nr_scanned = nr_to_scan;
561 ret = shrinker->scan_objects(shrinker, shrinkctl);
562 if (ret == SHRINK_STOP)
563 break;
564 freed += ret;
565
566 count_vm_events(SLABS_SCANNED, shrinkctl->nr_scanned);
567 total_scan -= shrinkctl->nr_scanned;
568 scanned += shrinkctl->nr_scanned;
569
570 cond_resched();
571 }
572
573 if (next_deferred >= scanned)
574 next_deferred -= scanned;
575 else
576 next_deferred = 0;
577 /*
578 * move the unused scan count back into the shrinker in a
579 * manner that handles concurrent updates. If we exhausted the
580 * scan, there is no need to do an update.
581 */
582 if (next_deferred > 0)
583 new_nr = atomic_long_add_return(next_deferred,
584 &shrinker->nr_deferred[nid]);
585 else
586 new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
587
588 trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
589 return freed;
590}
591
592#ifdef CONFIG_MEMCG
593static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
594 struct mem_cgroup *memcg, int priority)
595{
596 struct memcg_shrinker_map *map;
597 unsigned long ret, freed = 0;
598 int i;
599
600 if (!mem_cgroup_online(memcg))
601 return 0;
602
603 if (!down_read_trylock(&shrinker_rwsem))
604 return 0;
605
606 map = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_map,
607 true);
608 if (unlikely(!map))
609 goto unlock;
610
611 for_each_set_bit(i, map->map, shrinker_nr_max) {
612 struct shrink_control sc = {
613 .gfp_mask = gfp_mask,
614 .nid = nid,
615 .memcg = memcg,
616 };
617 struct shrinker *shrinker;
618
619 shrinker = idr_find(&shrinker_idr, i);
620 if (unlikely(!shrinker || shrinker == SHRINKER_REGISTERING)) {
621 if (!shrinker)
622 clear_bit(i, map->map);
623 continue;
624 }
625
626 /* Call non-slab shrinkers even though kmem is disabled */
627 if (!memcg_kmem_enabled() &&
628 !(shrinker->flags & SHRINKER_NONSLAB))
629 continue;
630
631 ret = do_shrink_slab(&sc, shrinker, priority);
632 if (ret == SHRINK_EMPTY) {
633 clear_bit(i, map->map);
634 /*
635 * After the shrinker reported that it had no objects to
636 * free, but before we cleared the corresponding bit in
637 * the memcg shrinker map, a new object might have been
638 * added. To make sure, we have the bit set in this
639 * case, we invoke the shrinker one more time and reset
640 * the bit if it reports that it is not empty anymore.
641 * The memory barrier here pairs with the barrier in
642 * memcg_set_shrinker_bit():
643 *
644 * list_lru_add() shrink_slab_memcg()
645 * list_add_tail() clear_bit()
646 * <MB> <MB>
647 * set_bit() do_shrink_slab()
648 */
649 smp_mb__after_atomic();
650 ret = do_shrink_slab(&sc, shrinker, priority);
651 if (ret == SHRINK_EMPTY)
652 ret = 0;
653 else
654 memcg_set_shrinker_bit(memcg, nid, i);
655 }
656 freed += ret;
657
658 if (rwsem_is_contended(&shrinker_rwsem)) {
659 freed = freed ? : 1;
660 break;
661 }
662 }
663unlock:
664 up_read(&shrinker_rwsem);
665 return freed;
666}
667#else /* CONFIG_MEMCG */
668static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
669 struct mem_cgroup *memcg, int priority)
670{
671 return 0;
672}
673#endif /* CONFIG_MEMCG */
674
675/**
676 * shrink_slab - shrink slab caches
677 * @gfp_mask: allocation context
678 * @nid: node whose slab caches to target
679 * @memcg: memory cgroup whose slab caches to target
680 * @priority: the reclaim priority
681 *
682 * Call the shrink functions to age shrinkable caches.
683 *
684 * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set,
685 * unaware shrinkers will receive a node id of 0 instead.
686 *
687 * @memcg specifies the memory cgroup to target. Unaware shrinkers
688 * are called only if it is the root cgroup.
689 *
690 * @priority is sc->priority, we take the number of objects and >> by priority
691 * in order to get the scan target.
692 *
693 * Returns the number of reclaimed slab objects.
694 */
695static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
696 struct mem_cgroup *memcg,
697 int priority)
698{
699 unsigned long ret, freed = 0;
700 struct shrinker *shrinker;
701
702 /*
703 * The root memcg might be allocated even though memcg is disabled
704 * via "cgroup_disable=memory" boot parameter. This could make
705 * mem_cgroup_is_root() return false, then just run memcg slab
706 * shrink, but skip global shrink. This may result in premature
707 * oom.
708 */
709 if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
710 return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
711
712 if (!down_read_trylock(&shrinker_rwsem))
713 goto out;
714
715 list_for_each_entry(shrinker, &shrinker_list, list) {
716 struct shrink_control sc = {
717 .gfp_mask = gfp_mask,
718 .nid = nid,
719 .memcg = memcg,
720 };
721
722 ret = do_shrink_slab(&sc, shrinker, priority);
723 if (ret == SHRINK_EMPTY)
724 ret = 0;
725 freed += ret;
726 /*
727 * Bail out if someone want to register a new shrinker to
728 * prevent the regsitration from being stalled for long periods
729 * by parallel ongoing shrinking.
730 */
731 if (rwsem_is_contended(&shrinker_rwsem)) {
732 freed = freed ? : 1;
733 break;
734 }
735 }
736
737 up_read(&shrinker_rwsem);
738out:
739 cond_resched();
740 return freed;
741}
742
743void drop_slab_node(int nid)
744{
745 unsigned long freed;
746
747 do {
748 struct mem_cgroup *memcg = NULL;
749
750 freed = 0;
751 memcg = mem_cgroup_iter(NULL, NULL, NULL);
752 do {
753 freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
754 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
755 } while (freed > 10);
756}
757
758void drop_slab(void)
759{
760 int nid;
761
762 for_each_online_node(nid)
763 drop_slab_node(nid);
764}
765
766static inline int is_page_cache_freeable(struct page *page)
767{
768 /*
769 * A freeable page cache page is referenced only by the caller
770 * that isolated the page, the page cache and optional buffer
771 * heads at page->private.
772 */
773 int page_cache_pins = PageTransHuge(page) && PageSwapCache(page) ?
774 HPAGE_PMD_NR : 1;
775 return page_count(page) - page_has_private(page) == 1 + page_cache_pins;
776}
777
778static int may_write_to_inode(struct inode *inode, struct scan_control *sc)
779{
780 if (current->flags & PF_SWAPWRITE)
781 return 1;
782 if (!inode_write_congested(inode))
783 return 1;
784 if (inode_to_bdi(inode) == current->backing_dev_info)
785 return 1;
786 return 0;
787}
788
789/*
790 * We detected a synchronous write error writing a page out. Probably
791 * -ENOSPC. We need to propagate that into the address_space for a subsequent
792 * fsync(), msync() or close().
793 *
794 * The tricky part is that after writepage we cannot touch the mapping: nothing
795 * prevents it from being freed up. But we have a ref on the page and once
796 * that page is locked, the mapping is pinned.
797 *
798 * We're allowed to run sleeping lock_page() here because we know the caller has
799 * __GFP_FS.
800 */
801static void handle_write_error(struct address_space *mapping,
802 struct page *page, int error)
803{
804 lock_page(page);
805 if (page_mapping(page) == mapping)
806 mapping_set_error(mapping, error);
807 unlock_page(page);
808}
809
810/* possible outcome of pageout() */
811typedef enum {
812 /* failed to write page out, page is locked */
813 PAGE_KEEP,
814 /* move page to the active list, page is locked */
815 PAGE_ACTIVATE,
816 /* page has been sent to the disk successfully, page is unlocked */
817 PAGE_SUCCESS,
818 /* page is clean and locked */
819 PAGE_CLEAN,
820} pageout_t;
821
822/*
823 * pageout is called by shrink_page_list() for each dirty page.
824 * Calls ->writepage().
825 */
826static pageout_t pageout(struct page *page, struct address_space *mapping,
827 struct scan_control *sc)
828{
829 /*
830 * If the page is dirty, only perform writeback if that write
831 * will be non-blocking. To prevent this allocation from being
832 * stalled by pagecache activity. But note that there may be
833 * stalls if we need to run get_block(). We could test
834 * PagePrivate for that.
835 *
836 * If this process is currently in __generic_file_write_iter() against
837 * this page's queue, we can perform writeback even if that
838 * will block.
839 *
840 * If the page is swapcache, write it back even if that would
841 * block, for some throttling. This happens by accident, because
842 * swap_backing_dev_info is bust: it doesn't reflect the
843 * congestion state of the swapdevs. Easy to fix, if needed.
844 */
845 if (!is_page_cache_freeable(page))
846 return PAGE_KEEP;
847 if (!mapping) {
848 /*
849 * Some data journaling orphaned pages can have
850 * page->mapping == NULL while being dirty with clean buffers.
851 */
852 if (page_has_private(page)) {
853 if (try_to_free_buffers(page)) {
854 ClearPageDirty(page);
855 pr_info("%s: orphaned page\n", __func__);
856 return PAGE_CLEAN;
857 }
858 }
859 return PAGE_KEEP;
860 }
861 if (mapping->a_ops->writepage == NULL)
862 return PAGE_ACTIVATE;
863 if (!may_write_to_inode(mapping->host, sc))
864 return PAGE_KEEP;
865
866 if (clear_page_dirty_for_io(page)) {
867 int res;
868 struct writeback_control wbc = {
869 .sync_mode = WB_SYNC_NONE,
870 .nr_to_write = SWAP_CLUSTER_MAX,
871 .range_start = 0,
872 .range_end = LLONG_MAX,
873 .for_reclaim = 1,
874 };
875
876 SetPageReclaim(page);
877 res = mapping->a_ops->writepage(page, &wbc);
878 if (res < 0)
879 handle_write_error(mapping, page, res);
880 if (res == AOP_WRITEPAGE_ACTIVATE) {
881 ClearPageReclaim(page);
882 return PAGE_ACTIVATE;
883 }
884
885 if (!PageWriteback(page)) {
886 /* synchronous write or broken a_ops? */
887 ClearPageReclaim(page);
888 }
889 trace_mm_vmscan_writepage(page);
890 inc_node_page_state(page, NR_VMSCAN_WRITE);
891 return PAGE_SUCCESS;
892 }
893
894 return PAGE_CLEAN;
895}
896
897/*
898 * Same as remove_mapping, but if the page is removed from the mapping, it
899 * gets returned with a refcount of 0.
900 */
901static int __remove_mapping(struct address_space *mapping, struct page *page,
902 bool reclaimed)
903{
904 unsigned long flags;
905 int refcount;
906
907 BUG_ON(!PageLocked(page));
908 BUG_ON(mapping != page_mapping(page));
909
910 xa_lock_irqsave(&mapping->i_pages, flags);
911 /*
912 * The non racy check for a busy page.
913 *
914 * Must be careful with the order of the tests. When someone has
915 * a ref to the page, it may be possible that they dirty it then
916 * drop the reference. So if PageDirty is tested before page_count
917 * here, then the following race may occur:
918 *
919 * get_user_pages(&page);
920 * [user mapping goes away]
921 * write_to(page);
922 * !PageDirty(page) [good]
923 * SetPageDirty(page);
924 * put_page(page);
925 * !page_count(page) [good, discard it]
926 *
927 * [oops, our write_to data is lost]
928 *
929 * Reversing the order of the tests ensures such a situation cannot
930 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
931 * load is not satisfied before that of page->_refcount.
932 *
933 * Note that if SetPageDirty is always performed via set_page_dirty,
934 * and thus under the i_pages lock, then this ordering is not required.
935 */
936 refcount = 1 + compound_nr(page);
937 if (!page_ref_freeze(page, refcount))
938 goto cannot_free;
939 /* note: atomic_cmpxchg in page_ref_freeze provides the smp_rmb */
940 if (unlikely(PageDirty(page))) {
941 page_ref_unfreeze(page, refcount);
942 goto cannot_free;
943 }
944
945 if (PageSwapCache(page)) {
946 swp_entry_t swap = { .val = page_private(page) };
947 mem_cgroup_swapout(page, swap);
948 __delete_from_swap_cache(page, swap);
949 xa_unlock_irqrestore(&mapping->i_pages, flags);
950 put_swap_page(page, swap);
951 } else {
952 void (*freepage)(struct page *);
953 void *shadow = NULL;
954
955 freepage = mapping->a_ops->freepage;
956 /*
957 * Remember a shadow entry for reclaimed file cache in
958 * order to detect refaults, thus thrashing, later on.
959 *
960 * But don't store shadows in an address space that is
961 * already exiting. This is not just an optizimation,
962 * inode reclaim needs to empty out the radix tree or
963 * the nodes are lost. Don't plant shadows behind its
964 * back.
965 *
966 * We also don't store shadows for DAX mappings because the
967 * only page cache pages found in these are zero pages
968 * covering holes, and because we don't want to mix DAX
969 * exceptional entries and shadow exceptional entries in the
970 * same address_space.
971 */
972 if (reclaimed && page_is_file_cache(page) &&
973 !mapping_exiting(mapping) && !dax_mapping(mapping))
974 shadow = workingset_eviction(page);
975 __delete_from_page_cache(page, shadow);
976 xa_unlock_irqrestore(&mapping->i_pages, flags);
977
978 if (freepage != NULL)
979 freepage(page);
980 }
981
982 return 1;
983
984cannot_free:
985 xa_unlock_irqrestore(&mapping->i_pages, flags);
986 return 0;
987}
988
989/*
990 * Attempt to detach a locked page from its ->mapping. If it is dirty or if
991 * someone else has a ref on the page, abort and return 0. If it was
992 * successfully detached, return 1. Assumes the caller has a single ref on
993 * this page.
994 */
995int remove_mapping(struct address_space *mapping, struct page *page)
996{
997 if (__remove_mapping(mapping, page, false)) {
998 /*
999 * Unfreezing the refcount with 1 rather than 2 effectively
1000 * drops the pagecache ref for us without requiring another
1001 * atomic operation.
1002 */
1003 page_ref_unfreeze(page, 1);
1004 return 1;
1005 }
1006 return 0;
1007}
1008
1009/**
1010 * putback_lru_page - put previously isolated page onto appropriate LRU list
1011 * @page: page to be put back to appropriate lru list
1012 *
1013 * Add previously isolated @page to appropriate LRU list.
1014 * Page may still be unevictable for other reasons.
1015 *
1016 * lru_lock must not be held, interrupts must be enabled.
1017 */
1018void putback_lru_page(struct page *page)
1019{
1020 lru_cache_add(page);
1021 put_page(page); /* drop ref from isolate */
1022}
1023
1024enum page_references {
1025 PAGEREF_RECLAIM,
1026 PAGEREF_RECLAIM_CLEAN,
1027 PAGEREF_KEEP,
1028 PAGEREF_ACTIVATE,
1029};
1030
1031static enum page_references page_check_references(struct page *page,
1032 struct scan_control *sc)
1033{
1034 int referenced_ptes, referenced_page;
1035 unsigned long vm_flags;
1036
1037 referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
1038 &vm_flags);
1039 referenced_page = TestClearPageReferenced(page);
1040
1041 /*
1042 * Mlock lost the isolation race with us. Let try_to_unmap()
1043 * move the page to the unevictable list.
1044 */
1045 if (vm_flags & VM_LOCKED)
1046 return PAGEREF_RECLAIM;
1047
1048 if (referenced_ptes) {
1049 if (PageSwapBacked(page))
1050 return PAGEREF_ACTIVATE;
1051 /*
1052 * All mapped pages start out with page table
1053 * references from the instantiating fault, so we need
1054 * to look twice if a mapped file page is used more
1055 * than once.
1056 *
1057 * Mark it and spare it for another trip around the
1058 * inactive list. Another page table reference will
1059 * lead to its activation.
1060 *
1061 * Note: the mark is set for activated pages as well
1062 * so that recently deactivated but used pages are
1063 * quickly recovered.
1064 */
1065 SetPageReferenced(page);
1066
1067 if (referenced_page || referenced_ptes > 1)
1068 return PAGEREF_ACTIVATE;
1069
1070 /*
1071 * Activate file-backed executable pages after first usage.
1072 */
1073 if (vm_flags & VM_EXEC)
1074 return PAGEREF_ACTIVATE;
1075
1076 return PAGEREF_KEEP;
1077 }
1078
1079 /* Reclaim if clean, defer dirty pages to writeback */
1080 if (referenced_page && !PageSwapBacked(page))
1081 return PAGEREF_RECLAIM_CLEAN;
1082
1083 return PAGEREF_RECLAIM;
1084}
1085
1086/* Check if a page is dirty or under writeback */
1087static void page_check_dirty_writeback(struct page *page,
1088 bool *dirty, bool *writeback)
1089{
1090 struct address_space *mapping;
1091
1092 /*
1093 * Anonymous pages are not handled by flushers and must be written
1094 * from reclaim context. Do not stall reclaim based on them
1095 */
1096 if (!page_is_file_cache(page) ||
1097 (PageAnon(page) && !PageSwapBacked(page))) {
1098 *dirty = false;
1099 *writeback = false;
1100 return;
1101 }
1102
1103 /* By default assume that the page flags are accurate */
1104 *dirty = PageDirty(page);
1105 *writeback = PageWriteback(page);
1106
1107 /* Verify dirty/writeback state if the filesystem supports it */
1108 if (!page_has_private(page))
1109 return;
1110
1111 mapping = page_mapping(page);
1112 if (mapping && mapping->a_ops->is_dirty_writeback)
1113 mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
1114}
1115
1116/*
1117 * shrink_page_list() returns the number of reclaimed pages
1118 */
1119static unsigned long shrink_page_list(struct list_head *page_list,
1120 struct pglist_data *pgdat,
1121 struct scan_control *sc,
1122 enum ttu_flags ttu_flags,
1123 struct reclaim_stat *stat,
1124 bool ignore_references)
1125{
1126 LIST_HEAD(ret_pages);
1127 LIST_HEAD(free_pages);
1128 unsigned nr_reclaimed = 0;
1129 unsigned pgactivate = 0;
1130
1131 memset(stat, 0, sizeof(*stat));
1132 cond_resched();
1133
1134 while (!list_empty(page_list)) {
1135 struct address_space *mapping;
1136 struct page *page;
1137 int may_enter_fs;
1138 enum page_references references = PAGEREF_RECLAIM;
1139 bool dirty, writeback;
1140 unsigned int nr_pages;
1141
1142 cond_resched();
1143
1144 page = lru_to_page(page_list);
1145 list_del(&page->lru);
1146
1147 if (!trylock_page(page))
1148 goto keep;
1149
1150 VM_BUG_ON_PAGE(PageActive(page), page);
1151
1152 nr_pages = compound_nr(page);
1153
1154 /* Account the number of base pages, even for THP */
1155 sc->nr_scanned += nr_pages;
1156
1157 if (unlikely(!page_evictable(page)))
1158 goto activate_locked;
1159
1160 if (!sc->may_unmap && page_mapped(page))
1161 goto keep_locked;
1162
1163 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
1164 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
1165
1166 /*
1167 * The number of dirty pages determines if a node is marked
1168 * reclaim_congested which affects wait_iff_congested. kswapd
1169 * will stall and start writing pages if the tail of the LRU
1170 * is all dirty unqueued pages.
1171 */
1172 page_check_dirty_writeback(page, &dirty, &writeback);
1173 if (dirty || writeback)
1174 stat->nr_dirty++;
1175
1176 if (dirty && !writeback)
1177 stat->nr_unqueued_dirty++;
1178
1179 /*
1180 * Treat this page as congested if the underlying BDI is or if
1181 * pages are cycling through the LRU so quickly that the
1182 * pages marked for immediate reclaim are making it to the
1183 * end of the LRU a second time.
1184 */
1185 mapping = page_mapping(page);
1186 if (((dirty || writeback) && mapping &&
1187 inode_write_congested(mapping->host)) ||
1188 (writeback && PageReclaim(page)))
1189 stat->nr_congested++;
1190
1191 /*
1192 * If a page at the tail of the LRU is under writeback, there
1193 * are three cases to consider.
1194 *
1195 * 1) If reclaim is encountering an excessive number of pages
1196 * under writeback and this page is both under writeback and
1197 * PageReclaim then it indicates that pages are being queued
1198 * for IO but are being recycled through the LRU before the
1199 * IO can complete. Waiting on the page itself risks an
1200 * indefinite stall if it is impossible to writeback the
1201 * page due to IO error or disconnected storage so instead
1202 * note that the LRU is being scanned too quickly and the
1203 * caller can stall after page list has been processed.
1204 *
1205 * 2) Global or new memcg reclaim encounters a page that is
1206 * not marked for immediate reclaim, or the caller does not
1207 * have __GFP_FS (or __GFP_IO if it's simply going to swap,
1208 * not to fs). In this case mark the page for immediate
1209 * reclaim and continue scanning.
1210 *
1211 * Require may_enter_fs because we would wait on fs, which
1212 * may not have submitted IO yet. And the loop driver might
1213 * enter reclaim, and deadlock if it waits on a page for
1214 * which it needs to do the write (loop masks off
1215 * __GFP_IO|__GFP_FS for this reason); but more thought
1216 * would probably show more reasons.
1217 *
1218 * 3) Legacy memcg encounters a page that is already marked
1219 * PageReclaim. memcg does not have any dirty pages
1220 * throttling so we could easily OOM just because too many
1221 * pages are in writeback and there is nothing else to
1222 * reclaim. Wait for the writeback to complete.
1223 *
1224 * In cases 1) and 2) we activate the pages to get them out of
1225 * the way while we continue scanning for clean pages on the
1226 * inactive list and refilling from the active list. The
1227 * observation here is that waiting for disk writes is more
1228 * expensive than potentially causing reloads down the line.
1229 * Since they're marked for immediate reclaim, they won't put
1230 * memory pressure on the cache working set any longer than it
1231 * takes to write them to disk.
1232 */
1233 if (PageWriteback(page)) {
1234 /* Case 1 above */
1235 if (current_is_kswapd() &&
1236 PageReclaim(page) &&
1237 test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
1238 stat->nr_immediate++;
1239 goto activate_locked;
1240
1241 /* Case 2 above */
1242 } else if (sane_reclaim(sc) ||
1243 !PageReclaim(page) || !may_enter_fs) {
1244 /*
1245 * This is slightly racy - end_page_writeback()
1246 * might have just cleared PageReclaim, then
1247 * setting PageReclaim here ends up being interpreted
1248 * as PageReadahead - but that does not matter
1249 * enough to care. What we do want is for this
1250 * page to have PageReclaim set next time memcg
1251 * reclaim reaches the tests above, so it will
1252 * then wait_on_page_writeback() to avoid OOM;
1253 * and it's also appropriate in global reclaim.
1254 */
1255 SetPageReclaim(page);
1256 stat->nr_writeback++;
1257 goto activate_locked;
1258
1259 /* Case 3 above */
1260 } else {
1261 unlock_page(page);
1262 wait_on_page_writeback(page);
1263 /* then go back and try same page again */
1264 list_add_tail(&page->lru, page_list);
1265 continue;
1266 }
1267 }
1268
1269 if (!ignore_references)
1270 references = page_check_references(page, sc);
1271
1272 switch (references) {
1273 case PAGEREF_ACTIVATE:
1274 goto activate_locked;
1275 case PAGEREF_KEEP:
1276 stat->nr_ref_keep += nr_pages;
1277 goto keep_locked;
1278 case PAGEREF_RECLAIM:
1279 case PAGEREF_RECLAIM_CLEAN:
1280 ; /* try to reclaim the page below */
1281 }
1282
1283 /*
1284 * Anonymous process memory has backing store?
1285 * Try to allocate it some swap space here.
1286 * Lazyfree page could be freed directly
1287 */
1288 if (PageAnon(page) && PageSwapBacked(page)) {
1289 if (!PageSwapCache(page)) {
1290 if (!(sc->gfp_mask & __GFP_IO))
1291 goto keep_locked;
1292 if (PageTransHuge(page)) {
1293 /* cannot split THP, skip it */
1294 if (!can_split_huge_page(page, NULL))
1295 goto activate_locked;
1296 /*
1297 * Split pages without a PMD map right
1298 * away. Chances are some or all of the
1299 * tail pages can be freed without IO.
1300 */
1301 if (!compound_mapcount(page) &&
1302 split_huge_page_to_list(page,
1303 page_list))
1304 goto activate_locked;
1305 }
1306 if (!add_to_swap(page)) {
1307 if (!PageTransHuge(page))
1308 goto activate_locked_split;
1309 /* Fallback to swap normal pages */
1310 if (split_huge_page_to_list(page,
1311 page_list))
1312 goto activate_locked;
1313#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1314 count_vm_event(THP_SWPOUT_FALLBACK);
1315#endif
1316 if (!add_to_swap(page))
1317 goto activate_locked_split;
1318 }
1319
1320 may_enter_fs = 1;
1321
1322 /* Adding to swap updated mapping */
1323 mapping = page_mapping(page);
1324 }
1325 } else if (unlikely(PageTransHuge(page))) {
1326 /* Split file THP */
1327 if (split_huge_page_to_list(page, page_list))
1328 goto keep_locked;
1329 }
1330
1331 /*
1332 * THP may get split above; subtract the tail pages and update
1333 * nr_pages to avoid accounting tail pages twice.
1334 *
1335 * Tail pages that were successfully added to the swap cache
1336 * reach here.
1337 */
1338 if ((nr_pages > 1) && !PageTransHuge(page)) {
1339 sc->nr_scanned -= (nr_pages - 1);
1340 nr_pages = 1;
1341 }
1342
1343 /*
1344 * The page is mapped into the page tables of one or more
1345 * processes. Try to unmap it here.
1346 */
1347 if (page_mapped(page)) {
1348 enum ttu_flags flags = ttu_flags | TTU_BATCH_FLUSH;
1349
1350 if (unlikely(PageTransHuge(page)))
1351 flags |= TTU_SPLIT_HUGE_PMD;
1352 if (!try_to_unmap(page, flags)) {
1353 stat->nr_unmap_fail += nr_pages;
1354 goto activate_locked;
1355 }
1356 }
1357
1358 if (PageDirty(page)) {
1359 /*
1360 * Only kswapd can writeback filesystem pages
1361 * to avoid risk of stack overflow. But avoid
1362 * injecting inefficient single-page IO into
1363 * flusher writeback as much as possible: only
1364 * write pages when we've encountered many
1365 * dirty pages, and when we've already scanned
1366 * the rest of the LRU for clean pages and see
1367 * the same dirty pages again (PageReclaim).
1368 */
1369 if (page_is_file_cache(page) &&
1370 (!current_is_kswapd() || !PageReclaim(page) ||
1371 !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
1372 /*
1373 * Immediately reclaim when written back.
1374 * Similar in principle to deactivate_page()
1375 * except we already have the page isolated
1376 * and know it's dirty
1377 */
1378 inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
1379 SetPageReclaim(page);
1380
1381 goto activate_locked;
1382 }
1383
1384 if (references == PAGEREF_RECLAIM_CLEAN)
1385 goto keep_locked;
1386 if (!may_enter_fs)
1387 goto keep_locked;
1388 if (!sc->may_writepage)
1389 goto keep_locked;
1390
1391 /*
1392 * Page is dirty. Flush the TLB if a writable entry
1393 * potentially exists to avoid CPU writes after IO
1394 * starts and then write it out here.
1395 */
1396 try_to_unmap_flush_dirty();
1397 switch (pageout(page, mapping, sc)) {
1398 case PAGE_KEEP:
1399 goto keep_locked;
1400 case PAGE_ACTIVATE:
1401 goto activate_locked;
1402 case PAGE_SUCCESS:
1403 if (PageWriteback(page))
1404 goto keep;
1405 if (PageDirty(page))
1406 goto keep;
1407
1408 /*
1409 * A synchronous write - probably a ramdisk. Go
1410 * ahead and try to reclaim the page.
1411 */
1412 if (!trylock_page(page))
1413 goto keep;
1414 if (PageDirty(page) || PageWriteback(page))
1415 goto keep_locked;
1416 mapping = page_mapping(page);
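			/* fall through */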
1417 case PAGE_CLEAN:
1418 ; /* try to free the page below */
1419 }
1420 }
1421
1422 /*
1423 * If the page has buffers, try to free the buffer mappings
1424 * associated with this page. If we succeed we try to free
1425 * the page as well.
1426 *
1427 * We do this even if the page is PageDirty().
1428 * try_to_release_page() does not perform I/O, but it is
1429 * possible for a page to have PageDirty set, but it is actually
1430 * clean (all its buffers are clean). This happens if the
1431 * buffers were written out directly, with submit_bh(). ext3
1432 * will do this, as well as the blockdev mapping.
1433 * try_to_release_page() will discover that cleanness and will
1434 * drop the buffers and mark the page clean - it can be freed.
1435 *
1436 * Rarely, pages can have buffers and no ->mapping. These are
1437 * the pages which were not successfully invalidated in
1438 * truncate_complete_page(). We try to drop those buffers here
1439 * and if that worked, and the page is no longer mapped into
1440 * process address space (page_count == 1) it can be freed.
1441 * Otherwise, leave the page on the LRU so it is swappable.
1442 */
1443 if (page_has_private(page)) {
1444 if (!try_to_release_page(page, sc->gfp_mask))
1445 goto activate_locked;
1446 if (!mapping && page_count(page) == 1) {
1447 unlock_page(page);
1448 if (put_page_testzero(page))
1449 goto free_it;
1450 else {
1451 /*
1452 * rare race with speculative reference.
1453 * the speculative reference will free
1454 * this page shortly, so we may
1455 * increment nr_reclaimed here (and
1456 * leave it off the LRU).
1457 */
1458 nr_reclaimed++;
1459 continue;
1460 }
1461 }
1462 }
1463
1464 if (PageAnon(page) && !PageSwapBacked(page)) {
1465 /* follow __remove_mapping for reference */
1466 if (!page_ref_freeze(page, 1))
1467 goto keep_locked;
1468 if (PageDirty(page)) {
1469 page_ref_unfreeze(page, 1);
1470 goto keep_locked;
1471 }
1472
1473 count_vm_event(PGLAZYFREED);
1474 count_memcg_page_event(page, PGLAZYFREED);
1475 } else if (!mapping || !__remove_mapping(mapping, page, true))
1476 goto keep_locked;
1477
1478 unlock_page(page);
1479free_it:
1480 /*
1481 * THP may get swapped out as a whole, so account
1482 * all of its base pages.
1483 */
1484 nr_reclaimed += nr_pages;
1485
1486 /*
1487 * Is there a need to periodically free the page list? It
1488 * would appear not, as the counts should be low.
1489 */
1490 if (unlikely(PageTransHuge(page)))
1491 (*get_compound_page_dtor(page))(page);
1492 else
1493 list_add(&page->lru, &free_pages);
1494 continue;
1495
1496activate_locked_split:
1497 /*
1498 * Tail pages that failed to be added to the swap cache
1499 * reach here. Fix up nr_scanned and nr_pages.
1500 */
1501 if (nr_pages > 1) {
1502 sc->nr_scanned -= (nr_pages - 1);
1503 nr_pages = 1;
1504 }
1505activate_locked:
1506 /* Not a candidate for swapping, so reclaim swap space. */
1507 if (PageSwapCache(page) && (mem_cgroup_swap_full(page) ||
1508 PageMlocked(page)))
1509 try_to_free_swap(page);
1510 VM_BUG_ON_PAGE(PageActive(page), page);
1511 if (!PageMlocked(page)) {
1512 int type = page_is_file_cache(page);
1513 SetPageActive(page);
1514 stat->nr_activate[type] += nr_pages;
1515 count_memcg_page_event(page, PGACTIVATE);
1516 }
1517keep_locked:
1518 unlock_page(page);
1519keep:
1520 list_add(&page->lru, &ret_pages);
1521 VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
1522 }
1523
1524 pgactivate = stat->nr_activate[0] + stat->nr_activate[1];
1525
1526 mem_cgroup_uncharge_list(&free_pages);
1527 try_to_unmap_flush();
1528 free_unref_page_list(&free_pages);
1529
1530 list_splice(&ret_pages, page_list);
1531 count_vm_events(PGACTIVATE, pgactivate);
1532
1533 return nr_reclaimed;
1534}
1535
1536unsigned long reclaim_clean_pages_from_list(struct zone *zone,
1537 struct list_head *page_list)
1538{
1539 struct scan_control sc = {
1540 .gfp_mask = GFP_KERNEL,
1541 .priority = DEF_PRIORITY,
1542 .may_unmap = 1,
1543 };
1544 struct reclaim_stat dummy_stat;
1545 unsigned long ret;
1546 struct page *page, *next;
1547 LIST_HEAD(clean_pages);
1548
1549 list_for_each_entry_safe(page, next, page_list, lru) {
1550 if (page_is_file_cache(page) && !PageDirty(page) &&
1551 !__PageMovable(page) && !PageUnevictable(page)) {
1552 ClearPageActive(page);
1553 list_move(&page->lru, &clean_pages);
1554 }
1555 }
1556
1557 ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
1558 TTU_IGNORE_ACCESS, &dummy_stat, true);
1559 list_splice(&clean_pages, page_list);
1560 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret);
1561 return ret;
1562}
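/*
 * Usage sketch (assumption: modelled on contiguous-range allocators that
 * drop clean pagecache before migrating a range; not a verbatim caller):
 *
 *	LIST_HEAD(pages);
 *	// ... isolate the pages of the target range onto 'pages' ...
 *	reclaim_clean_pages_from_list(zone, &pages);
 *	// ... migrate whatever could not be reclaimed ...
 */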
1563
1564/*
1565 * Attempt to remove the specified page from its LRU. Only take this page
1566 * if it is of the appropriate PageActive status. Pages which are being
1567 * freed elsewhere are also ignored.
1568 *
1569 * page: page to consider
1570 * mode: one of the LRU isolation modes defined above
1571 *
1572 * returns 0 on success, -ve errno on failure.
1573 */
1574int __isolate_lru_page(struct page *page, isolate_mode_t mode)
1575{
1576 int ret = -EINVAL;
1577
1578 /* Only take pages on the LRU. */
1579 if (!PageLRU(page))
1580 return ret;
1581
1582 /* Compaction should not handle unevictable pages but CMA can do so */
1583 if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
1584 return ret;
1585
1586 ret = -EBUSY;
1587
1588 /*
1589 * To minimise LRU disruption, the caller can indicate that it only
1590 * wants to isolate pages it will be able to operate on without
1591 * blocking - clean pages for the most part.
1592 *
1593 * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants pages
1594 * that can be migrated without blocking.
1595 */
1596 if (mode & ISOLATE_ASYNC_MIGRATE) {
1597 /* All the caller can do on PageWriteback is block */
1598 if (PageWriteback(page))
1599 return ret;
1600
1601 if (PageDirty(page)) {
1602 struct address_space *mapping;
1603 bool migrate_dirty;
1604
1605 /*
1606 * Only pages without mappings or that have a
1607 * ->migratepage callback are possible to migrate
1608 * without blocking. However, we can be racing with
1609 * truncation so it's necessary to lock the page
1610 * to stabilise the mapping as truncation holds
1611 * the page lock until after the page is removed
1612 * from the page cache.
1613 */
1614 if (!trylock_page(page))
1615 return ret;
1616
1617 mapping = page_mapping(page);
1618 migrate_dirty = !mapping || mapping->a_ops->migratepage;
1619 unlock_page(page);
1620 if (!migrate_dirty)
1621 return ret;
1622 }
1623 }
1624
1625 if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
1626 return ret;
1627
1628 if (likely(get_page_unless_zero(page))) {
1629 /*
1630 * Be careful not to clear PageLRU until after we're
1631 * sure the page is not being freed elsewhere -- the
1632 * page release code relies on it.
1633 */
1634 ClearPageLRU(page);
1635 ret = 0;
1636 }
1637
1638 return ret;
1639}
1640
1641
1642/*
1643 * Update LRU sizes after isolating pages. The LRU size updates must
1644 * be complete before mem_cgroup_update_lru_size due to a sanity check.
1645 */
1646static __always_inline void update_lru_sizes(struct lruvec *lruvec,
1647 enum lru_list lru, unsigned long *nr_zone_taken)
1648{
1649 int zid;
1650
1651 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1652 if (!nr_zone_taken[zid])
1653 continue;
1654
1655 __update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
1656#ifdef CONFIG_MEMCG
1657 mem_cgroup_update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
1658#endif
1659 }
1660
1661}
1662
1663/**
1664 * pgdat->lru_lock is heavily contended. Some of the functions that
1665 * shrink the lists perform better by taking out a batch of pages
1666 * and working on them outside the LRU lock.
1667 *
1668 * For pagecache intensive workloads, this function is the hottest
1669 * spot in the kernel (apart from copy_*_user functions).
1670 *
1671 * Appropriate locks must be held before calling this function.
1672 *
1673 * @nr_to_scan: The number of eligible pages to look through on the list.
1674 * @lruvec: The LRU vector to pull pages from.
1675 * @dst: The temp list to put pages on to.
1676 * @nr_scanned: The number of pages that were scanned.
1677 * @sc: The scan_control struct for this reclaim session
1678 * @mode: One of the LRU isolation modes
1679 * @lru: LRU list id for isolating
1680 *
1681 * returns how many pages were moved onto *@dst.
1682 */
1683static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1684 struct lruvec *lruvec, struct list_head *dst,
1685 unsigned long *nr_scanned, struct scan_control *sc,
1686 enum lru_list lru)
1687{
1688 struct list_head *src = &lruvec->lists[lru];
1689 unsigned long nr_taken = 0;
1690 unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
1691 unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
1692 unsigned long skipped = 0;
1693 unsigned long scan, total_scan, nr_pages;
1694 LIST_HEAD(pages_skipped);
1695 isolate_mode_t mode = (sc->may_unmap ? 0 : ISOLATE_UNMAPPED);
1696
1697 total_scan = 0;
1698 scan = 0;
1699 while (scan < nr_to_scan && !list_empty(src)) {
1700 struct page *page;
1701
1702 page = lru_to_page(src);
1703 prefetchw_prev_lru_page(page, src, flags);
1704
1705 VM_BUG_ON_PAGE(!PageLRU(page), page);
1706
1707 nr_pages = compound_nr(page);
1708 total_scan += nr_pages;
1709
1710 if (page_zonenum(page) > sc->reclaim_idx) {
1711 list_move(&page->lru, &pages_skipped);
1712 nr_skipped[page_zonenum(page)] += nr_pages;
1713 continue;
1714 }
1715
1716 /*
1717 * Do not count skipped pages because that makes the function
1718 * return with no isolated pages if the LRU mostly contains
1719 * ineligible pages. This causes the VM to not reclaim any
1720 * pages, triggering a premature OOM.
1721 *
1722 * Account all tail pages of THP. This would not cause
1723 * premature OOM since __isolate_lru_page() returns -EBUSY
1724 * only when the page is being freed somewhere else.
1725 */
1726 scan += nr_pages;
1727 switch (__isolate_lru_page(page, mode)) {
1728 case 0:
1729 nr_taken += nr_pages;
1730 nr_zone_taken[page_zonenum(page)] += nr_pages;
1731 list_move(&page->lru, dst);
1732 break;
1733
1734 case -EBUSY:
1735 /* else it is being freed elsewhere */
1736 list_move(&page->lru, src);
1737 continue;
1738
1739 default:
1740 BUG();
1741 }
1742 }
1743
1744 /*
1745 * Splice any skipped pages to the start of the LRU list. Note that
1746 * this disrupts the LRU order when reclaiming for lower zones but
1747 * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX
1748 * scanning would soon rescan the same pages to skip and put the
1749 * system at risk of premature OOM.
1750 */
1751 if (!list_empty(&pages_skipped)) {
1752 int zid;
1753
1754 list_splice(&pages_skipped, src);
1755 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1756 if (!nr_skipped[zid])
1757 continue;
1758
1759 __count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
1760 skipped += nr_skipped[zid];
1761 }
1762 }
1763 *nr_scanned = total_scan;
1764 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
1765 total_scan, skipped, nr_taken, mode, lru);
1766 update_lru_sizes(lruvec, lru, nr_zone_taken);
1767 return nr_taken;
1768}
1769
1770/**
1771 * isolate_lru_page - tries to isolate a page from its LRU list
1772 * @page: page to isolate from its LRU list
1773 *
1774 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
1775 * vmstat statistic corresponding to whatever LRU list the page was on.
1776 *
1777 * Returns 0 if the page was removed from an LRU list.
1778 * Returns -EBUSY if the page was not on an LRU list.
1779 *
1780 * The returned page will have PageLRU() cleared. If it was found on
1781 * the active list, it will have PageActive set. If it was found on
1782 * the unevictable list, it will have the PageUnevictable bit set. That flag
1783 * may need to be cleared by the caller before letting the page go.
1784 *
1785 * The vmstat statistic corresponding to the list on which the page was
1786 * found will be decremented.
1787 *
1788 * Restrictions:
1789 *
1790 * (1) Must be called with an elevated refcount on the page. This is a
1791 * fundamental difference from isolate_lru_pages (which is called
1792 * without a stable reference).
1793 * (2) the lru_lock must not be held.
1794 * (3) interrupts must be enabled.
1795 */
1796int isolate_lru_page(struct page *page)
1797{
1798 int ret = -EBUSY;
1799
1800 VM_BUG_ON_PAGE(!page_count(page), page);
1801 WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
1802
1803 if (PageLRU(page)) {
1804 pg_data_t *pgdat = page_pgdat(page);
1805 struct lruvec *lruvec;
1806
1807 spin_lock_irq(&pgdat->lru_lock);
1808 lruvec = mem_cgroup_page_lruvec(page, pgdat);
1809 if (PageLRU(page)) {
1810 int lru = page_lru(page);
1811 get_page(page);
1812 ClearPageLRU(page);
1813 del_page_from_lru_list(page, lruvec, lru);
1814 ret = 0;
1815 }
1816 spin_unlock_irq(&pgdat->lru_lock);
1817 }
1818 return ret;
1819}
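/*
 * Usage sketch (hedged; see the page migration paths for real callers).
 * The extra reference satisfies restriction (1) above:
 *
 *	get_page(page);
 *	if (!isolate_lru_page(page)) {
 *		list_add_tail(&page->lru, &pagelist);
 *		inc_node_page_state(page, NR_ISOLATED_ANON +
 *					  page_is_file_cache(page));
 *	}
 *	put_page(page);		// drop the reference we took above
 */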
1820
1821/*
1822 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
1823 * then get rescheduled. When there is a massive number of tasks doing page
1824 * allocation, such sleeping direct reclaimers may keep piling up on each CPU;
1825 * the LRU list will shrink and be scanned faster than necessary, leading to
1826 * unnecessary swapping, thrashing and OOM.
1827 */
1828static int too_many_isolated(struct pglist_data *pgdat, int file,
1829 struct scan_control *sc)
1830{
1831 unsigned long inactive, isolated;
1832
1833 if (current_is_kswapd())
1834 return 0;
1835
1836 if (!sane_reclaim(sc))
1837 return 0;
1838
1839 if (file) {
1840 inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
1841 isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
1842 } else {
1843 inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
1844 isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
1845 }
1846
1847 /*
1848 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
1849 * won't get blocked by normal direct-reclaimers, forming a circular
1850 * deadlock.
1851 */
1852 if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
1853 inactive >>= 3;
1854
1855 return isolated > inactive;
1856}
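/*
 * Worked example: with GFP_KERNEL (__GFP_IO and __GFP_FS both set) and
 * 80000 inactive file pages, the test is "isolated > 80000 >> 3", so
 * direct reclaim throttles once more than 10000 file pages sit isolated.
 * A GFP_NOFS caller keeps the full 80000-page threshold and throttles much
 * later, which is what avoids the circular deadlock described above.
 */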
1857
1858/*
1859 * This moves pages from @list to corresponding LRU list.
1860 *
1861 * We move them the other way if the page is referenced by one or more
1862 * processes, from rmap.
1863 *
1864 * If the pages are mostly unmapped, the processing is fast and it is
1865 * appropriate to hold pgdat->lru_lock across the whole operation. But if
1866 * the pages are mapped, the processing is slow (page_referenced()), so we
1867 * should drop pgdat->lru_lock around each page. It's impossible to balance
1868 * this, so instead we remove the pages from the LRU while processing them.
1869 * It is safe to rely on PG_active against the non-LRU pages in here because
1870 * nobody will play with that bit on a non-LRU page.
1871 *
1872 * The downside is that we have to touch page->_refcount against each page.
1873 * But we had to alter page->flags anyway.
1874 *
1875 * Returns the number of pages moved to the given lruvec.
1876 */
1877
1878static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
1879 struct list_head *list)
1880{
1881 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1882 int nr_pages, nr_moved = 0;
1883 LIST_HEAD(pages_to_free);
1884 struct page *page;
1885 enum lru_list lru;
1886
1887 while (!list_empty(list)) {
1888 page = lru_to_page(list);
1889 VM_BUG_ON_PAGE(PageLRU(page), page);
1890 if (unlikely(!page_evictable(page))) {
1891 list_del(&page->lru);
1892 spin_unlock_irq(&pgdat->lru_lock);
1893 putback_lru_page(page);
1894 spin_lock_irq(&pgdat->lru_lock);
1895 continue;
1896 }
1897 lruvec = mem_cgroup_page_lruvec(page, pgdat);
1898
1899 SetPageLRU(page);
1900 lru = page_lru(page);
1901
1902 nr_pages = hpage_nr_pages(page);
1903 update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
1904 list_move(&page->lru, &lruvec->lists[lru]);
1905
1906 if (put_page_testzero(page)) {
1907 __ClearPageLRU(page);
1908 __ClearPageActive(page);
1909 del_page_from_lru_list(page, lruvec, lru);
1910
1911 if (unlikely(PageCompound(page))) {
1912 spin_unlock_irq(&pgdat->lru_lock);
1913 (*get_compound_page_dtor(page))(page);
1914 spin_lock_irq(&pgdat->lru_lock);
1915 } else
1916 list_add(&page->lru, &pages_to_free);
1917 } else {
1918 nr_moved += nr_pages;
1919 }
1920 }
1921
1922 /*
1923 * To save our caller's stack, now reuse the input list for pages to free.
1924 */
1925 list_splice(&pages_to_free, list);
1926
1927 return nr_moved;
1928}
1929
1930/*
1931 * If a kernel thread (such as nfsd for loop-back mounts) services
1932 * a backing device by writing to the page cache it sets PF_LESS_THROTTLE.
1933 * In that case we should only throttle if the backing device it is
1934 * writing to is congested. In other cases it is safe to throttle.
1935 */
1936static int current_may_throttle(void)
1937{
1938 return !(current->flags & PF_LESS_THROTTLE) ||
1939 current->backing_dev_info == NULL ||
1940 bdi_write_congested(current->backing_dev_info);
1941}
1942
1943/*
1944 * shrink_inactive_list() is a helper for shrink_node(). It returns the number
1945 * of reclaimed pages
1946 */
1947static noinline_for_stack unsigned long
1948shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
1949 struct scan_control *sc, enum lru_list lru)
1950{
1951 LIST_HEAD(page_list);
1952 unsigned long nr_scanned;
1953 unsigned long nr_reclaimed = 0;
1954 unsigned long nr_taken;
1955 struct reclaim_stat stat;
1956 int file = is_file_lru(lru);
1957 enum vm_event_item item;
1958 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1959 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1960 bool stalled = false;
1961
1962 while (unlikely(too_many_isolated(pgdat, file, sc))) {
1963 if (stalled)
1964 return 0;
1965
1966 /* wait a bit for the reclaimer. */
1967 msleep(100);
1968 stalled = true;
1969
1970 /* We are about to die and free our memory. Return now. */
1971 if (fatal_signal_pending(current))
1972 return SWAP_CLUSTER_MAX;
1973 }
1974
1975 lru_add_drain();
1976
1977 spin_lock_irq(&pgdat->lru_lock);
1978
1979 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
1980 &nr_scanned, sc, lru);
1981
1982 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
1983 reclaim_stat->recent_scanned[file] += nr_taken;
1984
1985 item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
1986 if (global_reclaim(sc))
1987 __count_vm_events(item, nr_scanned);
1988 __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
1989 spin_unlock_irq(&pgdat->lru_lock);
1990
1991 if (nr_taken == 0)
1992 return 0;
1993
1994 nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, 0,
1995 &stat, false);
1996
1997 spin_lock_irq(&pgdat->lru_lock);
1998
1999 item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
2000 if (global_reclaim(sc))
2001 __count_vm_events(item, nr_reclaimed);
2002 __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
2003 reclaim_stat->recent_rotated[0] += stat.nr_activate[0];
2004 reclaim_stat->recent_rotated[1] += stat.nr_activate[1];
2005
2006 move_pages_to_lru(lruvec, &page_list);
2007
2008 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
2009
2010 spin_unlock_irq(&pgdat->lru_lock);
2011
2012 mem_cgroup_uncharge_list(&page_list);
2013 free_unref_page_list(&page_list);
2014
2015 /*
2016 * If dirty pages are scanned that are not queued for IO, it
2017 * implies that flushers are not doing their job. This can
2018 * happen when memory pressure pushes dirty pages to the end of
2019 * the LRU before the dirty limits are breached and the dirty
2020 * data has expired. It can also happen when the proportion of
2021 * dirty pages grows not through writes but through memory
2022 * pressure reclaiming all the clean cache. And in some cases,
2023 * the flushers simply cannot keep up with the allocation
2024 * rate. Nudge the flusher threads in case they are asleep.
2025 */
2026 if (stat.nr_unqueued_dirty == nr_taken)
2027 wakeup_flusher_threads(WB_REASON_VMSCAN);
2028
2029 sc->nr.dirty += stat.nr_dirty;
2030 sc->nr.congested += stat.nr_congested;
2031 sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
2032 sc->nr.writeback += stat.nr_writeback;
2033 sc->nr.immediate += stat.nr_immediate;
2034 sc->nr.taken += nr_taken;
2035 if (file)
2036 sc->nr.file_taken += nr_taken;
2037
2038 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
2039 nr_scanned, nr_reclaimed, &stat, sc->priority, file);
2040 return nr_reclaimed;
2041}
2042
2043static void shrink_active_list(unsigned long nr_to_scan,
2044 struct lruvec *lruvec,
2045 struct scan_control *sc,
2046 enum lru_list lru)
2047{
2048 unsigned long nr_taken;
2049 unsigned long nr_scanned;
2050 unsigned long vm_flags;
2051 LIST_HEAD(l_hold); /* The pages which were snipped off */
2052 LIST_HEAD(l_active);
2053 LIST_HEAD(l_inactive);
2054 struct page *page;
2055 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
2056 unsigned nr_deactivate, nr_activate;
2057 unsigned nr_rotated = 0;
2058 int file = is_file_lru(lru);
2059 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2060
2061 lru_add_drain();
2062
2063 spin_lock_irq(&pgdat->lru_lock);
2064
2065 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
2066 &nr_scanned, sc, lru);
2067
2068 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
2069 reclaim_stat->recent_scanned[file] += nr_taken;
2070
2071 __count_vm_events(PGREFILL, nr_scanned);
2072 __count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
2073
2074 spin_unlock_irq(&pgdat->lru_lock);
2075
2076 while (!list_empty(&l_hold)) {
2077 cond_resched();
2078 page = lru_to_page(&l_hold);
2079 list_del(&page->lru);
2080
2081 if (unlikely(!page_evictable(page))) {
2082 putback_lru_page(page);
2083 continue;
2084 }
2085
2086 if (unlikely(buffer_heads_over_limit)) {
2087 if (page_has_private(page) && trylock_page(page)) {
2088 if (page_has_private(page))
2089 try_to_release_page(page, 0);
2090 unlock_page(page);
2091 }
2092 }
2093
2094 if (page_referenced(page, 0, sc->target_mem_cgroup,
2095 &vm_flags)) {
2096 nr_rotated += hpage_nr_pages(page);
2097 /*
2098 * Identify referenced, file-backed active pages and
2099 * give them one more trip around the active list, so
2100 * that executable code gets a better chance to stay in
2101 * memory under moderate memory pressure. Anon pages
2102 * are not likely to be evicted by use-once streaming
2103 * IO, plus JVM can create lots of anon VM_EXEC pages,
2104 * so we ignore them here.
2105 */
2106 if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
2107 list_add(&page->lru, &l_active);
2108 continue;
2109 }
2110 }
2111
2112 ClearPageActive(page); /* we are de-activating */
2113 SetPageWorkingset(page);
2114 list_add(&page->lru, &l_inactive);
2115 }
2116
2117 /*
2118 * Move pages back to the lru list.
2119 */
2120 spin_lock_irq(&pgdat->lru_lock);
2121 /*
2122 * Count referenced pages from currently used mappings as rotated,
2123 * even though only some of them are actually re-activated. This
2124 * helps balance scan pressure between file and anonymous pages in
2125 * get_scan_count.
2126 */
2127 reclaim_stat->recent_rotated[file] += nr_rotated;
2128
2129 nr_activate = move_pages_to_lru(lruvec, &l_active);
2130 nr_deactivate = move_pages_to_lru(lruvec, &l_inactive);
2131 /* Keep all free pages in l_active list */
2132 list_splice(&l_inactive, &l_active);
2133
2134 __count_vm_events(PGDEACTIVATE, nr_deactivate);
2135 __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
2136
2137 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
2138 spin_unlock_irq(&pgdat->lru_lock);
2139
2140 mem_cgroup_uncharge_list(&l_active);
2141 free_unref_page_list(&l_active);
2142 trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
2143 nr_deactivate, nr_rotated, sc->priority, file);
2144}
2145
2146unsigned long reclaim_pages(struct list_head *page_list)
2147{
2148 int nid = -1;
2149 unsigned long nr_reclaimed = 0;
2150 LIST_HEAD(node_page_list);
2151 struct reclaim_stat dummy_stat;
2152 struct page *page;
2153 struct scan_control sc = {
2154 .gfp_mask = GFP_KERNEL,
2155 .priority = DEF_PRIORITY,
2156 .may_writepage = 1,
2157 .may_unmap = 1,
2158 .may_swap = 1,
2159 };
2160
2161 while (!list_empty(page_list)) {
2162 page = lru_to_page(page_list);
2163 if (nid == -1) {
2164 nid = page_to_nid(page);
2165 INIT_LIST_HEAD(&node_page_list);
2166 }
2167
2168 if (nid == page_to_nid(page)) {
2169 ClearPageActive(page);
2170 list_move(&page->lru, &node_page_list);
2171 continue;
2172 }
2173
2174 nr_reclaimed += shrink_page_list(&node_page_list,
2175 NODE_DATA(nid),
2176 &sc, 0,
2177 &dummy_stat, false);
2178 while (!list_empty(&node_page_list)) {
2179 page = lru_to_page(&node_page_list);
2180 list_del(&page->lru);
2181 putback_lru_page(page);
2182 }
2183
2184 nid = -1;
2185 }
2186
2187 if (!list_empty(&node_page_list)) {
2188 nr_reclaimed += shrink_page_list(&node_page_list,
2189 NODE_DATA(nid),
2190 &sc, 0,
2191 &dummy_stat, false);
2192 while (!list_empty(&node_page_list)) {
2193 page = lru_to_page(&node_page_list);
2194 list_del(&page->lru);
2195 putback_lru_page(page);
2196 }
2197 }
2198
2199 return nr_reclaimed;
2200}
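/*
 * Caller sketch (assumption: modelled on an MADV_PAGEOUT-style user; not a
 * verbatim copy). Pages are gathered during a page-table walk, then handed
 * over in one batch; reclaim_pages() does the per-node split internally:
 *
 *	LIST_HEAD(page_list);
 *	// for each page in the range:
 *	//	if (!isolate_lru_page(page))
 *	//		list_add(&page->lru, &page_list);
 *	nr = reclaim_pages(&page_list);
 */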
2201
2202/*
2203 * The inactive anon list should be small enough that the VM never has
2204 * to do too much work.
2205 *
2206 * The inactive file list should be small enough to leave most memory
2207 * to the established workingset on the scan-resistant active list,
2208 * but large enough to avoid thrashing the aggregate readahead window.
2209 *
2210 * Both inactive lists should also be large enough that each inactive
2211 * page has a chance to be referenced again before it is reclaimed.
2212 *
2213 * If that fails and refaulting is observed, the inactive list grows.
2214 *
2215 * The inactive_ratio is the target ratio of ACTIVE to INACTIVE pages
2216 * on this LRU, maintained by the pageout code. An inactive_ratio
2217 * of 3 means 3:1 or 25% of the pages are kept on the inactive list.
2218 *
2219 * total target max
2220 * memory ratio inactive
2221 * -------------------------------------
2222 * 10MB 1 5MB
2223 * 100MB 1 50MB
2224 * 1GB 3 250MB
2225 * 10GB 10 0.9GB
2226 * 100GB 31 3GB
2227 * 1TB 101 10GB
2228 * 10TB 320 32GB
2229 */
2230static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
2231 struct scan_control *sc, bool trace)
2232{
2233 enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE;
2234 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2235 enum lru_list inactive_lru = file * LRU_FILE;
2236 unsigned long inactive, active;
2237 unsigned long inactive_ratio;
2238 unsigned long refaults;
2239 unsigned long gb;
2240
2241 /*
2242 * If we don't have swap space, anonymous page deactivation
2243 * is pointless.
2244 */
2245 if (!file && !total_swap_pages)
2246 return false;
2247
2248 inactive = lruvec_lru_size(lruvec, inactive_lru, sc->reclaim_idx);
2249 active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx);
2250
2251 /*
2252 * When refaults are being observed, it means a new workingset
2253 * is being established. Disable active list protection to get
2254 * rid of the stale workingset quickly.
2255 */
2256 refaults = lruvec_page_state_local(lruvec, WORKINGSET_ACTIVATE);
2257 if (file && lruvec->refaults != refaults) {
2258 inactive_ratio = 0;
2259 } else {
2260 gb = (inactive + active) >> (30 - PAGE_SHIFT);
2261 if (gb)
2262 inactive_ratio = int_sqrt(10 * gb);
2263 else
2264 inactive_ratio = 1;
2265 }
2266
2267 if (trace)
2268 trace_mm_vmscan_inactive_list_is_low(pgdat->node_id, sc->reclaim_idx,
2269 lruvec_lru_size(lruvec, inactive_lru, MAX_NR_ZONES), inactive,
2270 lruvec_lru_size(lruvec, active_lru, MAX_NR_ZONES), active,
2271 inactive_ratio, file);
2272
2273 return inactive * inactive_ratio < active;
2274}
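/*
 * Worked example for the table above: with 1GB on this LRU, gb = 1, so
 * inactive_ratio = int_sqrt(10) = 3 and the list is "low" while
 * inactive * 3 < active, i.e. while the inactive list holds less than
 * ~25% of the total. At 100GB, int_sqrt(1000) = 31, keeping only ~3GB
 * inactive.
 */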
2275
2276static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
2277 struct lruvec *lruvec, struct scan_control *sc)
2278{
2279 if (is_active_lru(lru)) {
2280 if (inactive_list_is_low(lruvec, is_file_lru(lru), sc, true))
2281 shrink_active_list(nr_to_scan, lruvec, sc, lru);
2282 return 0;
2283 }
2284
2285 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
2286}
2287
2288enum scan_balance {
2289 SCAN_EQUAL,
2290 SCAN_FRACT,
2291 SCAN_ANON,
2292 SCAN_FILE,
2293};
2294
2295/*
2296 * Determine how aggressively the anon and file LRU lists should be
2297 * scanned. The relative value of each set of LRU lists is determined
2298 * by looking at the fraction of the scanned pages that we rotated back
2299 * onto the active list instead of evicting.
2300 *
2301 * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
2302 * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
2303 */
2304static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
2305 struct scan_control *sc, unsigned long *nr,
2306 unsigned long *lru_pages)
2307{
2308 int swappiness = mem_cgroup_swappiness(memcg);
2309 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
2310 u64 fraction[2];
2311 u64 denominator = 0; /* gcc */
2312 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2313 unsigned long anon_prio, file_prio;
2314 enum scan_balance scan_balance;
2315 unsigned long anon, file;
2316 unsigned long ap, fp;
2317 enum lru_list lru;
2318
2319 /* If we have no swap space, do not bother scanning anon pages. */
2320 if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
2321 scan_balance = SCAN_FILE;
2322 goto out;
2323 }
2324
2325 /*
2326 * Global reclaim will swap to prevent OOM even with no
2327 * swappiness, but memcg users want to use this knob to
2328 * disable swapping for individual groups completely when
2329 * using the memory controller's swap limit feature would be
2330 * too expensive.
2331 */
2332 if (!global_reclaim(sc) && !swappiness) {
2333 scan_balance = SCAN_FILE;
2334 goto out;
2335 }
2336
2337 /*
2338 * Do not apply any pressure balancing cleverness when the
2339 * system is close to OOM, scan both anon and file equally
2340 * (unless the swappiness setting disagrees with swapping).
2341 */
2342 if (!sc->priority && swappiness) {
2343 scan_balance = SCAN_EQUAL;
2344 goto out;
2345 }
2346
2347 /*
2348 * Prevent the reclaimer from falling into the cache trap: as
2349 * cache pages start out inactive, every cache fault will tip
2350 * the scan balance towards the file LRU. And as the file LRU
2351 * shrinks, so does the window for rotation from references.
2352 * This means we have a runaway feedback loop where a tiny
2353 * thrashing file LRU becomes infinitely more attractive than
2354 * anon pages. Try to detect this based on file LRU size.
2355 */
2356 if (global_reclaim(sc)) {
2357 unsigned long pgdatfile;
2358 unsigned long pgdatfree;
2359 int z;
2360 unsigned long total_high_wmark = 0;
2361
2362 pgdatfree = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
2363 pgdatfile = node_page_state(pgdat, NR_ACTIVE_FILE) +
2364 node_page_state(pgdat, NR_INACTIVE_FILE);
2365
2366 for (z = 0; z < MAX_NR_ZONES; z++) {
2367 struct zone *zone = &pgdat->node_zones[z];
2368 if (!managed_zone(zone))
2369 continue;
2370
2371 total_high_wmark += high_wmark_pages(zone);
2372 }
2373
2374 if (unlikely(pgdatfile + pgdatfree <= total_high_wmark)) {
2375 /*
2376 * Force SCAN_ANON if there are enough inactive
2377 * anonymous pages on the LRU in eligible zones.
2378 * Otherwise, the small LRU gets thrashed.
2379 */
2380 if (!inactive_list_is_low(lruvec, false, sc, false) &&
2381 lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, sc->reclaim_idx)
2382 >> sc->priority) {
2383 scan_balance = SCAN_ANON;
2384 goto out;
2385 }
2386 }
2387 }
2388
2389 /*
2390 * If there is enough inactive page cache, i.e. if the size of the
2391 * inactive list is greater than that of the active list *and* the
2392 * inactive list actually has some pages to scan on this priority, we
2393 * do not reclaim anything from the anonymous working set right now.
2394 * Without the second condition we could end up never scanning an
2395 * lruvec even if it has plenty of old anonymous pages unless the
2396 * system is under heavy pressure.
2397 */
2398 if (!inactive_list_is_low(lruvec, true, sc, false) &&
2399 lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) {
2400 scan_balance = SCAN_FILE;
2401 goto out;
2402 }
2403
2404 scan_balance = SCAN_FRACT;
2405
2406 /*
2407 * With swappiness at 100, anonymous and file have the same priority.
2408 * This scanning priority is essentially the inverse of IO cost.
2409 */
2410 anon_prio = swappiness;
2411 file_prio = 200 - anon_prio;
2412
2413 /*
2414 * OK, so we have swap space and a fair amount of page cache
2415 * pages. We use the recently rotated / recently scanned
2416 * ratios to determine how valuable each cache is.
2417 *
2418 * Because workloads change over time (and to avoid overflow)
2419 * we keep these statistics as a floating average, which ends
2420 * up weighing recent references more than old ones.
2421 *
2422 * anon in [0], file in [1]
2423 */
2424
2425 anon = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) +
2426 lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES);
2427 file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES) +
2428 lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, MAX_NR_ZONES);
2429
2430 spin_lock_irq(&pgdat->lru_lock);
2431 if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
2432 reclaim_stat->recent_scanned[0] /= 2;
2433 reclaim_stat->recent_rotated[0] /= 2;
2434 }
2435
2436 if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
2437 reclaim_stat->recent_scanned[1] /= 2;
2438 reclaim_stat->recent_rotated[1] /= 2;
2439 }
2440
2441 /*
2442 * The amount of pressure on anon vs file pages is inversely
2443 * proportional to the fraction of recently scanned pages on
2444 * each list that were recently referenced and in active use.
2445 */
2446 ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
2447 ap /= reclaim_stat->recent_rotated[0] + 1;
2448
2449 fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
2450 fp /= reclaim_stat->recent_rotated[1] + 1;
2451 spin_unlock_irq(&pgdat->lru_lock);
2452
2453 fraction[0] = ap;
2454 fraction[1] = fp;
2455 denominator = ap + fp + 1;
2456out:
2457 *lru_pages = 0;
2458 for_each_evictable_lru(lru) {
2459 int file = is_file_lru(lru);
2460 unsigned long lruvec_size;
2461 unsigned long scan;
2462 unsigned long protection;
2463
2464 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
2465 protection = mem_cgroup_protection(memcg,
2466 sc->memcg_low_reclaim);
2467
2468 if (protection) {
2469 /*
2470 * Scale a cgroup's reclaim pressure by proportioning
2471 * its current usage to its memory.low or memory.min
2472 * setting.
2473 *
2474 * This is important, as otherwise scanning aggression
2475 * becomes extremely binary -- from nothing as we
2476 * approach the memory protection threshold, to totally
2477 * nominal as we exceed it. This results in requiring
2478 * setting extremely liberal protection thresholds. It
2479 * also means we simply get no protection at all if we
2480 * set it too low, which is not ideal.
2481 *
2482 * If there is any protection in place, we reduce scan
2483 * pressure by how much of the total memory used is
2484 * within protection thresholds.
2485 *
2486 * There is one special case: in the first reclaim pass,
2487 * we skip over all groups that are within their low
2488 * protection. If that fails to reclaim enough pages to
2489 * satisfy the reclaim goal, we come back and override
2490 * the best-effort low protection. However, we still
2491 * ideally want to honor how well-behaved groups are in
2492 * that case instead of simply punishing them all
2493 * equally. As such, we reclaim them based on how much
2494 * memory they are using, reducing the scan pressure
2495 * again by how much of the total memory used is under
2496 * hard protection.
2497 */
2498 unsigned long cgroup_size = mem_cgroup_size(memcg);
2499
2500 /* Avoid TOCTOU with earlier protection check */
2501 cgroup_size = max(cgroup_size, protection);
2502
2503 scan = lruvec_size - lruvec_size * protection /
2504 cgroup_size;
2505
2506 /*
2507 * Minimally target SWAP_CLUSTER_MAX pages to keep
2508 * reclaim moving forwards, avoiding decrementing
2509 * sc->priority further than desirable.
2510 */
2511 scan = max(scan, SWAP_CLUSTER_MAX);
2512 } else {
2513 scan = lruvec_size;
2514 }
2515
2516 scan >>= sc->priority;
2517
2518 /*
2519 * If the cgroup's already been deleted, make sure to
2520 * scrape out the remaining cache.
2521 */
2522 if (!scan && !mem_cgroup_online(memcg))
2523 scan = min(lruvec_size, SWAP_CLUSTER_MAX);
2524
2525 switch (scan_balance) {
2526 case SCAN_EQUAL:
2527 /* Scan lists relative to size */
2528 break;
2529 case SCAN_FRACT:
2530 /*
2531 * Scan types proportional to swappiness and
2532 * their relative recent reclaim efficiency.
2533 * Make sure we don't miss the last page
2534 * because of a round-off error.
2535 */
2536 scan = DIV64_U64_ROUND_UP(scan * fraction[file],
2537 denominator);
2538 break;
2539 case SCAN_FILE:
2540 case SCAN_ANON:
2541 /* Scan one type exclusively */
2542 if ((scan_balance == SCAN_FILE) != file) {
2543 lruvec_size = 0;
2544 scan = 0;
2545 }
2546 break;
2547 default:
2548 /* Look ma, no brain */
2549 BUG();
2550 }
2551
2552 *lru_pages += lruvec_size;
2553 nr[lru] = scan;
2554 }
2555}
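/*
 * Worked example of the SCAN_FRACT math above (made-up numbers): with
 * swappiness = 60, anon_prio = 60 and file_prio = 140. Suppose
 * recent_scanned = {10000, 50000} and recent_rotated = {5000, 1000}:
 *
 *	ap = 60  * (10000 + 1) / (5000 + 1)  ~= 120
 *	fp = 140 * (50001)     / (1001)      ~= 6993
 *
 * fraction = {120, 6993} over a denominator of 7114, so roughly 98% of
 * the scan pressure goes to the file LRUs, which rotated far fewer of
 * their recently scanned pages.
 */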
2556
2557/*
2558 * This is a basic per-node page freer. Used by both kswapd and direct reclaim.
2559 */
2560static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memcg,
2561 struct scan_control *sc, unsigned long *lru_pages)
2562{
2563 struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
2564 unsigned long nr[NR_LRU_LISTS];
2565 unsigned long targets[NR_LRU_LISTS];
2566 unsigned long nr_to_scan;
2567 enum lru_list lru;
2568 unsigned long nr_reclaimed = 0;
2569 unsigned long nr_to_reclaim = sc->nr_to_reclaim;
2570 struct blk_plug plug;
2571 bool scan_adjusted;
2572
2573 get_scan_count(lruvec, memcg, sc, nr, lru_pages);
2574
2575 /* Record the original scan target for proportional adjustments later */
2576 memcpy(targets, nr, sizeof(nr));
2577
2578 /*
2579 * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
2580 * event that can occur when there is little memory pressure e.g.
2581 * multiple streaming readers/writers. Hence, we do not abort scanning
2582 * when the requested number of pages has been reclaimed while scanning at
2583 * DEF_PRIORITY on the assumption that the fact we are direct
2584 * reclaiming implies that kswapd is not keeping up and it is best to
2585 * do a batch of work at once. For memcg reclaim one check is made to
2586 * abort proportional reclaim if either the file or anon lru has already
2587 * dropped to zero at the first pass.
2588 */
2589 scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
2590 sc->priority == DEF_PRIORITY);
2591
2592 blk_start_plug(&plug);
2593 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
2594 nr[LRU_INACTIVE_FILE]) {
2595 unsigned long nr_anon, nr_file, percentage;
2596 unsigned long nr_scanned;
2597
2598 for_each_evictable_lru(lru) {
2599 if (nr[lru]) {
2600 nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
2601 nr[lru] -= nr_to_scan;
2602
2603 nr_reclaimed += shrink_list(lru, nr_to_scan,
2604 lruvec, sc);
2605 }
2606 }
2607
2608 cond_resched();
2609
2610 if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
2611 continue;
2612
2613 /*
2614 * For kswapd and memcg, reclaim at least the number of pages
2615 * requested. Ensure that the anon and file LRUs are scanned
2616 * proportionally to what was requested by get_scan_count(). We
2617 * stop reclaiming one LRU and reduce the amount of scanning
2618 * proportionally to the original scan target.
2619 */
2620 nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
2621 nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
2622
2623 /*
2624 * It's just vindictive to attack the larger once the smaller
2625 * has gone to zero. And given the way we stop scanning the
2626 * smaller below, this makes sure that we only make one nudge
2627 * towards proportionality once we've got nr_to_reclaim.
2628 */
2629 if (!nr_file || !nr_anon)
2630 break;
2631
2632 if (nr_file > nr_anon) {
2633 unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
2634 targets[LRU_ACTIVE_ANON] + 1;
2635 lru = LRU_BASE;
2636 percentage = nr_anon * 100 / scan_target;
2637 } else {
2638 unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
2639 targets[LRU_ACTIVE_FILE] + 1;
2640 lru = LRU_FILE;
2641 percentage = nr_file * 100 / scan_target;
2642 }
2643
2644 /* Stop scanning the smaller of the LRU */
2645 nr[lru] = 0;
2646 nr[lru + LRU_ACTIVE] = 0;
2647
2648 /*
2649 * Recalculate the other LRU scan count based on its original
2650 * scan target and the percentage scanning already complete
2651 */
2652 lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
2653 nr_scanned = targets[lru] - nr[lru];
2654 nr[lru] = targets[lru] * (100 - percentage) / 100;
2655 nr[lru] -= min(nr[lru], nr_scanned);
2656
2657 lru += LRU_ACTIVE;
2658 nr_scanned = targets[lru] - nr[lru];
2659 nr[lru] = targets[lru] * (100 - percentage) / 100;
2660 nr[lru] -= min(nr[lru], nr_scanned);
2661
2662 scan_adjusted = true;
2663 }
2664 blk_finish_plug(&plug);
2665 sc->nr_reclaimed += nr_reclaimed;
2666
2667 /*
2668 * Even if we did not try to evict anon pages at all, we want to
2669 * rebalance the anon lru active/inactive ratio.
2670 */
2671 if (inactive_list_is_low(lruvec, false, sc, true))
2672 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
2673 sc, LRU_ACTIVE_ANON);
2674}
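/*
 * Worked example of the proportional rebalancing above (made-up numbers):
 * suppose targets total 4000 anon and 40000 file pages. If nr_to_reclaim is
 * met while nr_anon = 2000 and nr_file = 30000 remain, anon is the smaller
 * side: percentage = 2000 * 100 / 4001 = 49. Anon scanning stops, and each
 * file target is recomputed as 51% of its original value minus what was
 * already scanned, so both LRU types end up scanned in roughly the ratio
 * get_scan_count() asked for.
 */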
2675
2676/* Use reclaim/compaction for costly allocs or under memory pressure */
2677static bool in_reclaim_compaction(struct scan_control *sc)
2678{
2679 if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
2680 (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
2681 sc->priority < DEF_PRIORITY - 2))
2682 return true;
2683
2684 return false;
2685}
2686
2687/*
2688 * Reclaim/compaction is used for high-order allocation requests. It reclaims
2689 * order-0 pages before compacting the zone. should_continue_reclaim() returns
2690 * true if more pages should be reclaimed such that when the page allocator
2691 * calls try_to_compact_zone() it will have enough free pages to succeed.
2692 * It will give up earlier than that if there is difficulty reclaiming pages.
2693 */
2694static inline bool should_continue_reclaim(struct pglist_data *pgdat,
2695 unsigned long nr_reclaimed,
2696 struct scan_control *sc)
2697{
2698 unsigned long pages_for_compaction;
2699 unsigned long inactive_lru_pages;
2700 int z;
2701
2702 /* If not in reclaim/compaction mode, stop */
2703 if (!in_reclaim_compaction(sc))
2704 return false;
2705
2706 /*
2707 * Stop if we failed to reclaim any pages from the last SWAP_CLUSTER_MAX
2708 * number of pages that were scanned. This will return to the caller
2709 * with the risk that reclaim/compaction and the resulting allocation
2710 * attempt fail. In the past we have tried harder for __GFP_RETRY_MAYFAIL
2711 * allocations through requiring that the full LRU list has been scanned
2712 * first, by assuming that zero delta of sc->nr_scanned means full LRU
2713 * scan, but that approximation was wrong, and there were corner cases
2714 * where a non-zero number of pages was always scanned.
2715 */
2716 if (!nr_reclaimed)
2717 return false;
2718
2719 /* If compaction would go ahead or the allocation would succeed, stop */
2720 for (z = 0; z <= sc->reclaim_idx; z++) {
2721 struct zone *zone = &pgdat->node_zones[z];
2722 if (!managed_zone(zone))
2723 continue;
2724
2725 switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
2726 case COMPACT_SUCCESS:
2727 case COMPACT_CONTINUE:
2728 return false;
2729 default:
2730 /* check next zone */
2731 ;
2732 }
2733 }
2734
2735 /*
2736 * If we have not reclaimed enough pages for compaction and the
2737 * inactive lists are large enough, continue reclaiming
2738 */
2739 pages_for_compaction = compact_gap(sc->order);
2740 inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
2741 if (get_nr_swap_pages() > 0)
2742 inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
2743
2744 return inactive_lru_pages > pages_for_compaction;
2745}
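/*
 * Worked example (assuming compact_gap(order) is 2UL << order, as in
 * kernels of this vintage): for an order-9 THP allocation,
 * pages_for_compaction = 1024, so reclaim continues while more than 1024
 * eligible inactive pages remain and no zone is already compaction-ready.
 */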
2746
2747static bool pgdat_memcg_congested(pg_data_t *pgdat, struct mem_cgroup *memcg)
2748{
2749 return test_bit(PGDAT_CONGESTED, &pgdat->flags) ||
2750 (memcg && memcg_congested(pgdat, memcg));
2751}
2752
2753static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
2754{
2755 struct reclaim_state *reclaim_state = current->reclaim_state;
2756 unsigned long nr_reclaimed, nr_scanned;
2757 bool reclaimable = false;
2758
2759 do {
2760 struct mem_cgroup *root = sc->target_mem_cgroup;
2761 unsigned long node_lru_pages = 0;
2762 struct mem_cgroup *memcg;
2763
2764 memset(&sc->nr, 0, sizeof(sc->nr));
2765
2766 nr_reclaimed = sc->nr_reclaimed;
2767 nr_scanned = sc->nr_scanned;
2768
2769 memcg = mem_cgroup_iter(root, NULL, NULL);
2770 do {
2771 unsigned long lru_pages;
2772 unsigned long reclaimed;
2773 unsigned long scanned;
2774
2775 switch (mem_cgroup_protected(root, memcg)) {
2776 case MEMCG_PROT_MIN:
2777 /*
2778 * Hard protection.
2779 * If there is no reclaimable memory, OOM.
2780 */
2781 continue;
2782 case MEMCG_PROT_LOW:
2783 /*
2784 * Soft protection.
2785 * Respect the protection only as long as
2786 * there is an unprotected supply
2787 * of reclaimable memory from other cgroups.
2788 */
2789 if (!sc->memcg_low_reclaim) {
2790 sc->memcg_low_skipped = 1;
2791 continue;
2792 }
2793 memcg_memory_event(memcg, MEMCG_LOW);
2794 break;
2795 case MEMCG_PROT_NONE:
2796 /*
2797 * All protection thresholds breached. We may
2798 * still choose to vary the scan pressure
2799 * applied based on by how much the cgroup in
2800 * question has exceeded its protection
2801 * thresholds (see get_scan_count).
2802 */
2803 break;
2804 }
2805
2806 reclaimed = sc->nr_reclaimed;
2807 scanned = sc->nr_scanned;
2808 shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
2809 node_lru_pages += lru_pages;
2810
2811 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
2812 sc->priority);
2813
2814 /* Record the group's reclaim efficiency */
2815 vmpressure(sc->gfp_mask, memcg, false,
2816 sc->nr_scanned - scanned,
2817 sc->nr_reclaimed - reclaimed);
2818
2819 } while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
2820
2821 if (reclaim_state) {
2822 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
2823 reclaim_state->reclaimed_slab = 0;
2824 }
2825
2826 /* Record the subtree's reclaim efficiency */
2827 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
2828 sc->nr_scanned - nr_scanned,
2829 sc->nr_reclaimed - nr_reclaimed);
2830
2831 if (sc->nr_reclaimed - nr_reclaimed)
2832 reclaimable = true;
2833
		if (current_is_kswapd()) {
			/*
			 * If reclaim is isolating dirty pages under writeback,
			 * it implies that the long-lived page allocation rate
			 * is exceeding the page laundering rate. Either the
			 * global limits are not being effective at throttling
			 * processes due to the page distribution throughout
			 * zones or there is heavy usage of a slow backing
			 * device. The only option is to throttle from reclaim
			 * context which is not ideal as there is no guarantee
			 * the dirtying process is throttled in the same way
			 * balance_dirty_pages() manages.
			 *
			 * Once a node is flagged PGDAT_WRITEBACK, kswapd will
			 * count the number of pages under writeback that are
			 * flagged for immediate reclaim and stall if any are
			 * encountered in the nr_immediate check below.
			 */
			if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken)
				set_bit(PGDAT_WRITEBACK, &pgdat->flags);

			/*
			 * Tag a node as congested if all the dirty pages
			 * scanned were backed by a congested BDI and
			 * wait_iff_congested will stall.
			 */
			if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
				set_bit(PGDAT_CONGESTED, &pgdat->flags);

			/* Allow kswapd to start writing pages during reclaim. */
			if (sc->nr.unqueued_dirty == sc->nr.file_taken)
				set_bit(PGDAT_DIRTY, &pgdat->flags);

			/*
			 * If kswapd scans pages marked for immediate reclaim
			 * and under writeback (nr_immediate), it implies that
			 * pages are cycling through the LRU faster than they
			 * are written so also forcibly stall.
			 */
			if (sc->nr.immediate)
				congestion_wait(BLK_RW_ASYNC, HZ/10);
		}

		/*
		 * Legacy memcg will stall in page writeback so avoid forcibly
		 * stalling in wait_iff_congested().
		 */
		if (!global_reclaim(sc) && sane_reclaim(sc) &&
		    sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
			set_memcg_congestion(pgdat, root, true);

		/*
		 * Stall direct reclaim for IO completions if the underlying
		 * BDIs and the node are congested. Allow kswapd to continue
		 * until it starts encountering unqueued dirty pages or
		 * cycling through the LRU too quickly.
		 */
		if (!sc->hibernation_mode && !current_is_kswapd() &&
		    current_may_throttle() && pgdat_memcg_congested(pgdat, root))
			wait_iff_congested(BLK_RW_ASYNC, HZ/10);

	} while (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
					 sc));

	/*
	 * Kswapd gives up on balancing particular nodes after too
	 * many failures to reclaim anything from them and goes to
	 * sleep. On reclaim progress, reset the failure counter. A
	 * successful direct reclaim run will revive a dormant kswapd.
	 */
	if (reclaimable)
		pgdat->kswapd_failures = 0;

	return reclaimable;
}

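/*
 * Illustrative sketch, not part of mainline: the reclaim loop above uses
 * the mem_cgroup_iter() protocol, a pre-order walk of the root's subtree
 * that maintains css references and therefore must be run to completion
 * (or abandoned with mem_cgroup_iter_break()). The hypothetical helper
 * below shows the bare iteration shape by counting the cgroups a reclaim
 * pass would visit.
 */
static unsigned int __maybe_unused
example_count_reclaim_targets(struct mem_cgroup *root)
{
	struct mem_cgroup *memcg;
	unsigned int nr = 0;

	memcg = mem_cgroup_iter(root, NULL, NULL);
	do {
		nr++;	/* shrink_node_memcg() would scan this memcg */
	} while ((memcg = mem_cgroup_iter(root, memcg, NULL)));

	return nr;
}
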
/*
 * Returns true if compaction should go ahead for a costly-order request, or
 * the allocation would already succeed without compaction. Return false if we
 * should reclaim first.
 */
static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
{
	unsigned long watermark;
	enum compact_result suitable;

	suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx);
	if (suitable == COMPACT_SUCCESS)
		/* Allocation should succeed already. Don't reclaim. */
		return true;
	if (suitable == COMPACT_SKIPPED)
		/* Compaction cannot yet proceed. Do reclaim. */
		return false;

	/*
	 * Compaction is already possible, but it takes time to run and there
	 * are potentially other callers using the pages just freed. So proceed
	 * with reclaim to make a buffer of free pages available to give
	 * compaction a reasonable chance of completing and allocating the page.
	 * Note that we won't actually reclaim the whole buffer in one attempt
	 * as the target watermark in should_continue_reclaim() is lower. But if
	 * we are already above the high+gap watermark, don't reclaim at all.
	 */
	watermark = high_wmark_pages(zone) + compact_gap(sc->order);

	return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
}

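/*
 * Illustrative arithmetic, not part of mainline: with 4K pages, an
 * order-9 (THP sized) request has compact_gap(9) == 1024 pages, so
 * direct reclaim keeps going until roughly high_wmark + 4MB is free
 * before deferring to compaction. A hypothetical helper making the
 * target explicit:
 */
static inline unsigned long __maybe_unused
example_compaction_reclaim_target(struct zone *zone, int order)
{
	/* Free pages needed before compaction_ready() stops reclaim */
	return high_wmark_pages(zone) + compact_gap(order);
}
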
/*
 * This is the direct reclaim path, for page-allocating processes. We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan then give up on it.
 */
static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
{
	struct zoneref *z;
	struct zone *zone;
	unsigned long nr_soft_reclaimed;
	unsigned long nr_soft_scanned;
	gfp_t orig_mask;
	pg_data_t *last_pgdat = NULL;

	/*
	 * If the number of buffer_heads in the machine exceeds the maximum
	 * allowed level, force direct reclaim to scan the highmem zone as
	 * highmem pages could be pinning lowmem pages storing buffer_heads
	 */
	orig_mask = sc->gfp_mask;
	if (buffer_heads_over_limit) {
		sc->gfp_mask |= __GFP_HIGHMEM;
		sc->reclaim_idx = gfp_zone(sc->gfp_mask);
	}

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
					sc->reclaim_idx, sc->nodemask) {
		/*
		 * Take care that memory controller reclaim has only a
		 * small influence on the global LRU.
		 */
		if (global_reclaim(sc)) {
			if (!cpuset_zone_allowed(zone,
						 GFP_KERNEL | __GFP_HARDWALL))
				continue;

			/*
			 * If we already have plenty of memory free for
			 * compaction in this zone, don't free any more.
			 * Even though compaction is invoked for any
			 * non-zero order, only frequent costly order
			 * reclamation is disruptive enough to become a
			 * noticeable problem, like transparent huge
			 * page allocations.
			 */
			if (IS_ENABLED(CONFIG_COMPACTION) &&
			    sc->order > PAGE_ALLOC_COSTLY_ORDER &&
			    compaction_ready(zone, sc)) {
				sc->compaction_ready = true;
				continue;
			}

			/*
			 * Shrink each node in the zonelist once. If the
			 * zonelist is ordered by zone (not the default) then a
			 * node may be shrunk multiple times but in that case
			 * the user prefers lower zones being preserved.
			 */
			if (zone->zone_pgdat == last_pgdat)
				continue;

			/*
			 * This steals pages from memory cgroups over softlimit
			 * and returns the number of reclaimed pages and
			 * scanned pages. This works for global memory pressure
			 * and balancing, not for a memcg's limit.
			 */
			nr_soft_scanned = 0;
			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat,
						sc->order, sc->gfp_mask,
						&nr_soft_scanned);
			sc->nr_reclaimed += nr_soft_reclaimed;
			sc->nr_scanned += nr_soft_scanned;
			/* need some check to avoid more shrink_node() calls */
		}

		/* See comment about same check for global reclaim above */
		if (zone->zone_pgdat == last_pgdat)
			continue;
		last_pgdat = zone->zone_pgdat;
		shrink_node(zone->zone_pgdat, sc);
	}

	/*
	 * Restore to original mask to avoid the impact on the caller if we
	 * promoted it to __GFP_HIGHMEM.
	 */
	sc->gfp_mask = orig_mask;
}

static void snapshot_refaults(struct mem_cgroup *root_memcg, pg_data_t *pgdat)
{
	struct mem_cgroup *memcg;

	memcg = mem_cgroup_iter(root_memcg, NULL, NULL);
	do {
		unsigned long refaults;
		struct lruvec *lruvec;

		lruvec = mem_cgroup_lruvec(pgdat, memcg);
		refaults = lruvec_page_state_local(lruvec, WORKINGSET_ACTIVATE);
		lruvec->refaults = refaults;
	} while ((memcg = mem_cgroup_iter(root_memcg, memcg, NULL)));
}

/*
 * This is the main entry point to direct page reclaim.
 *
 * If a full scan of the inactive list fails to free enough memory then we
 * are "out of memory" and something needs to be killed.
 *
 * If the caller is !__GFP_FS then the probability of a failure is reasonably
 * high - the zone may be full of dirty or under-writeback pages, which this
 * caller can't do much about. We kick the writeback threads and take explicit
 * naps in the hope that some of these pages can be written. But if the
 * allocating task holds filesystem locks which prevent writeout this might not
 * work, and the allocation attempt will fail.
 *
 * returns:	0, if no pages reclaimed
 *		else, the number of pages reclaimed
 */
static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
					  struct scan_control *sc)
{
	int initial_priority = sc->priority;
	pg_data_t *last_pgdat;
	struct zoneref *z;
	struct zone *zone;
retry:
	delayacct_freepages_start();

	if (global_reclaim(sc))
		__count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);

	do {
		vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
				sc->priority);
		sc->nr_scanned = 0;
		shrink_zones(zonelist, sc);

		if (sc->nr_reclaimed >= sc->nr_to_reclaim)
			break;

		if (sc->compaction_ready)
			break;

		/*
		 * If we're having trouble reclaiming, start doing
		 * writepage even in laptop mode.
		 */
		if (sc->priority < DEF_PRIORITY - 2)
			sc->may_writepage = 1;
	} while (--sc->priority >= 0);

	last_pgdat = NULL;
	for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx,
					sc->nodemask) {
		if (zone->zone_pgdat == last_pgdat)
			continue;
		last_pgdat = zone->zone_pgdat;
		snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat);
		set_memcg_congestion(last_pgdat, sc->target_mem_cgroup, false);
	}

	delayacct_freepages_end();

	if (sc->nr_reclaimed)
		return sc->nr_reclaimed;

	/* Aborted reclaim to try compaction? don't OOM, then */
	if (sc->compaction_ready)
		return 1;

	/* Untapped cgroup reserves? Don't OOM, retry. */
	if (sc->memcg_low_skipped) {
		sc->priority = initial_priority;
		sc->memcg_low_reclaim = 1;
		sc->memcg_low_skipped = 0;
		goto retry;
	}

	return 0;
}

static bool allow_direct_reclaim(pg_data_t *pgdat)
{
	struct zone *zone;
	unsigned long pfmemalloc_reserve = 0;
	unsigned long free_pages = 0;
	int i;
	bool wmark_ok;

	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
		return true;

	for (i = 0; i <= ZONE_NORMAL; i++) {
		zone = &pgdat->node_zones[i];
		if (!managed_zone(zone))
			continue;

		if (!zone_reclaimable_pages(zone))
			continue;

		pfmemalloc_reserve += min_wmark_pages(zone);
		free_pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	/* If there are no reserves (unexpected config) then do not throttle */
	if (!pfmemalloc_reserve)
		return true;

	wmark_ok = free_pages > pfmemalloc_reserve / 2;

	/* kswapd must be awake if processes are being throttled */
	if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
		pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx,
						  (enum zone_type)ZONE_NORMAL);
		wake_up_interruptible(&pgdat->kswapd_wait);
	}

	return wmark_ok;
}

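/*
 * Illustrative sketch, not part of mainline: allow_direct_reclaim()
 * above throttles once free pages across ZONE_NORMAL and below fall
 * under half of the summed min watermarks. The hypothetical helper
 * below reports the remaining headroom in pages, accepting the
 * raciness inherent in the unsynchronized zone counters.
 */
static unsigned long __maybe_unused
example_pfmemalloc_headroom(pg_data_t *pgdat)
{
	unsigned long reserve = 0, free = 0;
	int i;

	for (i = 0; i <= ZONE_NORMAL; i++) {
		struct zone *zone = &pgdat->node_zones[i];

		if (!managed_zone(zone))
			continue;
		reserve += min_wmark_pages(zone);
		free += zone_page_state(zone, NR_FREE_PAGES);
	}

	/* Throttling starts once free <= reserve / 2 */
	return free > reserve / 2 ? free - reserve / 2 : 0;
}
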
/*
 * Throttle direct reclaimers if backing storage is backed by the network
 * and the PFMEMALLOC reserve for the preferred node is getting dangerously
 * depleted. kswapd will continue to make progress and wake the processes
 * when the low watermark is reached.
 *
 * Returns true if a fatal signal was delivered during throttling. If this
 * happens, the page allocator should not consider triggering the OOM killer.
 */
static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
				    nodemask_t *nodemask)
{
	struct zoneref *z;
	struct zone *zone;
	pg_data_t *pgdat = NULL;

	/*
	 * Kernel threads should not be throttled as they may be indirectly
	 * responsible for cleaning pages necessary for reclaim to make forward
	 * progress. kjournald for example may enter direct reclaim while
	 * committing a transaction where throttling it could force other
	 * processes to block on log_wait_commit().
	 */
	if (current->flags & PF_KTHREAD)
		goto out;

	/*
	 * If a fatal signal is pending, this process should not throttle.
	 * It should return quickly so it can exit and free its memory
	 */
	if (fatal_signal_pending(current))
		goto out;

	/*
	 * Check if the pfmemalloc reserves are ok by finding the first node
	 * with a usable ZONE_NORMAL or lower zone. The expectation is that
	 * GFP_KERNEL will be required for allocating network buffers when
	 * swapping over the network so ZONE_HIGHMEM is unusable.
	 *
	 * Throttling is based on the first usable node and throttled processes
	 * wait on a queue until kswapd makes progress and wakes them. There
	 * is an affinity then between processes waking up and where reclaim
	 * progress has been made assuming the process wakes on the same node.
	 * More importantly, processes running on remote nodes will not compete
	 * for remote pfmemalloc reserves and processes on different nodes
	 * should make reasonable progress.
	 */
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
					gfp_zone(gfp_mask), nodemask) {
		if (zone_idx(zone) > ZONE_NORMAL)
			continue;

		/* Throttle based on the first usable node */
		pgdat = zone->zone_pgdat;
		if (allow_direct_reclaim(pgdat))
			goto out;
		break;
	}

	/* If no zone was usable by the allocation flags then do not throttle */
	if (!pgdat)
		goto out;

	/* Account for the throttling */
	count_vm_event(PGSCAN_DIRECT_THROTTLE);

	/*
	 * If the caller cannot enter the filesystem, it's possible that it
	 * is due to the caller holding an FS lock or performing a journal
	 * transaction in the case of a filesystem like ext[3|4]. In this case,
	 * it is not safe to block on pfmemalloc_wait as kswapd could be
	 * blocked waiting on the same lock. Instead, throttle for up to a
	 * second before continuing.
	 */
	if (!(gfp_mask & __GFP_FS)) {
		wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
			allow_direct_reclaim(pgdat), HZ);

		goto check_pending;
	}

	/* Throttle until kswapd wakes the process */
	wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
		allow_direct_reclaim(pgdat));

check_pending:
	if (fatal_signal_pending(current))
		return true;

out:
	return false;
}

unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
				gfp_t gfp_mask, nodemask_t *nodemask)
{
	unsigned long nr_reclaimed;
	struct scan_control sc = {
		.nr_to_reclaim = SWAP_CLUSTER_MAX,
		.gfp_mask = current_gfp_context(gfp_mask),
		.reclaim_idx = gfp_zone(gfp_mask),
		.order = order,
		.nodemask = nodemask,
		.priority = DEF_PRIORITY,
		.may_writepage = !laptop_mode,
		.may_unmap = 1,
		.may_swap = 1,
	};

	/*
	 * scan_control uses s8 fields for order, priority, and reclaim_idx.
	 * Confirm they are large enough for max values.
	 */
	BUILD_BUG_ON(MAX_ORDER > S8_MAX);
	BUILD_BUG_ON(DEF_PRIORITY > S8_MAX);
	BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX);

	/*
	 * Do not enter reclaim if fatal signal was delivered while throttled.
	 * 1 is returned so that the page allocator does not OOM kill at this
	 * point.
	 */
	if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
		return 1;

	set_task_reclaim_state(current, &sc.reclaim_state);
	trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask);

	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);

	trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
	set_task_reclaim_state(current, NULL);

	return nr_reclaimed;
}

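/*
 * Illustrative caller sketch, not part of mainline: a simplified take on
 * what the page allocator's direct reclaim slow path does around
 * try_to_free_pages(); the helper name is hypothetical and the real
 * __alloc_pages_direct_reclaim() in mm/page_alloc.c does more.
 */
static unsigned long __maybe_unused
example_direct_reclaim(gfp_t gfp_mask, unsigned int order,
		       struct zonelist *zonelist, nodemask_t *nodemask)
{
	unsigned long progress;
	unsigned int noreclaim_flag;

	/* Keep reclaim's own allocations from recursing into reclaim */
	noreclaim_flag = memalloc_noreclaim_save();
	progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
	memalloc_noreclaim_restore(noreclaim_flag);

	return progress;
}
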
#ifdef CONFIG_MEMCG

/* Only used by soft limit reclaim. Do not reuse for anything else. */
unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
				     gfp_t gfp_mask, bool noswap,
				     pg_data_t *pgdat,
				     unsigned long *nr_scanned)
{
	struct scan_control sc = {
		.nr_to_reclaim = SWAP_CLUSTER_MAX,
		.target_mem_cgroup = memcg,
		.may_writepage = !laptop_mode,
		.may_unmap = 1,
		.reclaim_idx = MAX_NR_ZONES - 1,
		.may_swap = !noswap,
	};
	unsigned long lru_pages;

	WARN_ON_ONCE(!current->reclaim_state);

	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);

	trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
						      sc.gfp_mask);

	/*
	 * NOTE: Although we can get the priority field, using it
	 * here is not a good idea, since it limits the pages we can scan.
	 * If we don't reclaim here, the shrink_node from balance_pgdat
	 * will pick up pages from other mem cgroups as well. We hack
	 * the priority and make it zero.
	 */
	shrink_node_memcg(pgdat, memcg, &sc, &lru_pages);

	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);

	*nr_scanned = sc.nr_scanned;

	return sc.nr_reclaimed;
}

unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
					   unsigned long nr_pages,
					   gfp_t gfp_mask,
					   bool may_swap)
{
	struct zonelist *zonelist;
	unsigned long nr_reclaimed;
	unsigned long pflags;
	int nid;
	unsigned int noreclaim_flag;
	struct scan_control sc = {
		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
		.gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) |
				(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
		.reclaim_idx = MAX_NR_ZONES - 1,
		.target_mem_cgroup = memcg,
		.priority = DEF_PRIORITY,
		.may_writepage = !laptop_mode,
		.may_unmap = 1,
		.may_swap = may_swap,
	};

	set_task_reclaim_state(current, &sc.reclaim_state);
	/*
	 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
	 * take care of from where we get pages. So the node where we start the
	 * scan does not need to be the current node.
	 */
	nid = mem_cgroup_select_victim_node(memcg);

	zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];

	trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask);

	psi_memstall_enter(&pflags);
	noreclaim_flag = memalloc_noreclaim_save();

	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);

	memalloc_noreclaim_restore(noreclaim_flag);
	psi_memstall_leave(&pflags);

	trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
	set_task_reclaim_state(current, NULL);

	return nr_reclaimed;
}
#endif

static void age_active_anon(struct pglist_data *pgdat,
			    struct scan_control *sc)
{
	struct mem_cgroup *memcg;

	if (!total_swap_pages)
		return;

	memcg = mem_cgroup_iter(NULL, NULL, NULL);
	do {
		struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);

		if (inactive_list_is_low(lruvec, false, sc, true))
			shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
					   sc, LRU_ACTIVE_ANON);

		memcg = mem_cgroup_iter(NULL, memcg, NULL);
	} while (memcg);
}

static bool pgdat_watermark_boosted(pg_data_t *pgdat, int classzone_idx)
{
	int i;
	struct zone *zone;

	/*
	 * Check for watermark boosts top-down as the higher zones
	 * are more likely to be boosted. Both watermarks and boosts
	 * should not be checked at the same time as reclaim would
	 * start prematurely when there is no boosting and a lower
	 * zone is balanced.
	 */
	for (i = classzone_idx; i >= 0; i--) {
		zone = pgdat->node_zones + i;
		if (!managed_zone(zone))
			continue;

		if (zone->watermark_boost)
			return true;
	}

	return false;
}

/*
 * Returns true if there is an eligible zone balanced for the request order
 * and classzone_idx
 */
static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
{
	int i;
	unsigned long mark = -1;
	struct zone *zone;

	/*
	 * Check watermarks bottom-up as lower zones are more likely to
	 * meet watermarks.
	 */
	for (i = 0; i <= classzone_idx; i++) {
		zone = pgdat->node_zones + i;

		if (!managed_zone(zone))
			continue;

		mark = high_wmark_pages(zone);
		if (zone_watermark_ok_safe(zone, order, mark, classzone_idx))
			return true;
	}

	/*
	 * If a node has no populated zone within classzone_idx, it does not
	 * need balancing by definition. This can happen if a zone-restricted
	 * allocation tries to wake a remote kswapd.
	 */
	if (mark == -1)
		return true;

	return false;
}

/* Clear pgdat state for congested, dirty or under writeback. */
static void clear_pgdat_congested(pg_data_t *pgdat)
{
	clear_bit(PGDAT_CONGESTED, &pgdat->flags);
	clear_bit(PGDAT_DIRTY, &pgdat->flags);
	clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
}

/*
 * Prepare kswapd for sleeping. This verifies that there are no processes
 * waiting in throttle_direct_reclaim() and that watermarks have been met.
 *
 * Returns true if kswapd is ready to sleep
 */
static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx)
{
	/*
	 * The throttled processes are normally woken up in balance_pgdat() as
	 * soon as allow_direct_reclaim() is true. But there is a potential
	 * race between when kswapd checks the watermarks and a process gets
	 * throttled. There is also a potential race if processes get
	 * throttled, kswapd wakes, a large process exits thereby balancing the
	 * zones, which causes kswapd to exit balance_pgdat() before reaching
	 * the wake up checks. If kswapd is going to sleep, no process should
	 * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
	 * the wake up is premature, processes will wake kswapd and get
	 * throttled again. The difference from wake ups in balance_pgdat() is
	 * that here we are under prepare_to_wait().
	 */
	if (waitqueue_active(&pgdat->pfmemalloc_wait))
		wake_up_all(&pgdat->pfmemalloc_wait);

	/* Hopeless node, leave it to direct reclaim */
	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
		return true;

	if (pgdat_balanced(pgdat, order, classzone_idx)) {
		clear_pgdat_congested(pgdat);
		return true;
	}

	return false;
}

/*
 * kswapd shrinks a node of pages that are at or below the highest usable
 * zone that is currently unbalanced.
 *
 * Returns true if kswapd scanned at least the requested number of pages to
 * reclaim or if the lack of progress was due to pages under writeback.
 * This is used to determine if the scanning priority needs to be raised.
 */
static bool kswapd_shrink_node(pg_data_t *pgdat,
			       struct scan_control *sc)
{
	struct zone *zone;
	int z;

	/* Reclaim a number of pages proportional to the number of zones */
	sc->nr_to_reclaim = 0;
	for (z = 0; z <= sc->reclaim_idx; z++) {
		zone = pgdat->node_zones + z;
		if (!managed_zone(zone))
			continue;

		sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
	}

	/*
	 * Historically care was taken to put equal pressure on all zones but
	 * now pressure is applied based on node LRU order.
	 */
	shrink_node(pgdat, sc);

	/*
	 * Fragmentation may mean that the system cannot be rebalanced for
	 * high-order allocations. If twice the allocation size has been
	 * reclaimed then recheck watermarks only at order-0 to prevent
	 * excessive reclaim. Assume that a process requesting a high-order
	 * allocation can direct reclaim/compact.
	 */
	if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
		sc->order = 0;

	return sc->nr_scanned >= sc->nr_to_reclaim;
}

/*
 * For kswapd, balance_pgdat() will reclaim pages across a node from zones
 * that are eligible for use by the caller until at least one zone is
 * balanced.
 *
 * Returns the order kswapd finished reclaiming at.
 *
 * kswapd scans the zones in the highmem->normal->dma direction. It skips
 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
 * found to have free_pages <= high_wmark_pages(zone), any page in that zone
 * or lower is eligible for reclaim until at least one usable zone is
 * balanced.
 */
static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
{
	int i;
	unsigned long nr_soft_reclaimed;
	unsigned long nr_soft_scanned;
	unsigned long pflags;
	unsigned long nr_boost_reclaim;
	unsigned long zone_boosts[MAX_NR_ZONES] = { 0, };
	bool boosted;
	struct zone *zone;
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.order = order,
		.may_unmap = 1,
	};

	set_task_reclaim_state(current, &sc.reclaim_state);
	psi_memstall_enter(&pflags);
	__fs_reclaim_acquire();

	count_vm_event(PAGEOUTRUN);

	/*
	 * Account for the reclaim boost. Note that the zone boost is left in
	 * place so that parallel allocations that are near the watermark will
	 * stall or enter direct reclaim until kswapd is finished.
	 */
	nr_boost_reclaim = 0;
	for (i = 0; i <= classzone_idx; i++) {
		zone = pgdat->node_zones + i;
		if (!managed_zone(zone))
			continue;

		nr_boost_reclaim += zone->watermark_boost;
		zone_boosts[i] = zone->watermark_boost;
	}
	boosted = nr_boost_reclaim;

restart:
	sc.priority = DEF_PRIORITY;
	do {
		unsigned long nr_reclaimed = sc.nr_reclaimed;
		bool raise_priority = true;
		bool balanced;
		bool ret;

		sc.reclaim_idx = classzone_idx;

		/*
		 * If the number of buffer_heads exceeds the maximum allowed
		 * then consider reclaiming from all zones. This has a dual
		 * purpose -- on 64-bit systems it is expected that
		 * buffer_heads are stripped during active rotation. On 32-bit
		 * systems, highmem pages can pin lowmem memory and shrinking
		 * buffers can relieve lowmem pressure. Reclaim may still not
		 * go ahead if all eligible zones for the original allocation
		 * request are balanced to avoid excessive reclaim from kswapd.
		 */
		if (buffer_heads_over_limit) {
			for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
				zone = pgdat->node_zones + i;
				if (!managed_zone(zone))
					continue;

				sc.reclaim_idx = i;
				break;
			}
		}

		/*
		 * If the pgdat is imbalanced then ignore boosting and preserve
		 * the watermarks for a later time and restart. Note that the
		 * zone watermarks will still be reset at the end of balancing
		 * on the grounds that the normal reclaim should be enough to
		 * re-evaluate if boosting is required when kswapd next wakes.
		 */
		balanced = pgdat_balanced(pgdat, sc.order, classzone_idx);
		if (!balanced && nr_boost_reclaim) {
			nr_boost_reclaim = 0;
			goto restart;
		}

		/*
		 * If boosting is not active then only reclaim if there are no
		 * eligible zones. Note that sc.reclaim_idx is not used as
		 * buffer_heads_over_limit may have adjusted it.
		 */
		if (!nr_boost_reclaim && balanced)
			goto out;

		/* Limit the priority of boosting to avoid reclaim writeback */
		if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2)
			raise_priority = false;

		/*
		 * Do not writeback or swap pages for boosted reclaim. The
		 * intent is to relieve pressure not issue sub-optimal IO
		 * from reclaim context. If no pages are reclaimed, the
		 * reclaim will be aborted.
		 */
		sc.may_writepage = !laptop_mode && !nr_boost_reclaim;
		sc.may_swap = !nr_boost_reclaim;

		/*
		 * Do some background aging of the anon list, to give
		 * pages a chance to be referenced before reclaiming. All
		 * pages are rotated regardless of classzone as this is
		 * about consistent aging.
		 */
		age_active_anon(pgdat, &sc);

		/*
		 * If we're having trouble reclaiming, start doing writepage
		 * even in laptop mode.
		 */
		if (sc.priority < DEF_PRIORITY - 2)
			sc.may_writepage = 1;

		/* Call soft limit reclaim before calling shrink_node. */
		sc.nr_scanned = 0;
		nr_soft_scanned = 0;
		nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order,
						sc.gfp_mask, &nr_soft_scanned);
		sc.nr_reclaimed += nr_soft_reclaimed;

		/*
		 * There should be no need to raise the scanning priority if
		 * enough pages are already being scanned that the high
		 * watermark would be met at 100% efficiency.
		 */
		if (kswapd_shrink_node(pgdat, &sc))
			raise_priority = false;

		/*
		 * If the low watermark is met there is no need for processes
		 * to be throttled on pfmemalloc_wait as they should now be
		 * able to safely make forward progress. Wake them.
		 */
		if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
				allow_direct_reclaim(pgdat))
			wake_up_all(&pgdat->pfmemalloc_wait);

		/* Check if kswapd should be suspending */
		__fs_reclaim_release();
		ret = try_to_freeze();
		__fs_reclaim_acquire();
		if (ret || kthread_should_stop())
			break;

		/*
		 * Raise priority if scanning rate is too low or there was no
		 * progress in reclaiming pages
		 */
		nr_reclaimed = sc.nr_reclaimed - nr_reclaimed;
		nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed);

		/*
		 * If reclaim made no progress for a boost, stop reclaim as
		 * IO cannot be queued and it could be an infinite loop in
		 * extreme circumstances.
		 */
		if (nr_boost_reclaim && !nr_reclaimed)
			break;

		if (raise_priority || !nr_reclaimed)
			sc.priority--;
	} while (sc.priority >= 1);

	if (!sc.nr_reclaimed)
		pgdat->kswapd_failures++;

out:
	/* If reclaim was boosted, account for the reclaim done in this pass */
	if (boosted) {
		unsigned long flags;

		for (i = 0; i <= classzone_idx; i++) {
			if (!zone_boosts[i])
				continue;

			/* Increments are under the zone lock */
			zone = pgdat->node_zones + i;
			spin_lock_irqsave(&zone->lock, flags);
			zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]);
			spin_unlock_irqrestore(&zone->lock, flags);
		}

		/*
		 * As there is now likely space, wake up kcompactd to
		 * defragment pageblocks.
		 */
		wakeup_kcompactd(pgdat, pageblock_order, classzone_idx);
	}

	snapshot_refaults(NULL, pgdat);
	__fs_reclaim_release();
	psi_memstall_leave(&pflags);
	set_task_reclaim_state(current, NULL);

	/*
	 * Return the order kswapd stopped reclaiming at as
	 * prepare_kswapd_sleep() takes it into account. If another caller
	 * entered the allocator slow path while kswapd was awake, order will
	 * remain at the higher level.
	 */
	return sc.order;
}

/*
 * The pgdat->kswapd_classzone_idx is used to pass the highest zone index to be
 * reclaimed by kswapd from the waker. If the value is MAX_NR_ZONES, which is
 * not a valid index, then kswapd is either running for the first time or
 * couldn't sleep after the previous reclaim attempt (the node is still
 * unbalanced). In that case return the zone index of the previous kswapd
 * reclaim cycle.
 */
static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat,
					   enum zone_type prev_classzone_idx)
{
	if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
		return prev_classzone_idx;
	return pgdat->kswapd_classzone_idx;
}

static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
				unsigned int classzone_idx)
{
	long remaining = 0;
	DEFINE_WAIT(wait);

	if (freezing(current) || kthread_should_stop())
		return;

	prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);

	/*
	 * Try to sleep for a short interval. Note that kcompactd will only be
	 * woken if it is possible to sleep for a short interval. This is
	 * deliberate on the assumption that if reclaim cannot keep an
	 * eligible zone balanced that it's also unlikely that compaction will
	 * succeed.
	 */
	if (prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) {
		/*
		 * Compaction records what page blocks it recently failed to
		 * isolate pages from and skips them in the future scanning.
		 * When kswapd is going to sleep, it is reasonable to assume
		 * that enough pages have been freed and compaction may now
		 * succeed, so reset the cache.
		 */
		reset_isolation_suitable(pgdat);

		/*
		 * We have freed the memory, now we should compact it to make
		 * allocation of the requested order possible.
		 */
		wakeup_kcompactd(pgdat, alloc_order, classzone_idx);

		remaining = schedule_timeout(HZ/10);

		/*
		 * If woken prematurely then reset kswapd_classzone_idx and
		 * order. The values will either be from a wakeup request or
		 * the previous request that slept prematurely.
		 */
		if (remaining) {
			pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
			pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order);
		}

		finish_wait(&pgdat->kswapd_wait, &wait);
		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
	}

	/*
	 * After a short sleep, check if it was a premature sleep. If not, then
	 * go fully to sleep until explicitly woken up.
	 */
	if (!remaining &&
	    prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) {
		trace_mm_vmscan_kswapd_sleep(pgdat->node_id);

		/*
		 * vmstat counters are not perfectly accurate and the estimated
		 * value for counters such as NR_FREE_PAGES can deviate from the
		 * true value by nr_online_cpus * threshold. To avoid the zone
		 * watermarks being breached while under pressure, we reduce the
		 * per-cpu vmstat threshold while kswapd is awake and restore
		 * them before going back to sleep.
		 */
		set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);

		if (!kthread_should_stop())
			schedule();

		set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
	} else {
		if (remaining)
			count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
		else
			count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
	}
	finish_wait(&pgdat->kswapd_wait, &wait);
}

/*
 * The background pageout daemon, started as a kernel thread
 * from the init process.
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up. This is needed for things like routing
 * etc, where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */
static int kswapd(void *p)
{
	unsigned int alloc_order, reclaim_order;
	unsigned int classzone_idx = MAX_NR_ZONES - 1;
	pg_data_t *pgdat = (pg_data_t *)p;
	struct task_struct *tsk = current;
	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(tsk, cpumask);

	/*
	 * Tell the memory management that we're a "memory allocator",
	 * and that if we need more memory we should get access to it
	 * regardless (see "__alloc_pages()"). "kswapd" should
	 * never get caught in the normal page freeing logic.
	 *
	 * (Kswapd normally doesn't need memory anyway, but sometimes
	 * you need a small amount of memory in order to be able to
	 * page out something else, and this flag essentially protects
	 * us from recursively trying to free more memory as we're
	 * trying to free the first piece of memory in the first place).
	 */
	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
	set_freezable();

	pgdat->kswapd_order = 0;
	pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
	for ( ; ; ) {
		bool ret;

		alloc_order = reclaim_order = pgdat->kswapd_order;
		classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);

kswapd_try_sleep:
		kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
				    classzone_idx);

		/* Read the new order and classzone_idx */
		alloc_order = reclaim_order = pgdat->kswapd_order;
		classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
		pgdat->kswapd_order = 0;
		pgdat->kswapd_classzone_idx = MAX_NR_ZONES;

		ret = try_to_freeze();
		if (kthread_should_stop())
			break;

		/*
		 * We can speed up thawing tasks if we don't call balance_pgdat
		 * after returning from the refrigerator
		 */
		if (ret)
			continue;

		/*
		 * Reclaim begins at the requested order but if a high-order
		 * reclaim fails then kswapd falls back to reclaiming for
		 * order-0. If that happens, kswapd will consider sleeping
		 * for the order it finished reclaiming at (reclaim_order)
		 * but kcompactd is woken to compact for the original
		 * request (alloc_order).
		 */
		trace_mm_vmscan_kswapd_wake(pgdat->node_id, classzone_idx,
					    alloc_order);
		reclaim_order = balance_pgdat(pgdat, alloc_order, classzone_idx);
		if (reclaim_order < alloc_order)
			goto kswapd_try_sleep;
	}

	tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);

	return 0;
}

/*
 * A zone is low on free memory or too fragmented for high-order memory. If
 * kswapd should reclaim (direct reclaim is deferred), wake it up for the zone's
 * pgdat. It will wake up kcompactd after reclaiming memory. If kswapd reclaim
 * has failed or is not needed, still wake up kcompactd if only compaction is
 * needed.
 */
void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
		   enum zone_type classzone_idx)
{
	pg_data_t *pgdat;

	if (!managed_zone(zone))
		return;

	if (!cpuset_zone_allowed(zone, gfp_flags))
		return;
	pgdat = zone->zone_pgdat;

	if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
		pgdat->kswapd_classzone_idx = classzone_idx;
	else
		pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx,
						  classzone_idx);
	pgdat->kswapd_order = max(pgdat->kswapd_order, order);
	if (!waitqueue_active(&pgdat->kswapd_wait))
		return;

	/* Hopeless node, leave it to direct reclaim if possible */
	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ||
	    (pgdat_balanced(pgdat, order, classzone_idx) &&
	     !pgdat_watermark_boosted(pgdat, classzone_idx))) {
		/*
		 * There may be plenty of free memory available, but it's too
		 * fragmented for high-order allocations. Wake up kcompactd
		 * and rely on compaction_suitable() to determine if it's
		 * needed. If it fails, it will defer subsequent attempts to
		 * ratelimit its work.
		 */
		if (!(gfp_flags & __GFP_DIRECT_RECLAIM))
			wakeup_kcompactd(pgdat, order, classzone_idx);
		return;
	}

	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, classzone_idx, order,
				      gfp_flags);
	wake_up_interruptible(&pgdat->kswapd_wait);
}

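/*
 * Illustrative waker sketch, not part of mainline: loosely modelled on
 * wake_all_kswapds() in mm/page_alloc.c, showing how an allocator walks
 * its zonelist and pokes each distinct node's kswapd once. Names other
 * than wakeup_kswapd() are assumptions.
 */
static void __maybe_unused
example_wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
			 struct zonelist *zonelist,
			 enum zone_type classzone_idx, nodemask_t *nodemask)
{
	struct zoneref *z;
	struct zone *zone;
	pg_data_t *last_pgdat = NULL;

	for_each_zone_zonelist_nodemask(zone, z, zonelist, classzone_idx,
					nodemask) {
		/* One wakeup per node is enough */
		if (zone->zone_pgdat == last_pgdat)
			continue;
		last_pgdat = zone->zone_pgdat;
		wakeup_kswapd(zone, gfp_mask, order, classzone_idx);
	}
}
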
#ifdef CONFIG_HIBERNATION
/*
 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
 * freed pages.
 *
 * Rather than trying to age LRUs the aim is to preserve the overall
 * LRU order by reclaiming preferentially
 * inactive > active > active referenced > active mapped
 */
unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
{
	struct scan_control sc = {
		.nr_to_reclaim = nr_to_reclaim,
		.gfp_mask = GFP_HIGHUSER_MOVABLE,
		.reclaim_idx = MAX_NR_ZONES - 1,
		.priority = DEF_PRIORITY,
		.may_writepage = 1,
		.may_unmap = 1,
		.may_swap = 1,
		.hibernation_mode = 1,
	};
	struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
	unsigned long nr_reclaimed;
	unsigned int noreclaim_flag;

	fs_reclaim_acquire(sc.gfp_mask);
	noreclaim_flag = memalloc_noreclaim_save();
	set_task_reclaim_state(current, &sc.reclaim_state);

	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);

	set_task_reclaim_state(current, NULL);
	memalloc_noreclaim_restore(noreclaim_flag);
	fs_reclaim_release(sc.gfp_mask);

	return nr_reclaimed;
}
#endif /* CONFIG_HIBERNATION */

/*
 * It's optimal to keep kswapds on the same CPUs as their memory, but
 * not required for correctness. So if the last cpu in a node goes
 * away, we get changed to run anywhere: as the first one comes back,
 * restore their cpu bindings.
 */
static int kswapd_cpu_online(unsigned int cpu)
{
	int nid;

	for_each_node_state(nid, N_MEMORY) {
		pg_data_t *pgdat = NODE_DATA(nid);
		const struct cpumask *mask;

		mask = cpumask_of_node(pgdat->node_id);

		if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
			/* One of our CPUs online: restore mask */
			set_cpus_allowed_ptr(pgdat->kswapd, mask);
	}
	return 0;
}

/*
 * This kswapd start function will be called by init and node-hot-add.
 * On node-hot-add, kswapd will be moved to the proper cpus if cpus are
 * hot-added.
 */
int kswapd_run(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int ret = 0;

	if (pgdat->kswapd)
		return 0;

	pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
	if (IS_ERR(pgdat->kswapd)) {
		/* failure at boot is fatal */
		BUG_ON(system_state < SYSTEM_RUNNING);
		pr_err("Failed to start kswapd on node %d\n", nid);
		ret = PTR_ERR(pgdat->kswapd);
		pgdat->kswapd = NULL;
	}
	return ret;
}

/*
 * Called by memory hotplug when all memory in a node is offlined. Caller must
 * hold mem_hotplug_begin/end().
 */
void kswapd_stop(int nid)
{
	struct task_struct *kswapd = NODE_DATA(nid)->kswapd;

	if (kswapd) {
		kthread_stop(kswapd);
		NODE_DATA(nid)->kswapd = NULL;
	}
}

static int __init kswapd_init(void)
{
	int nid, ret;

	swap_setup();
	for_each_node_state(nid, N_MEMORY)
		kswapd_run(nid);
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"mm/vmscan:online", kswapd_cpu_online,
					NULL);
	WARN_ON(ret < 0);
	return 0;
}

module_init(kswapd_init)

#ifdef CONFIG_NUMA
/*
 * Node reclaim mode
 *
 * If non-zero call node_reclaim when the number of free pages falls below
 * the watermarks.
 */
int node_reclaim_mode __read_mostly;

#define RECLAIM_OFF 0
#define RECLAIM_ZONE (1<<0)	/* Run shrink_inactive_list on the zone */
#define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */
#define RECLAIM_UNMAP (1<<2)	/* Unmap pages during reclaim */

/*
 * Priority for NODE_RECLAIM. This determines the fraction of pages
 * of a node considered at each pass; priority 4 scans 1/16th of
 * the node.
 */
#define NODE_RECLAIM_PRIORITY 4

/*
 * Percentage of pages in a zone that must be unmapped for node_reclaim to
 * occur.
 */
int sysctl_min_unmapped_ratio = 1;

/*
 * If the number of slab pages in a zone grows beyond this percentage then
 * slab reclaim needs to occur.
 */
int sysctl_min_slab_ratio = 5;

static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
{
	unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
	unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
		node_page_state(pgdat, NR_ACTIVE_FILE);

	/*
	 * It's possible for there to be more file mapped pages than
	 * accounted for by the pages on the file LRU lists because
	 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
	 */
	return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
}

/* Work out how many page cache pages we can reclaim in this reclaim_mode */
static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
{
	unsigned long nr_pagecache_reclaimable;
	unsigned long delta = 0;

	/*
	 * If RECLAIM_UNMAP is set, then all file pages are considered
	 * potentially reclaimable. Otherwise, we have to worry about
	 * pages like swapcache and node_unmapped_file_pages() provides
	 * a better estimate
	 */
	if (node_reclaim_mode & RECLAIM_UNMAP)
		nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
	else
		nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);

	/* If we can't clean pages, remove dirty pages from consideration */
	if (!(node_reclaim_mode & RECLAIM_WRITE))
		delta += node_page_state(pgdat, NR_FILE_DIRTY);

	/* Watch for any possible underflows due to delta */
	if (unlikely(delta > nr_pagecache_reclaimable))
		delta = nr_pagecache_reclaimable;

	return nr_pagecache_reclaimable - delta;
}

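/*
 * Worked example (hypothetical numbers, RECLAIM_UNMAP and RECLAIM_WRITE
 * both clear): a node with 1000 file LRU pages, 300 of them mapped and
 * 100 dirty, reports (1000 - 300) - 100 = 600 reclaimable pagecache
 * pages for the node_reclaim() heuristics below.
 */
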
/*
 * Try to free up some pages from this node through reclaim.
 */
static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
{
	/* Minimum pages needed in order to stay on node */
	const unsigned long nr_pages = 1 << order;
	struct task_struct *p = current;
	unsigned int noreclaim_flag;
	struct scan_control sc = {
		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
		.gfp_mask = current_gfp_context(gfp_mask),
		.order = order,
		.priority = NODE_RECLAIM_PRIORITY,
		.may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
		.may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
		.may_swap = 1,
		.reclaim_idx = gfp_zone(gfp_mask),
	};

	trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
					   sc.gfp_mask);

	cond_resched();
	fs_reclaim_acquire(sc.gfp_mask);
	/*
	 * We need to be able to allocate from the reserves for RECLAIM_UNMAP
	 * and we also need to be able to write out pages for RECLAIM_WRITE
	 * and RECLAIM_UNMAP.
	 */
	noreclaim_flag = memalloc_noreclaim_save();
	p->flags |= PF_SWAPWRITE;
	set_task_reclaim_state(p, &sc.reclaim_state);

	if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) {
		/*
		 * Free memory by calling shrink node with increasing
		 * priorities until we have enough memory freed.
		 */
		do {
			shrink_node(pgdat, &sc);
		} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
	}

	set_task_reclaim_state(p, NULL);
	current->flags &= ~PF_SWAPWRITE;
	memalloc_noreclaim_restore(noreclaim_flag);
	fs_reclaim_release(sc.gfp_mask);

	trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed);

	return sc.nr_reclaimed >= nr_pages;
}

int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
{
	int ret;

	/*
	 * Node reclaim reclaims unmapped file backed pages and
	 * slab pages if we are over the defined limits.
	 *
	 * A small portion of unmapped file backed pages is needed for
	 * file I/O otherwise pages read by file I/O will be immediately
	 * thrown out if the node is overallocated. So we do not reclaim
	 * if less than a specified percentage of the node is used by
	 * unmapped file backed pages.
	 */
	if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
	    node_page_state(pgdat, NR_SLAB_RECLAIMABLE) <= pgdat->min_slab_pages)
		return NODE_RECLAIM_FULL;

	/*
	 * Do not scan if the allocation should not be delayed.
	 */
	if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
		return NODE_RECLAIM_NOSCAN;

	/*
	 * Only run node reclaim on the local node or on nodes that do not
	 * have associated processors. This will favor the local processor
	 * over remote processors and spread off node memory allocations
	 * as wide as possible.
	 */
	if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
		return NODE_RECLAIM_NOSCAN;

	if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
		return NODE_RECLAIM_NOSCAN;

	ret = __node_reclaim(pgdat, gfp_mask, order);
	clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);

	if (!ret)
		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);

	return ret;
}
#endif

/*
 * page_evictable - test whether a page is evictable
 * @page: the page to test
 *
 * Test whether page is evictable--i.e., should be placed on active/inactive
 * lists vs unevictable list.
 *
 * Reasons page might not be evictable:
 * (1) page's mapping marked unevictable
 * (2) page is part of an mlocked VMA
 *
 */
int page_evictable(struct page *page)
{
	int ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
	rcu_read_unlock();
	return ret;
}

/**
 * check_move_unevictable_pages - check pages for evictability and move to
 * appropriate zone lru list
 * @pvec: pagevec with lru pages to check
 *
 * Checks pages for evictability, if an evictable page is in the unevictable
 * lru list, moves it to the appropriate evictable lru list. This function
 * should be only used for lru pages.
 */
void check_move_unevictable_pages(struct pagevec *pvec)
{
	struct lruvec *lruvec;
	struct pglist_data *pgdat = NULL;
	int pgscanned = 0;
	int pgrescued = 0;
	int i;

	for (i = 0; i < pvec->nr; i++) {
		struct page *page = pvec->pages[i];
		struct pglist_data *pagepgdat = page_pgdat(page);

		pgscanned++;
		if (pagepgdat != pgdat) {
			if (pgdat)
				spin_unlock_irq(&pgdat->lru_lock);
			pgdat = pagepgdat;
			spin_lock_irq(&pgdat->lru_lock);
		}
		lruvec = mem_cgroup_page_lruvec(page, pgdat);

		if (!PageLRU(page) || !PageUnevictable(page))
			continue;

		if (page_evictable(page)) {
			enum lru_list lru = page_lru_base_type(page);

			VM_BUG_ON_PAGE(PageActive(page), page);
			ClearPageUnevictable(page);
			del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
			add_page_to_lru_list(page, lruvec, lru);
			pgrescued++;
		}
	}

	if (pgdat) {
		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
		spin_unlock_irq(&pgdat->lru_lock);
	}
}
EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
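
/*
 * Illustrative caller sketch, not part of mainline: batching pages into
 * a pagevec before handing them to check_move_unevictable_pages(), in
 * the style of shmem_unlock_mapping(). The helper name is hypothetical
 * and the caller is assumed to hold references on the pages.
 */
static void __maybe_unused example_rescue_pages(struct page **pages, int nr)
{
	struct pagevec pvec;
	int i;

	pagevec_init(&pvec);
	for (i = 0; i < nr; i++) {
		/* pagevec_add() returns the slots left; 0 means full */
		if (!pagevec_add(&pvec, pages[i])) {
			check_move_unevictable_pages(&pvec);
			pagevec_reinit(&pvec);
		}
	}
	if (pagevec_count(&pvec))
		check_move_unevictable_pages(&pvec);
}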