1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/mm/vmscan.c
4 *
5 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
6 *
7 * Swap reorganised 29.12.95, Stephen Tweedie.
8 * kswapd added: 7.1.96 sct
9 * Removed kswapd_ctl limits, and swap out as many pages as needed
10 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
11 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
12 * Multiqueue VM started 5.8.00, Rik van Riel.
13 */
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/mm.h>
18#include <linux/sched/mm.h>
19#include <linux/module.h>
20#include <linux/gfp.h>
21#include <linux/kernel_stat.h>
22#include <linux/swap.h>
23#include <linux/pagemap.h>
24#include <linux/init.h>
25#include <linux/highmem.h>
26#include <linux/vmpressure.h>
27#include <linux/vmstat.h>
28#include <linux/file.h>
29#include <linux/writeback.h>
30#include <linux/blkdev.h>
31#include <linux/buffer_head.h> /* for try_to_release_page(),
32 buffer_heads_over_limit */
33#include <linux/mm_inline.h>
34#include <linux/backing-dev.h>
35#include <linux/rmap.h>
36#include <linux/topology.h>
37#include <linux/cpu.h>
38#include <linux/cpuset.h>
39#include <linux/compaction.h>
40#include <linux/notifier.h>
41#include <linux/rwsem.h>
42#include <linux/delay.h>
43#include <linux/kthread.h>
44#include <linux/freezer.h>
45#include <linux/memcontrol.h>
46#include <linux/delayacct.h>
47#include <linux/sysctl.h>
48#include <linux/oom.h>
49#include <linux/pagevec.h>
50#include <linux/prefetch.h>
51#include <linux/printk.h>
52#include <linux/dax.h>
53#include <linux/psi.h>
54
55#include <asm/tlbflush.h>
56#include <asm/div64.h>
57
58#include <linux/swapops.h>
59#include <linux/balloon_compaction.h>
60
61#include "internal.h"
62
63#define CREATE_TRACE_POINTS
64#include <trace/events/vmscan.h>
65
66struct scan_control {
67 /* How many pages shrink_list() should reclaim */
68 unsigned long nr_to_reclaim;
69
70 /*
71 * Nodemask of nodes allowed by the caller. If NULL, all nodes
72 * are scanned.
73 */
74 nodemask_t *nodemask;
75
76 /*
77 * The memory cgroup that hit its limit and as a result is the
78 * primary target of this reclaim invocation.
79 */
80 struct mem_cgroup *target_mem_cgroup;
81
82 /*
83 * Scan pressure balancing between anon and file LRUs
84 */
85 unsigned long anon_cost;
86 unsigned long file_cost;
87
88 /* Can active pages be deactivated as part of reclaim? */
89#define DEACTIVATE_ANON 1
90#define DEACTIVATE_FILE 2
91 unsigned int may_deactivate:2;
92 unsigned int force_deactivate:1;
93 unsigned int skipped_deactivate:1;
94
95 /* Writepage batching in laptop mode; RECLAIM_WRITE */
96 unsigned int may_writepage:1;
97
98 /* Can mapped pages be reclaimed? */
99 unsigned int may_unmap:1;
100
101 /* Can pages be swapped as part of reclaim? */
102 unsigned int may_swap:1;
103
104 /*
105 * Cgroups are not reclaimed below their configured memory.low,
106 * unless we threaten to OOM. If any cgroups are skipped due to
107 * memory.low and nothing was reclaimed, go back for memory.low.
108 */
109 unsigned int memcg_low_reclaim:1;
110 unsigned int memcg_low_skipped:1;
111
112 unsigned int hibernation_mode:1;
113
114 /* One of the zones is ready for compaction */
115 unsigned int compaction_ready:1;
116
117 /* There is easily reclaimable cold cache in the current node */
118 unsigned int cache_trim_mode:1;
119
120 /* The file pages on the current node are dangerously low */
121 unsigned int file_is_tiny:1;
122
123 /* Allocation order */
124 s8 order;
125
126 /* Scan (total_size >> priority) pages at once */
127 s8 priority;
128
129 /* The highest zone to isolate pages for reclaim from */
130 s8 reclaim_idx;
131
132 /* This context's GFP mask */
133 gfp_t gfp_mask;
134
135 /* Incremented by the number of inactive pages that were scanned */
136 unsigned long nr_scanned;
137
138 /* Number of pages freed so far during a call to shrink_zones() */
139 unsigned long nr_reclaimed;
140
141 struct {
142 unsigned int dirty;
143 unsigned int unqueued_dirty;
144 unsigned int congested;
145 unsigned int writeback;
146 unsigned int immediate;
147 unsigned int file_taken;
148 unsigned int taken;
149 } nr;
150
151 /* for recording the slab memory reclaimed so far */
152 struct reclaim_state reclaim_state;
153};
154
155#ifdef ARCH_HAS_PREFETCHW
156#define prefetchw_prev_lru_page(_page, _base, _field) \
157 do { \
158 if ((_page)->lru.prev != _base) { \
159 struct page *prev; \
160 \
161 prev = lru_to_page(&(_page->lru)); \
162 prefetchw(&prev->_field); \
163 } \
164 } while (0)
165#else
166#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
167#endif
168
169/*
170 * From 0 .. 200. Higher means more swappy.
171 */
172int vm_swappiness = 60;
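/*
 * A rough sketch of how this knob is meant to behave (see get_scan_count()
 * further down for the authoritative math): 0 avoids swapping anonymous
 * pages whenever file pages are still available, 100 treats the IO cost of
 * swapping anon pages and of re-reading file pages as roughly equal, and
 * 200 pushes reclaim almost entirely onto the anonymous LRUs. It is
 * normally tuned through the sysctl, e.g.:
 *
 *	# echo 10 > /proc/sys/vm/swappiness
 */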
173
174static void set_task_reclaim_state(struct task_struct *task,
175 struct reclaim_state *rs)
176{
177 /* Check for an overwrite */
178 WARN_ON_ONCE(rs && task->reclaim_state);
179
180 /* Check for the nulling of an already-nulled member */
181 WARN_ON_ONCE(!rs && !task->reclaim_state);
182
183 task->reclaim_state = rs;
184}
185
186static LIST_HEAD(shrinker_list);
187static DECLARE_RWSEM(shrinker_rwsem);
188
189#ifdef CONFIG_MEMCG
190/*
191 * We allow subsystems to populate their shrinker-related
192 * LRU lists before register_shrinker_prepared() is called
193 * for the shrinker, since we don't want to impose
194 * restrictions on their internal registration order.
195 * In this case shrink_slab_memcg() may find the corresponding
196 * bit set in the shrinker map.
197 *
198 * This value is used by the function to detect registering
199 * shrinkers and to skip do_shrink_slab() calls for them.
200 */
201#define SHRINKER_REGISTERING ((struct shrinker *)~0UL)
202
203static DEFINE_IDR(shrinker_idr);
204static int shrinker_nr_max;
205
206static int prealloc_memcg_shrinker(struct shrinker *shrinker)
207{
208 int id, ret = -ENOMEM;
209
210 down_write(&shrinker_rwsem);
211 /* This may call shrinker, so it must use down_read_trylock() */
212 id = idr_alloc(&shrinker_idr, SHRINKER_REGISTERING, 0, 0, GFP_KERNEL);
213 if (id < 0)
214 goto unlock;
215
216 if (id >= shrinker_nr_max) {
217 if (memcg_expand_shrinker_maps(id)) {
218 idr_remove(&shrinker_idr, id);
219 goto unlock;
220 }
221
222 shrinker_nr_max = id + 1;
223 }
224 shrinker->id = id;
225 ret = 0;
226unlock:
227 up_write(&shrinker_rwsem);
228 return ret;
229}
230
231static void unregister_memcg_shrinker(struct shrinker *shrinker)
232{
233 int id = shrinker->id;
234
235 BUG_ON(id < 0);
236
237 down_write(&shrinker_rwsem);
238 idr_remove(&shrinker_idr, id);
239 up_write(&shrinker_rwsem);
240}
241
242static bool cgroup_reclaim(struct scan_control *sc)
243{
244 return sc->target_mem_cgroup;
245}
246
247/**
248 * writeback_throttling_sane - is the usual dirty throttling mechanism available?
249 * @sc: scan_control in question
250 *
251 * The normal page dirty throttling mechanism in balance_dirty_pages() is
252 * completely broken with the legacy memcg and direct stalling in
253 * shrink_page_list() is used for throttling instead, which lacks all the
254 * niceties such as fairness, adaptive pausing, bandwidth proportional
255 * allocation and configurability.
256 *
257 * This function tests whether the vmscan currently in progress can assume
258 * that the normal dirty throttling mechanism is operational.
259 */
260static bool writeback_throttling_sane(struct scan_control *sc)
261{
262 if (!cgroup_reclaim(sc))
263 return true;
264#ifdef CONFIG_CGROUP_WRITEBACK
265 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
266 return true;
267#endif
268 return false;
269}
270#else
271static int prealloc_memcg_shrinker(struct shrinker *shrinker)
272{
273 return 0;
274}
275
276static void unregister_memcg_shrinker(struct shrinker *shrinker)
277{
278}
279
280static bool cgroup_reclaim(struct scan_control *sc)
281{
282 return false;
283}
284
285static bool writeback_throttling_sane(struct scan_control *sc)
286{
287 return true;
288}
289#endif
290
291/*
292 * This misses isolated pages which are not accounted for to save counters.
293 * As the data only determines if reclaim or compaction continues, it is
294 * not expected that isolated pages will be a dominating factor.
295 */
296unsigned long zone_reclaimable_pages(struct zone *zone)
297{
298 unsigned long nr;
299
300 nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
301 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
302 if (get_nr_swap_pages() > 0)
303 nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
304 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
305
306 return nr;
307}
308
309/**
310 * lruvec_lru_size - Returns the number of pages on the given LRU list.
311 * @lruvec: lru vector
312 * @lru: lru to use
313 * @zone_idx: zones to consider (use MAX_NR_ZONES for the whole LRU list)
314 */
315unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
316{
317 unsigned long size = 0;
318 int zid;
319
320 for (zid = 0; zid <= zone_idx && zid < MAX_NR_ZONES; zid++) {
321 struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
322
323 if (!managed_zone(zone))
324 continue;
325
326 if (!mem_cgroup_disabled())
327 size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
328 else
329 size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
330 }
331 return size;
332}
333
334/*
335 * Add a shrinker callback to be called from the vm.
336 */
337int prealloc_shrinker(struct shrinker *shrinker)
338{
339 unsigned int size = sizeof(*shrinker->nr_deferred);
340
341 if (shrinker->flags & SHRINKER_NUMA_AWARE)
342 size *= nr_node_ids;
343
344 shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
345 if (!shrinker->nr_deferred)
346 return -ENOMEM;
347
348 if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
349 if (prealloc_memcg_shrinker(shrinker))
350 goto free_deferred;
351 }
352
353 return 0;
354
355free_deferred:
356 kfree(shrinker->nr_deferred);
357 shrinker->nr_deferred = NULL;
358 return -ENOMEM;
359}
360
361void free_prealloced_shrinker(struct shrinker *shrinker)
362{
363 if (!shrinker->nr_deferred)
364 return;
365
366 if (shrinker->flags & SHRINKER_MEMCG_AWARE)
367 unregister_memcg_shrinker(shrinker);
368
369 kfree(shrinker->nr_deferred);
370 shrinker->nr_deferred = NULL;
371}
372
373void register_shrinker_prepared(struct shrinker *shrinker)
374{
375 down_write(&shrinker_rwsem);
376 list_add_tail(&shrinker->list, &shrinker_list);
377#ifdef CONFIG_MEMCG
378 if (shrinker->flags & SHRINKER_MEMCG_AWARE)
379 idr_replace(&shrinker_idr, shrinker, shrinker->id);
380#endif
381 up_write(&shrinker_rwsem);
382}
383
384int register_shrinker(struct shrinker *shrinker)
385{
386 int err = prealloc_shrinker(shrinker);
387
388 if (err)
389 return err;
390 register_shrinker_prepared(shrinker);
391 return 0;
392}
393EXPORT_SYMBOL(register_shrinker);
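/*
 * Illustrative only: roughly what a subsystem-side user of the API above
 * looks like. The my_cache_* helpers are hypothetical and not part of this
 * file; count_objects() reports how many objects could be freed and
 * scan_objects() frees up to sc->nr_to_scan of them (or returns SHRINK_STOP
 * when it cannot make progress).
 *
 *	static unsigned long my_count(struct shrinker *s,
 *				      struct shrink_control *sc)
 *	{
 *		return my_cache_nr_objects();
 *	}
 *
 *	static unsigned long my_scan(struct shrinker *s,
 *				     struct shrink_control *sc)
 *	{
 *		return my_cache_free_some(sc->nr_to_scan);
 *	}
 *
 *	static struct shrinker my_shrinker = {
 *		.count_objects	= my_count,
 *		.scan_objects	= my_scan,
 *		.seeks		= DEFAULT_SEEKS,
 *	};
 *
 *	err = register_shrinker(&my_shrinker);
 *	...
 *	unregister_shrinker(&my_shrinker);
 */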
394
395/*
396 * Remove one
397 */
398void unregister_shrinker(struct shrinker *shrinker)
399{
400 if (!shrinker->nr_deferred)
401 return;
402 if (shrinker->flags & SHRINKER_MEMCG_AWARE)
403 unregister_memcg_shrinker(shrinker);
404 down_write(&shrinker_rwsem);
405 list_del(&shrinker->list);
406 up_write(&shrinker_rwsem);
407 kfree(shrinker->nr_deferred);
408 shrinker->nr_deferred = NULL;
409}
410EXPORT_SYMBOL(unregister_shrinker);
411
412#define SHRINK_BATCH 128
413
414static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
415 struct shrinker *shrinker, int priority)
416{
417 unsigned long freed = 0;
418 unsigned long long delta;
419 long total_scan;
420 long freeable;
421 long nr;
422 long new_nr;
423 int nid = shrinkctl->nid;
424 long batch_size = shrinker->batch ? shrinker->batch
425 : SHRINK_BATCH;
426 long scanned = 0, next_deferred;
427
428 if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
429 nid = 0;
430
431 freeable = shrinker->count_objects(shrinker, shrinkctl);
432 if (freeable == 0 || freeable == SHRINK_EMPTY)
433 return freeable;
434
435 /*
436 * copy the current shrinker scan count into a local variable
437 * and zero it so that other concurrent shrinker invocations
438 * don't also do this scanning work.
439 */
440 nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
441
442 total_scan = nr;
443 if (shrinker->seeks) {
444 delta = freeable >> priority;
445 delta *= 4;
446 do_div(delta, shrinker->seeks);
447 } else {
448 /*
449 * These objects don't require any IO to create. Trim
450 * them aggressively under memory pressure to keep
451 * them from causing refetches in the IO caches.
452 */
453 delta = freeable / 2;
454 }
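	/*
	 * Worked example (illustrative numbers): with freeable == 10000,
	 * priority == DEF_PRIORITY (12) and seeks == DEFAULT_SEEKS (2),
	 * delta = (10000 >> 12) * 4 / 2 = 4 objects are added to this
	 * invocation's scan target; each step down in priority roughly
	 * doubles that contribution.
	 */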
455
456 total_scan += delta;
457 if (total_scan < 0) {
458 pr_err("shrink_slab: %pS negative objects to delete nr=%ld\n",
459 shrinker->scan_objects, total_scan);
460 total_scan = freeable;
461 next_deferred = nr;
462 } else
463 next_deferred = total_scan;
464
465 /*
466 * We need to avoid excessive windup on filesystem shrinkers
467 * due to large numbers of GFP_NOFS allocations causing the
468 * shrinkers to return -1 all the time. This results in a large
469 * nr being built up so when a shrink that can do some work
470 * comes along it empties the entire cache due to nr >>>
471 * freeable. This is bad for sustaining a working set in
472 * memory.
473 *
474 * Hence only allow the shrinker to scan the entire cache when
475 * a large delta change is calculated directly.
476 */
477 if (delta < freeable / 4)
478 total_scan = min(total_scan, freeable / 2);
479
480 /*
481 * Avoid risking looping forever due to too large nr value:
482 * never try to free more than twice the estimated number of
483 * freeable entries.
484 */
485 if (total_scan > freeable * 2)
486 total_scan = freeable * 2;
487
488 trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
489 freeable, delta, total_scan, priority);
490
491 /*
492 * Normally, we should not scan less than batch_size objects in one
493 * pass to avoid too frequent shrinker calls, but if the slab has less
494 * than batch_size objects in total and we are really tight on memory,
495 * we will try to reclaim all available objects, otherwise we can end
496 * up failing allocations although there are plenty of reclaimable
497 * objects spread over several slabs with usage less than the
498 * batch_size.
499 *
500 * We detect the "tight on memory" situations by looking at the total
501 * number of objects we want to scan (total_scan). If it is greater
502 * than the total number of objects on slab (freeable), we must be
503 * scanning at high prio and therefore should try to reclaim as much as
504 * possible.
505 */
506 while (total_scan >= batch_size ||
507 total_scan >= freeable) {
508 unsigned long ret;
509 unsigned long nr_to_scan = min(batch_size, total_scan);
510
511 shrinkctl->nr_to_scan = nr_to_scan;
512 shrinkctl->nr_scanned = nr_to_scan;
513 ret = shrinker->scan_objects(shrinker, shrinkctl);
514 if (ret == SHRINK_STOP)
515 break;
516 freed += ret;
517
518 count_vm_events(SLABS_SCANNED, shrinkctl->nr_scanned);
519 total_scan -= shrinkctl->nr_scanned;
520 scanned += shrinkctl->nr_scanned;
521
522 cond_resched();
523 }
524
525 if (next_deferred >= scanned)
526 next_deferred -= scanned;
527 else
528 next_deferred = 0;
529 /*
530 * move the unused scan count back into the shrinker in a
531 * manner that handles concurrent updates. If we exhausted the
532 * scan, there is no need to do an update.
533 */
534 if (next_deferred > 0)
535 new_nr = atomic_long_add_return(next_deferred,
536 &shrinker->nr_deferred[nid]);
537 else
538 new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
539
540 trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
541 return freed;
542}
543
544#ifdef CONFIG_MEMCG
545static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
546 struct mem_cgroup *memcg, int priority)
547{
548 struct memcg_shrinker_map *map;
549 unsigned long ret, freed = 0;
550 int i;
551
552 if (!mem_cgroup_online(memcg))
553 return 0;
554
555 if (!down_read_trylock(&shrinker_rwsem))
556 return 0;
557
558 map = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_map,
559 true);
560 if (unlikely(!map))
561 goto unlock;
562
563 for_each_set_bit(i, map->map, shrinker_nr_max) {
564 struct shrink_control sc = {
565 .gfp_mask = gfp_mask,
566 .nid = nid,
567 .memcg = memcg,
568 };
569 struct shrinker *shrinker;
570
571 shrinker = idr_find(&shrinker_idr, i);
572 if (unlikely(!shrinker || shrinker == SHRINKER_REGISTERING)) {
573 if (!shrinker)
574 clear_bit(i, map->map);
575 continue;
576 }
577
578 /* Call non-slab shrinkers even though kmem is disabled */
579 if (!memcg_kmem_enabled() &&
580 !(shrinker->flags & SHRINKER_NONSLAB))
581 continue;
582
583 ret = do_shrink_slab(&sc, shrinker, priority);
584 if (ret == SHRINK_EMPTY) {
585 clear_bit(i, map->map);
586 /*
587 * After the shrinker reported that it had no objects to
588 * free, but before we cleared the corresponding bit in
589 * the memcg shrinker map, a new object might have been
590 * added. To make sure we have the bit set in this
591 * case, we invoke the shrinker one more time and reset
592 * the bit if it reports that it is not empty anymore.
593 * The memory barrier here pairs with the barrier in
594 * memcg_set_shrinker_bit():
595 *
596 * list_lru_add() shrink_slab_memcg()
597 * list_add_tail() clear_bit()
598 * <MB> <MB>
599 * set_bit() do_shrink_slab()
600 */
601 smp_mb__after_atomic();
602 ret = do_shrink_slab(&sc, shrinker, priority);
603 if (ret == SHRINK_EMPTY)
604 ret = 0;
605 else
606 memcg_set_shrinker_bit(memcg, nid, i);
607 }
608 freed += ret;
609
610 if (rwsem_is_contended(&shrinker_rwsem)) {
611 freed = freed ? : 1;
612 break;
613 }
614 }
615unlock:
616 up_read(&shrinker_rwsem);
617 return freed;
618}
619#else /* CONFIG_MEMCG */
620static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
621 struct mem_cgroup *memcg, int priority)
622{
623 return 0;
624}
625#endif /* CONFIG_MEMCG */
626
627/**
628 * shrink_slab - shrink slab caches
629 * @gfp_mask: allocation context
630 * @nid: node whose slab caches to target
631 * @memcg: memory cgroup whose slab caches to target
632 * @priority: the reclaim priority
633 *
634 * Call the shrink functions to age shrinkable caches.
635 *
636 * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set,
637 * unaware shrinkers will receive a node id of 0 instead.
638 *
639 * @memcg specifies the memory cgroup to target. Unaware shrinkers
640 * are called only if it is the root cgroup.
641 *
642 * @priority is sc->priority: we take the number of objects and shift it
643 * right by @priority in order to get the scan target.
644 *
645 * Returns the number of reclaimed slab objects.
646 */
647static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
648 struct mem_cgroup *memcg,
649 int priority)
650{
651 unsigned long ret, freed = 0;
652 struct shrinker *shrinker;
653
654 /*
655 * The root memcg might be allocated even though memcg is disabled
656 * via "cgroup_disable=memory" boot parameter. This could make
657 * mem_cgroup_is_root() return false, so only the memcg slab
658 * shrink would run while the global shrink is skipped. This may
659 * result in premature OOM.
660 */
661 if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
662 return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
663
664 if (!down_read_trylock(&shrinker_rwsem))
665 goto out;
666
667 list_for_each_entry(shrinker, &shrinker_list, list) {
668 struct shrink_control sc = {
669 .gfp_mask = gfp_mask,
670 .nid = nid,
671 .memcg = memcg,
672 };
673
674 ret = do_shrink_slab(&sc, shrinker, priority);
675 if (ret == SHRINK_EMPTY)
676 ret = 0;
677 freed += ret;
678 /*
679 * Bail out if someone wants to register a new shrinker to
680 * prevent the registration from being stalled for long periods
681 * by parallel ongoing shrinking.
682 */
683 if (rwsem_is_contended(&shrinker_rwsem)) {
684 freed = freed ? : 1;
685 break;
686 }
687 }
688
689 up_read(&shrinker_rwsem);
690out:
691 cond_resched();
692 return freed;
693}
694
695void drop_slab_node(int nid)
696{
697 unsigned long freed;
698
699 do {
700 struct mem_cgroup *memcg = NULL;
701
702 freed = 0;
703 memcg = mem_cgroup_iter(NULL, NULL, NULL);
704 do {
705 freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
706 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
707 } while (freed > 10);
708}
709
710void drop_slab(void)
711{
712 int nid;
713
714 for_each_online_node(nid)
715 drop_slab_node(nid);
716}
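/*
 * For reference: this pair is what backs the slab half of the drop_caches
 * sysctl, i.e. something like
 *
 *	# echo 2 > /proc/sys/vm/drop_caches	(reclaimable slab only)
 *	# echo 3 > /proc/sys/vm/drop_caches	(pagecache and slab)
 *
 * ends up calling drop_slab() and looping over every online node above.
 */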
717
718static inline int is_page_cache_freeable(struct page *page)
719{
720 /*
721 * A freeable page cache page is referenced only by the caller
722 * that isolated the page, the page cache and optional buffer
723 * heads at page->private.
724 */
725 int page_cache_pins = PageTransHuge(page) && PageSwapCache(page) ?
726 HPAGE_PMD_NR : 1;
727 return page_count(page) - page_has_private(page) == 1 + page_cache_pins;
728}
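/*
 * Example of the check above (illustrative): an isolated order-0 pagecache
 * page with buffer heads typically has page_count() == 3 (isolating caller,
 * page cache, buffer heads) and page_has_private() == 1, so 3 - 1 == 1 + 1
 * and the page is considered freeable; any additional reference, e.g. a
 * concurrent get_user_pages() pin, breaks the equality and keeps the page.
 */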
729
730static int may_write_to_inode(struct inode *inode)
731{
732 if (current->flags & PF_SWAPWRITE)
733 return 1;
734 if (!inode_write_congested(inode))
735 return 1;
736 if (inode_to_bdi(inode) == current->backing_dev_info)
737 return 1;
738 return 0;
739}
740
741/*
742 * We detected a synchronous write error writing a page out. Probably
743 * -ENOSPC. We need to propagate that into the address_space for a subsequent
744 * fsync(), msync() or close().
745 *
746 * The tricky part is that after writepage we cannot touch the mapping: nothing
747 * prevents it from being freed up. But we have a ref on the page and once
748 * that page is locked, the mapping is pinned.
749 *
750 * We're allowed to run sleeping lock_page() here because we know the caller has
751 * __GFP_FS.
752 */
753static void handle_write_error(struct address_space *mapping,
754 struct page *page, int error)
755{
756 lock_page(page);
757 if (page_mapping(page) == mapping)
758 mapping_set_error(mapping, error);
759 unlock_page(page);
760}
761
762/* possible outcome of pageout() */
763typedef enum {
764 /* failed to write page out, page is locked */
765 PAGE_KEEP,
766 /* move page to the active list, page is locked */
767 PAGE_ACTIVATE,
768 /* page has been sent to the disk successfully, page is unlocked */
769 PAGE_SUCCESS,
770 /* page is clean and locked */
771 PAGE_CLEAN,
772} pageout_t;
773
774/*
775 * pageout is called by shrink_page_list() for each dirty page.
776 * Calls ->writepage().
777 */
778static pageout_t pageout(struct page *page, struct address_space *mapping)
779{
780 /*
781 * If the page is dirty, only perform writeback if that write
782 * will be non-blocking, to prevent this allocation from being
783 * stalled by pagecache activity. But note that there may be
784 * stalls if we need to run get_block(). We could test
785 * PagePrivate for that.
786 *
787 * If this process is currently in __generic_file_write_iter() against
788 * this page's queue, we can perform writeback even if that
789 * will block.
790 *
791 * If the page is swapcache, write it back even if that would
792 * block, for some throttling. This happens by accident, because
793 * swap_backing_dev_info is bust: it doesn't reflect the
794 * congestion state of the swapdevs. Easy to fix, if needed.
795 */
796 if (!is_page_cache_freeable(page))
797 return PAGE_KEEP;
798 if (!mapping) {
799 /*
800 * Some data journaling orphaned pages can have
801 * page->mapping == NULL while being dirty with clean buffers.
802 */
803 if (page_has_private(page)) {
804 if (try_to_free_buffers(page)) {
805 ClearPageDirty(page);
806 pr_info("%s: orphaned page\n", __func__);
807 return PAGE_CLEAN;
808 }
809 }
810 return PAGE_KEEP;
811 }
812 if (mapping->a_ops->writepage == NULL)
813 return PAGE_ACTIVATE;
814 if (!may_write_to_inode(mapping->host))
815 return PAGE_KEEP;
816
817 if (clear_page_dirty_for_io(page)) {
818 int res;
819 struct writeback_control wbc = {
820 .sync_mode = WB_SYNC_NONE,
821 .nr_to_write = SWAP_CLUSTER_MAX,
822 .range_start = 0,
823 .range_end = LLONG_MAX,
824 .for_reclaim = 1,
825 };
826
827 SetPageReclaim(page);
828 res = mapping->a_ops->writepage(page, &wbc);
829 if (res < 0)
830 handle_write_error(mapping, page, res);
831 if (res == AOP_WRITEPAGE_ACTIVATE) {
832 ClearPageReclaim(page);
833 return PAGE_ACTIVATE;
834 }
835
836 if (!PageWriteback(page)) {
837 /* synchronous write or broken a_ops? */
838 ClearPageReclaim(page);
839 }
840 trace_mm_vmscan_writepage(page);
841 inc_node_page_state(page, NR_VMSCAN_WRITE);
842 return PAGE_SUCCESS;
843 }
844
845 return PAGE_CLEAN;
846}
847
848/*
849 * Same as remove_mapping, but if the page is removed from the mapping, it
850 * gets returned with a refcount of 0.
851 */
852static int __remove_mapping(struct address_space *mapping, struct page *page,
853 bool reclaimed, struct mem_cgroup *target_memcg)
854{
855 unsigned long flags;
856 int refcount;
857 void *shadow = NULL;
858
859 BUG_ON(!PageLocked(page));
860 BUG_ON(mapping != page_mapping(page));
861
862 xa_lock_irqsave(&mapping->i_pages, flags);
863 /*
864 * The non racy check for a busy page.
865 *
866 * Must be careful with the order of the tests. When someone has
867 * a ref to the page, it may be possible that they dirty it then
868 * drop the reference. So if PageDirty is tested before page_count
869 * here, then the following race may occur:
870 *
871 * get_user_pages(&page);
872 * [user mapping goes away]
873 * write_to(page);
874 * !PageDirty(page) [good]
875 * SetPageDirty(page);
876 * put_page(page);
877 * !page_count(page) [good, discard it]
878 *
879 * [oops, our write_to data is lost]
880 *
881 * Reversing the order of the tests ensures such a situation cannot
882 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
883 * load is not satisfied before that of page->_refcount.
884 *
885 * Note that if SetPageDirty is always performed via set_page_dirty,
886 * and thus under the i_pages lock, then this ordering is not required.
887 */
888 refcount = 1 + compound_nr(page);
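	/*
	 * Sketch of the expected count (not a guarantee): the isolating
	 * caller holds one reference and the page cache / swap cache holds
	 * one per base page, hence 1 + compound_nr(). Any other reference,
	 * e.g. a racing pagecache lookup, makes the freeze below fail and
	 * we back out via cannot_free.
	 */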
889 if (!page_ref_freeze(page, refcount))
890 goto cannot_free;
891 /* note: atomic_cmpxchg in page_ref_freeze provides the smp_rmb */
892 if (unlikely(PageDirty(page))) {
893 page_ref_unfreeze(page, refcount);
894 goto cannot_free;
895 }
896
897 if (PageSwapCache(page)) {
898 swp_entry_t swap = { .val = page_private(page) };
899 mem_cgroup_swapout(page, swap);
900 if (reclaimed && !mapping_exiting(mapping))
901 shadow = workingset_eviction(page, target_memcg);
902 __delete_from_swap_cache(page, swap, shadow);
903 xa_unlock_irqrestore(&mapping->i_pages, flags);
904 put_swap_page(page, swap);
905 } else {
906 void (*freepage)(struct page *);
907
908 freepage = mapping->a_ops->freepage;
909 /*
910 * Remember a shadow entry for reclaimed file cache in
911 * order to detect refaults, thus thrashing, later on.
912 *
913 * But don't store shadows in an address space that is
914 * already exiting. This is not just an optimization,
915 * inode reclaim needs to empty out the radix tree or
916 * the nodes are lost. Don't plant shadows behind its
917 * back.
918 *
919 * We also don't store shadows for DAX mappings because the
920 * only page cache pages found in these are zero pages
921 * covering holes, and because we don't want to mix DAX
922 * exceptional entries and shadow exceptional entries in the
923 * same address_space.
924 */
925 if (reclaimed && page_is_file_lru(page) &&
926 !mapping_exiting(mapping) && !dax_mapping(mapping))
927 shadow = workingset_eviction(page, target_memcg);
928 __delete_from_page_cache(page, shadow);
929 xa_unlock_irqrestore(&mapping->i_pages, flags);
930
931 if (freepage != NULL)
932 freepage(page);
933 }
934
935 return 1;
936
937cannot_free:
938 xa_unlock_irqrestore(&mapping->i_pages, flags);
939 return 0;
940}
941
942/*
943 * Attempt to detach a locked page from its ->mapping. If it is dirty or if
944 * someone else has a ref on the page, abort and return 0. If it was
945 * successfully detached, return 1. Assumes the caller has a single ref on
946 * this page.
947 */
948int remove_mapping(struct address_space *mapping, struct page *page)
949{
950 if (__remove_mapping(mapping, page, false, NULL)) {
951 /*
952 * Unfreezing the refcount with 1 rather than 2 effectively
953 * drops the pagecache ref for us without requiring another
954 * atomic operation.
955 */
956 page_ref_unfreeze(page, 1);
957 return 1;
958 }
959 return 0;
960}
961
962/**
963 * putback_lru_page - put previously isolated page onto appropriate LRU list
964 * @page: page to be put back to appropriate lru list
965 *
966 * Add previously isolated @page to appropriate LRU list.
967 * Page may still be unevictable for other reasons.
968 *
969 * lru_lock must not be held, interrupts must be enabled.
970 */
971void putback_lru_page(struct page *page)
972{
973 lru_cache_add(page);
974 put_page(page); /* drop ref from isolate */
975}
976
977enum page_references {
978 PAGEREF_RECLAIM,
979 PAGEREF_RECLAIM_CLEAN,
980 PAGEREF_KEEP,
981 PAGEREF_ACTIVATE,
982};
983
984static enum page_references page_check_references(struct page *page,
985 struct scan_control *sc)
986{
987 int referenced_ptes, referenced_page;
988 unsigned long vm_flags;
989
990 referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
991 &vm_flags);
992 referenced_page = TestClearPageReferenced(page);
993
994 /*
995 * Mlock lost the isolation race with us. Let try_to_unmap()
996 * move the page to the unevictable list.
997 */
998 if (vm_flags & VM_LOCKED)
999 return PAGEREF_RECLAIM;
1000
1001 if (referenced_ptes) {
1002 /*
1003 * All mapped pages start out with page table
1004 * references from the instantiating fault, so we need
1005 * to look twice if a mapped file page is used more
1006 * than once.
1007 *
1008 * Mark it and spare it for another trip around the
1009 * inactive list. Another page table reference will
1010 * lead to its activation.
1011 *
1012 * Note: the mark is set for activated pages as well
1013 * so that recently deactivated but used pages are
1014 * quickly recovered.
1015 */
1016 SetPageReferenced(page);
1017
1018 if (referenced_page || referenced_ptes > 1)
1019 return PAGEREF_ACTIVATE;
1020
1021 /*
1022 * Activate file-backed executable pages after first usage.
1023 */
1024 if ((vm_flags & VM_EXEC) && !PageSwapBacked(page))
1025 return PAGEREF_ACTIVATE;
1026
1027 return PAGEREF_KEEP;
1028 }
1029
1030 /* Reclaim if clean, defer dirty pages to writeback */
1031 if (referenced_page && !PageSwapBacked(page))
1032 return PAGEREF_RECLAIM_CLEAN;
1033
1034 return PAGEREF_RECLAIM;
1035}
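/*
 * A rough summary of the decision tree above: VM_LOCKED forces
 * PAGEREF_RECLAIM so try_to_unmap() can move the page to the unevictable
 * list; multiple pte references, a pte reference combined with
 * PG_referenced, or a pte-referenced executable file page all activate; a
 * single pte reference keeps the page for another trip around the inactive
 * list; with no pte references, a PG_referenced file page becomes
 * PAGEREF_RECLAIM_CLEAN (reclaim only if clean) and everything else is
 * PAGEREF_RECLAIM.
 */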
1036
1037/* Check if a page is dirty or under writeback */
1038static void page_check_dirty_writeback(struct page *page,
1039 bool *dirty, bool *writeback)
1040{
1041 struct address_space *mapping;
1042
1043 /*
1044 * Anonymous pages are not handled by flushers and must be written
1045 * from reclaim context. Do not stall reclaim based on them
1046 */
1047 if (!page_is_file_lru(page) ||
1048 (PageAnon(page) && !PageSwapBacked(page))) {
1049 *dirty = false;
1050 *writeback = false;
1051 return;
1052 }
1053
1054 /* By default assume that the page flags are accurate */
1055 *dirty = PageDirty(page);
1056 *writeback = PageWriteback(page);
1057
1058 /* Verify dirty/writeback state if the filesystem supports it */
1059 if (!page_has_private(page))
1060 return;
1061
1062 mapping = page_mapping(page);
1063 if (mapping && mapping->a_ops->is_dirty_writeback)
1064 mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
1065}
1066
1067/*
1068 * shrink_page_list() returns the number of reclaimed pages
1069 */
1070static unsigned int shrink_page_list(struct list_head *page_list,
1071 struct pglist_data *pgdat,
1072 struct scan_control *sc,
1073 enum ttu_flags ttu_flags,
1074 struct reclaim_stat *stat,
1075 bool ignore_references)
1076{
1077 LIST_HEAD(ret_pages);
1078 LIST_HEAD(free_pages);
1079 unsigned int nr_reclaimed = 0;
1080 unsigned int pgactivate = 0;
1081
1082 memset(stat, 0, sizeof(*stat));
1083 cond_resched();
1084
1085 while (!list_empty(page_list)) {
1086 struct address_space *mapping;
1087 struct page *page;
1088 enum page_references references = PAGEREF_RECLAIM;
1089 bool dirty, writeback, may_enter_fs;
1090 unsigned int nr_pages;
1091
1092 cond_resched();
1093
1094 page = lru_to_page(page_list);
1095 list_del(&page->lru);
1096
1097 if (!trylock_page(page))
1098 goto keep;
1099
1100 VM_BUG_ON_PAGE(PageActive(page), page);
1101
1102 nr_pages = compound_nr(page);
1103
1104 /* Account the number of base pages, even for a THP */
1105 sc->nr_scanned += nr_pages;
1106
1107 if (unlikely(!page_evictable(page)))
1108 goto activate_locked;
1109
1110 if (!sc->may_unmap && page_mapped(page))
1111 goto keep_locked;
1112
1113 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
1114 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
1115
1116 /*
1117 * The number of dirty pages determines if a node is marked
1118 * reclaim_congested which affects wait_iff_congested. kswapd
1119 * will stall and start writing pages if the tail of the LRU
1120 * is all dirty unqueued pages.
1121 */
1122 page_check_dirty_writeback(page, &dirty, &writeback);
1123 if (dirty || writeback)
1124 stat->nr_dirty++;
1125
1126 if (dirty && !writeback)
1127 stat->nr_unqueued_dirty++;
1128
1129 /*
1130 * Treat this page as congested if the underlying BDI is or if
1131 * pages are cycling through the LRU so quickly that the
1132 * pages marked for immediate reclaim are making it to the
1133 * end of the LRU a second time.
1134 */
1135 mapping = page_mapping(page);
1136 if (((dirty || writeback) && mapping &&
1137 inode_write_congested(mapping->host)) ||
1138 (writeback && PageReclaim(page)))
1139 stat->nr_congested++;
1140
1141 /*
1142 * If a page at the tail of the LRU is under writeback, there
1143 * are three cases to consider.
1144 *
1145 * 1) If reclaim is encountering an excessive number of pages
1146 * under writeback and this page is both under writeback and
1147 * PageReclaim then it indicates that pages are being queued
1148 * for IO but are being recycled through the LRU before the
1149 * IO can complete. Waiting on the page itself risks an
1150 * indefinite stall if it is impossible to writeback the
1151 * page due to IO error or disconnected storage so instead
1152 * note that the LRU is being scanned too quickly and the
1153 * caller can stall after page list has been processed.
1154 *
1155 * 2) Global or new memcg reclaim encounters a page that is
1156 * not marked for immediate reclaim, or the caller does not
1157 * have __GFP_FS (or __GFP_IO if it's simply going to swap,
1158 * not to fs). In this case mark the page for immediate
1159 * reclaim and continue scanning.
1160 *
1161 * Require may_enter_fs because we would wait on fs, which
1162 * may not have submitted IO yet. And the loop driver might
1163 * enter reclaim, and deadlock if it waits on a page for
1164 * which it is needed to do the write (loop masks off
1165 * __GFP_IO|__GFP_FS for this reason); but more thought
1166 * would probably show more reasons.
1167 *
1168 * 3) Legacy memcg encounters a page that is already marked
1169 * PageReclaim. memcg does not have any dirty pages
1170 * throttling so we could easily OOM just because too many
1171 * pages are in writeback and there is nothing else to
1172 * reclaim. Wait for the writeback to complete.
1173 *
1174 * In cases 1) and 2) we activate the pages to get them out of
1175 * the way while we continue scanning for clean pages on the
1176 * inactive list and refilling from the active list. The
1177 * observation here is that waiting for disk writes is more
1178 * expensive than potentially causing reloads down the line.
1179 * Since they're marked for immediate reclaim, they won't put
1180 * memory pressure on the cache working set any longer than it
1181 * takes to write them to disk.
1182 */
1183 if (PageWriteback(page)) {
1184 /* Case 1 above */
1185 if (current_is_kswapd() &&
1186 PageReclaim(page) &&
1187 test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
1188 stat->nr_immediate++;
1189 goto activate_locked;
1190
1191 /* Case 2 above */
1192 } else if (writeback_throttling_sane(sc) ||
1193 !PageReclaim(page) || !may_enter_fs) {
1194 /*
1195 * This is slightly racy - end_page_writeback()
1196 * might have just cleared PageReclaim, then
1197 * setting PageReclaim here ends up being interpreted
1198 * as PageReadahead - but that does not matter
1199 * enough to care. What we do want is for this
1200 * page to have PageReclaim set next time memcg
1201 * reclaim reaches the tests above, so it will
1202 * then wait_on_page_writeback() to avoid OOM;
1203 * and it's also appropriate in global reclaim.
1204 */
1205 SetPageReclaim(page);
1206 stat->nr_writeback++;
1207 goto activate_locked;
1208
1209 /* Case 3 above */
1210 } else {
1211 unlock_page(page);
1212 wait_on_page_writeback(page);
1213 /* then go back and try same page again */
1214 list_add_tail(&page->lru, page_list);
1215 continue;
1216 }
1217 }
1218
1219 if (!ignore_references)
1220 references = page_check_references(page, sc);
1221
1222 switch (references) {
1223 case PAGEREF_ACTIVATE:
1224 goto activate_locked;
1225 case PAGEREF_KEEP:
1226 stat->nr_ref_keep += nr_pages;
1227 goto keep_locked;
1228 case PAGEREF_RECLAIM:
1229 case PAGEREF_RECLAIM_CLEAN:
1230 ; /* try to reclaim the page below */
1231 }
1232
1233 /*
1234 * Anonymous process memory has backing store?
1235 * Try to allocate it some swap space here.
1236 * Lazyfree page could be freed directly
1237 */
1238 if (PageAnon(page) && PageSwapBacked(page)) {
1239 if (!PageSwapCache(page)) {
1240 if (!(sc->gfp_mask & __GFP_IO))
1241 goto keep_locked;
1242 if (PageTransHuge(page)) {
1243 /* cannot split THP, skip it */
1244 if (!can_split_huge_page(page, NULL))
1245 goto activate_locked;
1246 /*
1247 * Split pages without a PMD map right
1248 * away. Chances are some or all of the
1249 * tail pages can be freed without IO.
1250 */
1251 if (!compound_mapcount(page) &&
1252 split_huge_page_to_list(page,
1253 page_list))
1254 goto activate_locked;
1255 }
1256 if (!add_to_swap(page)) {
1257 if (!PageTransHuge(page))
1258 goto activate_locked_split;
1259 /* Fallback to swap normal pages */
1260 if (split_huge_page_to_list(page,
1261 page_list))
1262 goto activate_locked;
1263#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1264 count_vm_event(THP_SWPOUT_FALLBACK);
1265#endif
1266 if (!add_to_swap(page))
1267 goto activate_locked_split;
1268 }
1269
1270 may_enter_fs = true;
1271
1272 /* Adding to swap updated mapping */
1273 mapping = page_mapping(page);
1274 }
1275 } else if (unlikely(PageTransHuge(page))) {
1276 /* Split file THP */
1277 if (split_huge_page_to_list(page, page_list))
1278 goto keep_locked;
1279 }
1280
1281 /*
1282 * THP may get split above; if so, subtract the tail pages and
1283 * update nr_pages to avoid accounting tail pages twice.
1284 *
1285 * The tail pages that were successfully added to the swap cache
1286 * reach here.
1287 */
1288 if ((nr_pages > 1) && !PageTransHuge(page)) {
1289 sc->nr_scanned -= (nr_pages - 1);
1290 nr_pages = 1;
1291 }
1292
1293 /*
1294 * The page is mapped into the page tables of one or more
1295 * processes. Try to unmap it here.
1296 */
1297 if (page_mapped(page)) {
1298 enum ttu_flags flags = ttu_flags | TTU_BATCH_FLUSH;
1299 bool was_swapbacked = PageSwapBacked(page);
1300
1301 if (unlikely(PageTransHuge(page)))
1302 flags |= TTU_SPLIT_HUGE_PMD;
1303
1304 if (!try_to_unmap(page, flags)) {
1305 stat->nr_unmap_fail += nr_pages;
1306 if (!was_swapbacked && PageSwapBacked(page))
1307 stat->nr_lazyfree_fail += nr_pages;
1308 goto activate_locked;
1309 }
1310 }
1311
1312 if (PageDirty(page)) {
1313 /*
1314 * Only kswapd can writeback filesystem pages
1315 * to avoid risk of stack overflow. But avoid
1316 * injecting inefficient single-page IO into
1317 * flusher writeback as much as possible: only
1318 * write pages when we've encountered many
1319 * dirty pages, and when we've already scanned
1320 * the rest of the LRU for clean pages and see
1321 * the same dirty pages again (PageReclaim).
1322 */
1323 if (page_is_file_lru(page) &&
1324 (!current_is_kswapd() || !PageReclaim(page) ||
1325 !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
1326 /*
1327 * Immediately reclaim when written back.
1328 * Similar in principle to deactivate_page()
1329 * except we already have the page isolated
1330 * and know it's dirty
1331 */
1332 inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
1333 SetPageReclaim(page);
1334
1335 goto activate_locked;
1336 }
1337
1338 if (references == PAGEREF_RECLAIM_CLEAN)
1339 goto keep_locked;
1340 if (!may_enter_fs)
1341 goto keep_locked;
1342 if (!sc->may_writepage)
1343 goto keep_locked;
1344
1345 /*
1346 * Page is dirty. Flush the TLB if a writable entry
1347 * potentially exists to avoid CPU writes after IO
1348 * starts and then write it out here.
1349 */
1350 try_to_unmap_flush_dirty();
1351 switch (pageout(page, mapping)) {
1352 case PAGE_KEEP:
1353 goto keep_locked;
1354 case PAGE_ACTIVATE:
1355 goto activate_locked;
1356 case PAGE_SUCCESS:
1357 stat->nr_pageout += thp_nr_pages(page);
1358
1359 if (PageWriteback(page))
1360 goto keep;
1361 if (PageDirty(page))
1362 goto keep;
1363
1364 /*
1365 * A synchronous write - probably a ramdisk. Go
1366 * ahead and try to reclaim the page.
1367 */
1368 if (!trylock_page(page))
1369 goto keep;
1370 if (PageDirty(page) || PageWriteback(page))
1371 goto keep_locked;
1372 mapping = page_mapping(page);
1373 case PAGE_CLEAN:
1374 ; /* try to free the page below */
1375 }
1376 }
1377
1378 /*
1379 * If the page has buffers, try to free the buffer mappings
1380 * associated with this page. If we succeed we try to free
1381 * the page as well.
1382 *
1383 * We do this even if the page is PageDirty().
1384 * try_to_release_page() does not perform I/O, but it is
1385 * possible for a page to have PageDirty set, but it is actually
1386 * clean (all its buffers are clean). This happens if the
1387 * buffers were written out directly, with submit_bh(). ext3
1388 * will do this, as well as the blockdev mapping.
1389 * try_to_release_page() will discover that cleanness and will
1390 * drop the buffers and mark the page clean - it can be freed.
1391 *
1392 * Rarely, pages can have buffers and no ->mapping. These are
1393 * the pages which were not successfully invalidated in
1394 * truncate_complete_page(). We try to drop those buffers here
1395 * and if that worked, and the page is no longer mapped into
1396 * process address space (page_count == 1) it can be freed.
1397 * Otherwise, leave the page on the LRU so it is swappable.
1398 */
1399 if (page_has_private(page)) {
1400 if (!try_to_release_page(page, sc->gfp_mask))
1401 goto activate_locked;
1402 if (!mapping && page_count(page) == 1) {
1403 unlock_page(page);
1404 if (put_page_testzero(page))
1405 goto free_it;
1406 else {
1407 /*
1408 * rare race with speculative reference.
1409 * the speculative reference will free
1410 * this page shortly, so we may
1411 * increment nr_reclaimed here (and
1412 * leave it off the LRU).
1413 */
1414 nr_reclaimed++;
1415 continue;
1416 }
1417 }
1418 }
1419
1420 if (PageAnon(page) && !PageSwapBacked(page)) {
1421 /* follow __remove_mapping for reference */
1422 if (!page_ref_freeze(page, 1))
1423 goto keep_locked;
1424 if (PageDirty(page)) {
1425 page_ref_unfreeze(page, 1);
1426 goto keep_locked;
1427 }
1428
1429 count_vm_event(PGLAZYFREED);
1430 count_memcg_page_event(page, PGLAZYFREED);
1431 } else if (!mapping || !__remove_mapping(mapping, page, true,
1432 sc->target_mem_cgroup))
1433 goto keep_locked;
1434
1435 unlock_page(page);
1436free_it:
1437 /*
1438 * A THP may get swapped out as a whole, so account
1439 * all of its base pages.
1440 */
1441 nr_reclaimed += nr_pages;
1442
1443 /*
1444 * Is there a need to periodically free the page list? It
1445 * would appear not, as the counts should be low.
1446 */
1447 if (unlikely(PageTransHuge(page)))
1448 destroy_compound_page(page);
1449 else
1450 list_add(&page->lru, &free_pages);
1451 continue;
1452
1453activate_locked_split:
1454 /*
1455 * The tail pages that failed to be added to the swap cache
1456 * reach here. Fix up nr_scanned and nr_pages.
1457 */
1458 if (nr_pages > 1) {
1459 sc->nr_scanned -= (nr_pages - 1);
1460 nr_pages = 1;
1461 }
1462activate_locked:
1463 /* Not a candidate for swapping, so reclaim swap space. */
1464 if (PageSwapCache(page) && (mem_cgroup_swap_full(page) ||
1465 PageMlocked(page)))
1466 try_to_free_swap(page);
1467 VM_BUG_ON_PAGE(PageActive(page), page);
1468 if (!PageMlocked(page)) {
1469 int type = page_is_file_lru(page);
1470 SetPageActive(page);
1471 stat->nr_activate[type] += nr_pages;
1472 count_memcg_page_event(page, PGACTIVATE);
1473 }
1474keep_locked:
1475 unlock_page(page);
1476keep:
1477 list_add(&page->lru, &ret_pages);
1478 VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
1479 }
1480
1481 pgactivate = stat->nr_activate[0] + stat->nr_activate[1];
1482
1483 mem_cgroup_uncharge_list(&free_pages);
1484 try_to_unmap_flush();
1485 free_unref_page_list(&free_pages);
1486
1487 list_splice(&ret_pages, page_list);
1488 count_vm_events(PGACTIVATE, pgactivate);
1489
1490 return nr_reclaimed;
1491}
1492
1493unsigned int reclaim_clean_pages_from_list(struct zone *zone,
1494 struct list_head *page_list)
1495{
1496 struct scan_control sc = {
1497 .gfp_mask = GFP_KERNEL,
1498 .priority = DEF_PRIORITY,
1499 .may_unmap = 1,
1500 };
1501 struct reclaim_stat stat;
1502 unsigned int nr_reclaimed;
1503 struct page *page, *next;
1504 LIST_HEAD(clean_pages);
1505
1506 list_for_each_entry_safe(page, next, page_list, lru) {
1507 if (page_is_file_lru(page) && !PageDirty(page) &&
1508 !__PageMovable(page) && !PageUnevictable(page)) {
1509 ClearPageActive(page);
1510 list_move(&page->lru, &clean_pages);
1511 }
1512 }
1513
1514 nr_reclaimed = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
1515 TTU_IGNORE_ACCESS, &stat, true);
1516 list_splice(&clean_pages, page_list);
1517 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -nr_reclaimed);
1518 /*
1519 * Since lazyfree pages are isolated from the file LRU from the beginning,
1520 * they will rotate back to the anonymous LRU in the end if the discard
1521 * failed, so the isolated counts would be mismatched.
1522 * Compensate the isolated counts for both LRU lists.
1523 */
1524 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON,
1525 stat.nr_lazyfree_fail);
1526 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
1527 -stat.nr_lazyfree_fail);
1528 return nr_reclaimed;
1529}
1530
1531/*
1532 * Attempt to remove the specified page from its LRU. Only take this page
1533 * if it is of the appropriate PageActive status. Pages which are being
1534 * freed elsewhere are also ignored.
1535 *
1536 * page: page to consider
1537 * mode: one of the LRU isolation modes defined above
1538 *
1539 * returns 0 on success, -ve errno on failure.
1540 */
1541int __isolate_lru_page(struct page *page, isolate_mode_t mode)
1542{
1543 int ret = -EINVAL;
1544
1545 /* Only take pages on the LRU. */
1546 if (!PageLRU(page))
1547 return ret;
1548
1549 /* Compaction should not handle unevictable pages but CMA can do so */
1550 if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
1551 return ret;
1552
1553 ret = -EBUSY;
1554
1555 /*
1556 * To minimise LRU disruption, the caller can indicate that it only
1557 * wants to isolate pages it will be able to operate on without
1558 * blocking - clean pages for the most part.
1559 *
1560 * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants to isolate
1561 * pages that can be migrated without blocking.
1562 */
1563 if (mode & ISOLATE_ASYNC_MIGRATE) {
1564 /* All the caller can do on PageWriteback is block */
1565 if (PageWriteback(page))
1566 return ret;
1567
1568 if (PageDirty(page)) {
1569 struct address_space *mapping;
1570 bool migrate_dirty;
1571
1572 /*
1573 * Only pages without mappings or that have a
1574 * ->migratepage callback are possible to migrate
1575 * without blocking. However, we can be racing with
1576 * truncation so it's necessary to lock the page
1577 * to stabilise the mapping as truncation holds
1578 * the page lock until after the page is removed
1579 * from the page cache.
1580 */
1581 if (!trylock_page(page))
1582 return ret;
1583
1584 mapping = page_mapping(page);
1585 migrate_dirty = !mapping || mapping->a_ops->migratepage;
1586 unlock_page(page);
1587 if (!migrate_dirty)
1588 return ret;
1589 }
1590 }
1591
1592 if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
1593 return ret;
1594
1595 if (likely(get_page_unless_zero(page))) {
1596 /*
1597 * Be careful not to clear PageLRU until after we're
1598 * sure the page is not being freed elsewhere -- the
1599 * page release code relies on it.
1600 */
1601 ClearPageLRU(page);
1602 ret = 0;
1603 }
1604
1605 return ret;
1606}
1607
1608
1609/*
1610 * Update LRU sizes after isolating pages. The LRU size updates must
1611 * be complete before mem_cgroup_update_lru_size due to a sanity check.
1612 */
1613static __always_inline void update_lru_sizes(struct lruvec *lruvec,
1614 enum lru_list lru, unsigned long *nr_zone_taken)
1615{
1616 int zid;
1617
1618 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1619 if (!nr_zone_taken[zid])
1620 continue;
1621
1622 update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
1623 }
1624
1625}
1626
1627/**
1628 * pgdat->lru_lock is heavily contended. Some of the functions that
1629 * shrink the lists perform better by taking out a batch of pages
1630 * and working on them outside the LRU lock.
1631 *
1632 * For pagecache intensive workloads, this function is the hottest
1633 * spot in the kernel (apart from copy_*_user functions).
1634 *
1635 * Appropriate locks must be held before calling this function.
1636 *
1637 * @nr_to_scan: The number of eligible pages to look through on the list.
1638 * @lruvec: The LRU vector to pull pages from.
1639 * @dst: The temp list to put pages on to.
1640 * @nr_scanned: The number of pages that were scanned.
1641 * @sc: The scan_control struct for this reclaim session
1642 * @lru: LRU list id for isolating
1643 *
1644 * returns how many pages were moved onto *@dst.
1645 */
1646static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1647 struct lruvec *lruvec, struct list_head *dst,
1648 unsigned long *nr_scanned, struct scan_control *sc,
1649 enum lru_list lru)
1650{
1651 struct list_head *src = &lruvec->lists[lru];
1652 unsigned long nr_taken = 0;
1653 unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
1654 unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
1655 unsigned long skipped = 0;
1656 unsigned long scan, total_scan, nr_pages;
1657 LIST_HEAD(pages_skipped);
1658 isolate_mode_t mode = (sc->may_unmap ? 0 : ISOLATE_UNMAPPED);
1659
1660 total_scan = 0;
1661 scan = 0;
1662 while (scan < nr_to_scan && !list_empty(src)) {
1663 struct page *page;
1664
1665 page = lru_to_page(src);
1666 prefetchw_prev_lru_page(page, src, flags);
1667
1668 VM_BUG_ON_PAGE(!PageLRU(page), page);
1669
1670 nr_pages = compound_nr(page);
1671 total_scan += nr_pages;
1672
1673 if (page_zonenum(page) > sc->reclaim_idx) {
1674 list_move(&page->lru, &pages_skipped);
1675 nr_skipped[page_zonenum(page)] += nr_pages;
1676 continue;
1677 }
1678
1679 /*
1680 * Do not count skipped pages because that makes the function
1681 * return with no isolated pages if the LRU mostly contains
1682 * ineligible pages. This causes the VM to not reclaim any
1683 * pages, triggering a premature OOM.
1684 *
1685 * Account all tail pages of THP. This would not cause
1686 * premature OOM since __isolate_lru_page() returns -EBUSY
1687 * only when the page is being freed somewhere else.
1688 */
1689 scan += nr_pages;
1690 switch (__isolate_lru_page(page, mode)) {
1691 case 0:
1692 nr_taken += nr_pages;
1693 nr_zone_taken[page_zonenum(page)] += nr_pages;
1694 list_move(&page->lru, dst);
1695 break;
1696
1697 case -EBUSY:
1698 /* else it is being freed elsewhere */
1699 list_move(&page->lru, src);
1700 continue;
1701
1702 default:
1703 BUG();
1704 }
1705 }
1706
1707 /*
1708 * Splice any skipped pages to the start of the LRU list. Note that
1709 * this disrupts the LRU order when reclaiming for lower zones but
1710 * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX
1711 * scanning would soon rescan the same pages to skip and put the
1712 * system at risk of premature OOM.
1713 */
1714 if (!list_empty(&pages_skipped)) {
1715 int zid;
1716
1717 list_splice(&pages_skipped, src);
1718 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1719 if (!nr_skipped[zid])
1720 continue;
1721
1722 __count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
1723 skipped += nr_skipped[zid];
1724 }
1725 }
1726 *nr_scanned = total_scan;
1727 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
1728 total_scan, skipped, nr_taken, mode, lru);
1729 update_lru_sizes(lruvec, lru, nr_zone_taken);
1730 return nr_taken;
1731}
1732
1733/**
1734 * isolate_lru_page - tries to isolate a page from its LRU list
1735 * @page: page to isolate from its LRU list
1736 *
1737 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
1738 * vmstat statistic corresponding to whatever LRU list the page was on.
1739 *
1740 * Returns 0 if the page was removed from an LRU list.
1741 * Returns -EBUSY if the page was not on an LRU list.
1742 *
1743 * The returned page will have PageLRU() cleared. If it was found on
1744 * the active list, it will have PageActive set. If it was found on
1745 * the unevictable list, it will have the PageUnevictable bit set. That flag
1746 * may need to be cleared by the caller before letting the page go.
1747 *
1748 * The vmstat statistic corresponding to the list on which the page was
1749 * found will be decremented.
1750 *
1751 * Restrictions:
1752 *
1753 * (1) Must be called with an elevated refcount on the page. This is a
1754 * fundamental difference from isolate_lru_pages (which is called
1755 * without a stable reference).
1756 * (2) the lru_lock must not be held.
1757 * (3) interrupts must be enabled.
1758 */
1759int isolate_lru_page(struct page *page)
1760{
1761 int ret = -EBUSY;
1762
1763 VM_BUG_ON_PAGE(!page_count(page), page);
1764 WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
1765
1766 if (PageLRU(page)) {
1767 pg_data_t *pgdat = page_pgdat(page);
1768 struct lruvec *lruvec;
1769
1770 spin_lock_irq(&pgdat->lru_lock);
1771 lruvec = mem_cgroup_page_lruvec(page, pgdat);
1772 if (PageLRU(page)) {
1773 int lru = page_lru(page);
1774 get_page(page);
1775 ClearPageLRU(page);
1776 del_page_from_lru_list(page, lruvec, lru);
1777 ret = 0;
1778 }
1779 spin_unlock_irq(&pgdat->lru_lock);
1780 }
1781 return ret;
1782}
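/*
 * Typical usage is roughly (a sketch, assuming the caller already pinned
 * the page as required by restriction (1) above):
 *
 *	get_page(page);
 *	if (!isolate_lru_page(page)) {
 *		... work on the page while it is off the LRU ...
 *		putback_lru_page(page);		drops the isolation reference
 *	}
 *	put_page(page);				drops the caller's reference
 */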
1783
1784/*
1785 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
1786 * then get rescheduled. When a massive number of tasks is doing page
1787 * allocation, such sleeping direct reclaimers may keep piling up on each CPU;
1788 * the LRU list will go small and be scanned faster than necessary, leading to
1789 * unnecessary swapping, thrashing and OOM.
1790 */
1791static int too_many_isolated(struct pglist_data *pgdat, int file,
1792 struct scan_control *sc)
1793{
1794 unsigned long inactive, isolated;
1795
1796 if (current_is_kswapd())
1797 return 0;
1798
1799 if (!writeback_throttling_sane(sc))
1800 return 0;
1801
1802 if (file) {
1803 inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
1804 isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
1805 } else {
1806 inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
1807 isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
1808 }
1809
1810 /*
1811 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
1812 * won't get blocked by normal direct-reclaimers, forming a circular
1813 * deadlock.
1814 */
1815 if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
1816 inactive >>= 3;
1817
1818 return isolated > inactive;
1819}
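/*
 * Illustrative numbers for the check above: with 80000 inactive file pages,
 * a GFP_KERNEL direct reclaimer starts stalling in shrink_inactive_list()
 * once more than 80000 >> 3 = 10000 file pages are isolated, whereas a
 * GFP_NOFS or GFP_NOIO caller is only throttled once isolated pages exceed
 * the full inactive count, so it cannot get stuck behind ordinary direct
 * reclaimers.
 */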
1820
1821/*
1822 * This moves pages from @list to corresponding LRU list.
1823 *
1824 * We move them the other way if the page is referenced by one or more
1825 * processes, from rmap.
1826 *
1827 * If the pages are mostly unmapped, the processing is fast and it is
1828 * appropriate to hold pgdat->lru_lock across the whole operation. But if
1829 * the pages are mapped, the processing is slow (page_referenced()) so we
1830 * should drop pgdat->lru_lock around each page. It's impossible to balance
1831 * this, so instead we remove the pages from the LRU while processing them.
1832 * It is safe to rely on PG_active against the non-LRU pages in here because
1833 * nobody will play with that bit on a non-LRU page.
1834 *
1835 * The downside is that we have to touch page->_refcount against each page.
1836 * But we had to alter page->flags anyway.
1837 *
1838 * Returns the number of pages moved to the given lruvec.
1839 */
1840
1841static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
1842 struct list_head *list)
1843{
1844 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1845 int nr_pages, nr_moved = 0;
1846 LIST_HEAD(pages_to_free);
1847 struct page *page;
1848 enum lru_list lru;
1849
1850 while (!list_empty(list)) {
1851 page = lru_to_page(list);
1852 VM_BUG_ON_PAGE(PageLRU(page), page);
1853 if (unlikely(!page_evictable(page))) {
1854 list_del(&page->lru);
1855 spin_unlock_irq(&pgdat->lru_lock);
1856 putback_lru_page(page);
1857 spin_lock_irq(&pgdat->lru_lock);
1858 continue;
1859 }
1860 lruvec = mem_cgroup_page_lruvec(page, pgdat);
1861
1862 SetPageLRU(page);
1863 lru = page_lru(page);
1864
1865 nr_pages = thp_nr_pages(page);
1866 update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
1867 list_move(&page->lru, &lruvec->lists[lru]);
1868
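		/*
		 * Drop the reference taken when the page was isolated. If it
		 * was the last reference, the page is actually free and is
		 * finished off here instead of going back on an LRU list.
		 */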
1869 if (put_page_testzero(page)) {
1870 __ClearPageLRU(page);
1871 __ClearPageActive(page);
1872 del_page_from_lru_list(page, lruvec, lru);
1873
1874 if (unlikely(PageCompound(page))) {
1875 spin_unlock_irq(&pgdat->lru_lock);
1876 destroy_compound_page(page);
1877 spin_lock_irq(&pgdat->lru_lock);
1878 } else
1879 list_add(&page->lru, &pages_to_free);
1880 } else {
1881 nr_moved += nr_pages;
1882 if (PageActive(page))
1883 workingset_age_nonresident(lruvec, nr_pages);
1884 }
1885 }
1886
1887 /*
1888 * To save our caller's stack, now use input list for pages to free.
1889 */
1890 list_splice(&pages_to_free, list);
1891
1892 return nr_moved;
1893}
1894
1895/*
1896 * If a kernel thread (such as nfsd for loop-back mounts) services
1897 * a backing device by writing to the page cache it sets PF_LOCAL_THROTTLE.
1898 * In that case we should only throttle if the backing device it is
1899 * writing to is congested. In other cases it is safe to throttle.
1900 */
1901static int current_may_throttle(void)
1902{
1903 return !(current->flags & PF_LOCAL_THROTTLE) ||
1904 current->backing_dev_info == NULL ||
1905 bdi_write_congested(current->backing_dev_info);
1906}
1907
1908/*
1909 * shrink_inactive_list() is a helper for shrink_node(). It returns the number
1910 * of reclaimed pages
1911 */
1912static noinline_for_stack unsigned long
1913shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
1914 struct scan_control *sc, enum lru_list lru)
1915{
1916 LIST_HEAD(page_list);
1917 unsigned long nr_scanned;
1918 unsigned int nr_reclaimed = 0;
1919 unsigned long nr_taken;
1920 struct reclaim_stat stat;
1921 bool file = is_file_lru(lru);
1922 enum vm_event_item item;
1923 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1924 bool stalled = false;
1925
1926 while (unlikely(too_many_isolated(pgdat, file, sc))) {
1927 if (stalled)
1928 return 0;
1929
1930 /* wait a bit for the reclaimer. */
1931 msleep(100);
1932 stalled = true;
1933
1934 /* We are about to die and free our memory. Return now. */
1935 if (fatal_signal_pending(current))
1936 return SWAP_CLUSTER_MAX;
1937 }
1938
1939 lru_add_drain();
1940
1941 spin_lock_irq(&pgdat->lru_lock);
1942
1943 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
1944 &nr_scanned, sc, lru);
1945
1946 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
1947 item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
1948 if (!cgroup_reclaim(sc))
1949 __count_vm_events(item, nr_scanned);
1950 __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
1951 __count_vm_events(PGSCAN_ANON + file, nr_scanned);
1952
1953 spin_unlock_irq(&pgdat->lru_lock);
1954
1955 if (nr_taken == 0)
1956 return 0;
1957
1958 nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, 0,
1959 &stat, false);
1960
1961 spin_lock_irq(&pgdat->lru_lock);
1962
1963 move_pages_to_lru(lruvec, &page_list);
1964
1965 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
1966 lru_note_cost(lruvec, file, stat.nr_pageout);
1967 item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
1968 if (!cgroup_reclaim(sc))
1969 __count_vm_events(item, nr_reclaimed);
1970 __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
1971 __count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
1972
1973 spin_unlock_irq(&pgdat->lru_lock);
1974
1975 mem_cgroup_uncharge_list(&page_list);
1976 free_unref_page_list(&page_list);
1977
1978 /*
1979 * If dirty pages are scanned that are not queued for IO, it
1980 * implies that flushers are not doing their job. This can
1981 * happen when memory pressure pushes dirty pages to the end of
1982 * the LRU before the dirty limits are breached and the dirty
1983 * data has expired. It can also happen when the proportion of
1984 * dirty pages grows not through writes but through memory
1985 * pressure reclaiming all the clean cache. And in some cases,
1986 * the flushers simply cannot keep up with the allocation
1987 * rate. Nudge the flusher threads in case they are asleep.
1988 */
1989 if (stat.nr_unqueued_dirty == nr_taken)
1990 wakeup_flusher_threads(WB_REASON_VMSCAN);
1991
1992 sc->nr.dirty += stat.nr_dirty;
1993 sc->nr.congested += stat.nr_congested;
1994 sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
1995 sc->nr.writeback += stat.nr_writeback;
1996 sc->nr.immediate += stat.nr_immediate;
1997 sc->nr.taken += nr_taken;
1998 if (file)
1999 sc->nr.file_taken += nr_taken;
2000
2001 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
2002 nr_scanned, nr_reclaimed, &stat, sc->priority, file);
2003 return nr_reclaimed;
2004}
2005
2006static void shrink_active_list(unsigned long nr_to_scan,
2007 struct lruvec *lruvec,
2008 struct scan_control *sc,
2009 enum lru_list lru)
2010{
2011 unsigned long nr_taken;
2012 unsigned long nr_scanned;
2013 unsigned long vm_flags;
2014 LIST_HEAD(l_hold); /* The pages which were snipped off */
2015 LIST_HEAD(l_active);
2016 LIST_HEAD(l_inactive);
2017 struct page *page;
2018 unsigned nr_deactivate, nr_activate;
2019 unsigned nr_rotated = 0;
2020 int file = is_file_lru(lru);
2021 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2022
2023 lru_add_drain();
2024
2025 spin_lock_irq(&pgdat->lru_lock);
2026
2027 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
2028 &nr_scanned, sc, lru);
2029
2030 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
2031
2032 if (!cgroup_reclaim(sc))
2033 __count_vm_events(PGREFILL, nr_scanned);
2034 __count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
2035
2036 spin_unlock_irq(&pgdat->lru_lock);
2037
2038 while (!list_empty(&l_hold)) {
2039 cond_resched();
2040 page = lru_to_page(&l_hold);
2041 list_del(&page->lru);
2042
2043 if (unlikely(!page_evictable(page))) {
2044 putback_lru_page(page);
2045 continue;
2046 }
2047
2048 if (unlikely(buffer_heads_over_limit)) {
2049 if (page_has_private(page) && trylock_page(page)) {
2050 if (page_has_private(page))
2051 try_to_release_page(page, 0);
2052 unlock_page(page);
2053 }
2054 }
2055
2056 if (page_referenced(page, 0, sc->target_mem_cgroup,
2057 &vm_flags)) {
2058 /*
2059 * Identify referenced, file-backed active pages and
 2060 * give them one more trip around the active list, so
 2061 * that executable code gets a better chance to stay in
 2062 * memory under moderate memory pressure. Anon pages
2063 * are not likely to be evicted by use-once streaming
2064 * IO, plus JVM can create lots of anon VM_EXEC pages,
2065 * so we ignore them here.
2066 */
2067 if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) {
2068 nr_rotated += thp_nr_pages(page);
2069 list_add(&page->lru, &l_active);
2070 continue;
2071 }
2072 }
2073
2074 ClearPageActive(page); /* we are de-activating */
2075 SetPageWorkingset(page);
2076 list_add(&page->lru, &l_inactive);
2077 }
2078
2079 /*
2080 * Move pages back to the lru list.
2081 */
2082 spin_lock_irq(&pgdat->lru_lock);
2083
2084 nr_activate = move_pages_to_lru(lruvec, &l_active);
2085 nr_deactivate = move_pages_to_lru(lruvec, &l_inactive);
2086 /* Keep all free pages in l_active list */
2087 list_splice(&l_inactive, &l_active);
2088
2089 __count_vm_events(PGDEACTIVATE, nr_deactivate);
2090 __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
2091
2092 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
2093 spin_unlock_irq(&pgdat->lru_lock);
2094
2095 mem_cgroup_uncharge_list(&l_active);
2096 free_unref_page_list(&l_active);
2097 trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
2098 nr_deactivate, nr_rotated, sc->priority, file);
2099}
2100
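/*
 * reclaim_pages() takes a list of isolated pages that may span several
 * nodes, batches them per node and reclaims each batch immediately via
 * shrink_page_list(). Pages that could not be freed are put back on
 * their LRU lists. Returns the number of pages reclaimed.
 */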
2101unsigned long reclaim_pages(struct list_head *page_list)
2102{
2103 int nid = NUMA_NO_NODE;
2104 unsigned int nr_reclaimed = 0;
2105 LIST_HEAD(node_page_list);
2106 struct reclaim_stat dummy_stat;
2107 struct page *page;
2108 struct scan_control sc = {
2109 .gfp_mask = GFP_KERNEL,
2110 .priority = DEF_PRIORITY,
2111 .may_writepage = 1,
2112 .may_unmap = 1,
2113 .may_swap = 1,
2114 };
2115
2116 while (!list_empty(page_list)) {
2117 page = lru_to_page(page_list);
2118 if (nid == NUMA_NO_NODE) {
2119 nid = page_to_nid(page);
2120 INIT_LIST_HEAD(&node_page_list);
2121 }
2122
2123 if (nid == page_to_nid(page)) {
2124 ClearPageActive(page);
2125 list_move(&page->lru, &node_page_list);
2126 continue;
2127 }
2128
2129 nr_reclaimed += shrink_page_list(&node_page_list,
2130 NODE_DATA(nid),
2131 &sc, 0,
2132 &dummy_stat, false);
2133 while (!list_empty(&node_page_list)) {
2134 page = lru_to_page(&node_page_list);
2135 list_del(&page->lru);
2136 putback_lru_page(page);
2137 }
2138
2139 nid = NUMA_NO_NODE;
2140 }
2141
2142 if (!list_empty(&node_page_list)) {
2143 nr_reclaimed += shrink_page_list(&node_page_list,
2144 NODE_DATA(nid),
2145 &sc, 0,
2146 &dummy_stat, false);
2147 while (!list_empty(&node_page_list)) {
2148 page = lru_to_page(&node_page_list);
2149 list_del(&page->lru);
2150 putback_lru_page(page);
2151 }
2152 }
2153
2154 return nr_reclaimed;
2155}
2156
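/*
 * Shrink one LRU list. Active lists are only aged (deactivated) when
 * sc->may_deactivate allows it; inactive lists are reclaimed directly.
 */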
2157static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
2158 struct lruvec *lruvec, struct scan_control *sc)
2159{
2160 if (is_active_lru(lru)) {
2161 if (sc->may_deactivate & (1 << is_file_lru(lru)))
2162 shrink_active_list(nr_to_scan, lruvec, sc, lru);
2163 else
2164 sc->skipped_deactivate = 1;
2165 return 0;
2166 }
2167
2168 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
2169}
2170
2171/*
2172 * The inactive anon list should be small enough that the VM never has
2173 * to do too much work.
2174 *
2175 * The inactive file list should be small enough to leave most memory
2176 * to the established workingset on the scan-resistant active list,
2177 * but large enough to avoid thrashing the aggregate readahead window.
2178 *
2179 * Both inactive lists should also be large enough that each inactive
2180 * page has a chance to be referenced again before it is reclaimed.
2181 *
2182 * If that fails and refaulting is observed, the inactive list grows.
2183 *
2184 * The inactive_ratio is the target ratio of ACTIVE to INACTIVE pages
2185 * on this LRU, maintained by the pageout code. An inactive_ratio
2186 * of 3 means 3:1 or 25% of the pages are kept on the inactive list.
2187 *
2188 * total target max
2189 * memory ratio inactive
2190 * -------------------------------------
2191 * 10MB 1 5MB
2192 * 100MB 1 50MB
2193 * 1GB 3 250MB
2194 * 10GB 10 0.9GB
2195 * 100GB 31 3GB
2196 * 1TB 101 10GB
2197 * 10TB 320 32GB
2198 */
2199static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru)
2200{
2201 enum lru_list active_lru = inactive_lru + LRU_ACTIVE;
2202 unsigned long inactive, active;
2203 unsigned long inactive_ratio;
2204 unsigned long gb;
2205
2206 inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru);
2207 active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru);
2208
2209 gb = (inactive + active) >> (30 - PAGE_SHIFT);
2210 if (gb)
2211 inactive_ratio = int_sqrt(10 * gb);
2212 else
2213 inactive_ratio = 1;
2214
2215 return inactive * inactive_ratio < active;
2216}
2217
2218enum scan_balance {
2219 SCAN_EQUAL,
2220 SCAN_FRACT,
2221 SCAN_ANON,
2222 SCAN_FILE,
2223};
2224
2225/*
2226 * Determine how aggressively the anon and file LRU lists should be
2227 * scanned. The relative value of each set of LRU lists is determined
 2228 * by looking at the fraction of the scanned pages that we rotated back
 2229 * onto the active list instead of evicting.
2230 *
2231 * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
2232 * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
2233 */
2234static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
2235 unsigned long *nr)
2236{
2237 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
2238 unsigned long anon_cost, file_cost, total_cost;
2239 int swappiness = mem_cgroup_swappiness(memcg);
2240 u64 fraction[2];
2241 u64 denominator = 0; /* gcc */
2242 enum scan_balance scan_balance;
2243 unsigned long ap, fp;
2244 enum lru_list lru;
2245
2246 /* If we have no swap space, do not bother scanning anon pages. */
2247 if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
2248 scan_balance = SCAN_FILE;
2249 goto out;
2250 }
2251
2252 /*
2253 * Global reclaim will swap to prevent OOM even with no
2254 * swappiness, but memcg users want to use this knob to
2255 * disable swapping for individual groups completely when
2256 * using the memory controller's swap limit feature would be
2257 * too expensive.
2258 */
2259 if (cgroup_reclaim(sc) && !swappiness) {
2260 scan_balance = SCAN_FILE;
2261 goto out;
2262 }
2263
2264 /*
2265 * Do not apply any pressure balancing cleverness when the
2266 * system is close to OOM, scan both anon and file equally
2267 * (unless the swappiness setting disagrees with swapping).
2268 */
2269 if (!sc->priority && swappiness) {
2270 scan_balance = SCAN_EQUAL;
2271 goto out;
2272 }
2273
2274 /*
2275 * If the system is almost out of file pages, force-scan anon.
2276 */
2277 if (sc->file_is_tiny) {
2278 scan_balance = SCAN_ANON;
2279 goto out;
2280 }
2281
2282 /*
2283 * If there is enough inactive page cache, we do not reclaim
 2284 * anything from the anonymous working set right now.
2285 */
2286 if (sc->cache_trim_mode) {
2287 scan_balance = SCAN_FILE;
2288 goto out;
2289 }
2290
2291 scan_balance = SCAN_FRACT;
2292 /*
2293 * Calculate the pressure balance between anon and file pages.
2294 *
2295 * The amount of pressure we put on each LRU is inversely
2296 * proportional to the cost of reclaiming each list, as
2297 * determined by the share of pages that are refaulting, times
2298 * the relative IO cost of bringing back a swapped out
2299 * anonymous page vs reloading a filesystem page (swappiness).
2300 *
2301 * Although we limit that influence to ensure no list gets
2302 * left behind completely: at least a third of the pressure is
2303 * applied, before swappiness.
2304 *
2305 * With swappiness at 100, anon and file have equal IO cost.
2306 */
	 */
 2307	total_cost = sc->anon_cost + sc->file_cost;
2308 anon_cost = total_cost + sc->anon_cost;
2309 file_cost = total_cost + sc->file_cost;
2310 total_cost = anon_cost + file_cost;
2311
2312 ap = swappiness * (total_cost + 1);
2313 ap /= anon_cost + 1;
2314
2315 fp = (200 - swappiness) * (total_cost + 1);
2316 fp /= file_cost + 1;
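	/*
	 * Worked example (illustrative numbers): with swappiness == 100 and
	 * all recent rotations on the file LRU (sc->anon_cost == 0,
	 * sc->file_cost == 100), ap works out to roughly twice fp, so about
	 * two thirds of the scan pressure goes to the cheaper-to-reclaim
	 * anon LRUs while the file LRUs still get their guaranteed third.
	 */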
2317
2318 fraction[0] = ap;
2319 fraction[1] = fp;
2320 denominator = ap + fp;
2321out:
2322 for_each_evictable_lru(lru) {
2323 int file = is_file_lru(lru);
2324 unsigned long lruvec_size;
2325 unsigned long scan;
2326 unsigned long protection;
2327
2328 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
2329 protection = mem_cgroup_protection(sc->target_mem_cgroup,
2330 memcg,
2331 sc->memcg_low_reclaim);
2332
2333 if (protection) {
2334 /*
2335 * Scale a cgroup's reclaim pressure by proportioning
2336 * its current usage to its memory.low or memory.min
2337 * setting.
2338 *
2339 * This is important, as otherwise scanning aggression
2340 * becomes extremely binary -- from nothing as we
2341 * approach the memory protection threshold, to totally
2342 * nominal as we exceed it. This results in requiring
2343 * setting extremely liberal protection thresholds. It
2344 * also means we simply get no protection at all if we
2345 * set it too low, which is not ideal.
2346 *
2347 * If there is any protection in place, we reduce scan
2348 * pressure by how much of the total memory used is
2349 * within protection thresholds.
2350 *
2351 * There is one special case: in the first reclaim pass,
2352 * we skip over all groups that are within their low
2353 * protection. If that fails to reclaim enough pages to
2354 * satisfy the reclaim goal, we come back and override
2355 * the best-effort low protection. However, we still
2356 * ideally want to honor how well-behaved groups are in
2357 * that case instead of simply punishing them all
2358 * equally. As such, we reclaim them based on how much
2359 * memory they are using, reducing the scan pressure
2360 * again by how much of the total memory used is under
2361 * hard protection.
2362 */
2363 unsigned long cgroup_size = mem_cgroup_size(memcg);
2364
2365 /* Avoid TOCTOU with earlier protection check */
2366 cgroup_size = max(cgroup_size, protection);
2367
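			/*
			 * e.g. a cgroup using 1G with 512M of protection in
			 * effect has half of its usage protected, so its LRUs
			 * are scanned at half the unprotected rate.
			 */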
2368 scan = lruvec_size - lruvec_size * protection /
2369 cgroup_size;
2370
2371 /*
2372 * Minimally target SWAP_CLUSTER_MAX pages to keep
2373 * reclaim moving forwards, avoiding decrementing
2374 * sc->priority further than desirable.
2375 */
2376 scan = max(scan, SWAP_CLUSTER_MAX);
2377 } else {
2378 scan = lruvec_size;
2379 }
2380
2381 scan >>= sc->priority;
2382
2383 /*
2384 * If the cgroup's already been deleted, make sure to
2385 * scrape out the remaining cache.
2386 */
2387 if (!scan && !mem_cgroup_online(memcg))
2388 scan = min(lruvec_size, SWAP_CLUSTER_MAX);
2389
2390 switch (scan_balance) {
2391 case SCAN_EQUAL:
2392 /* Scan lists relative to size */
2393 break;
2394 case SCAN_FRACT:
2395 /*
2396 * Scan types proportional to swappiness and
2397 * their relative recent reclaim efficiency.
2398 * Make sure we don't miss the last page on
2399 * the offlined memory cgroups because of a
2400 * round-off error.
2401 */
2402 scan = mem_cgroup_online(memcg) ?
2403 div64_u64(scan * fraction[file], denominator) :
2404 DIV64_U64_ROUND_UP(scan * fraction[file],
2405 denominator);
2406 break;
2407 case SCAN_FILE:
2408 case SCAN_ANON:
2409 /* Scan one type exclusively */
2410 if ((scan_balance == SCAN_FILE) != file)
2411 scan = 0;
2412 break;
2413 default:
2414 /* Look ma, no brain */
2415 BUG();
2416 }
2417
2418 nr[lru] = scan;
2419 }
2420}
2421
2422static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
2423{
2424 unsigned long nr[NR_LRU_LISTS];
2425 unsigned long targets[NR_LRU_LISTS];
2426 unsigned long nr_to_scan;
2427 enum lru_list lru;
2428 unsigned long nr_reclaimed = 0;
2429 unsigned long nr_to_reclaim = sc->nr_to_reclaim;
2430 struct blk_plug plug;
2431 bool scan_adjusted;
2432
2433 get_scan_count(lruvec, sc, nr);
2434
2435 /* Record the original scan target for proportional adjustments later */
2436 memcpy(targets, nr, sizeof(nr));
2437
2438 /*
2439 * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
2440 * event that can occur when there is little memory pressure e.g.
2441 * multiple streaming readers/writers. Hence, we do not abort scanning
2442 * when the requested number of pages are reclaimed when scanning at
2443 * DEF_PRIORITY on the assumption that the fact we are direct
2444 * reclaiming implies that kswapd is not keeping up and it is best to
2445 * do a batch of work at once. For memcg reclaim one check is made to
2446 * abort proportional reclaim if either the file or anon lru has already
2447 * dropped to zero at the first pass.
2448 */
2449 scan_adjusted = (!cgroup_reclaim(sc) && !current_is_kswapd() &&
2450 sc->priority == DEF_PRIORITY);
2451
2452 blk_start_plug(&plug);
2453 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
2454 nr[LRU_INACTIVE_FILE]) {
2455 unsigned long nr_anon, nr_file, percentage;
2456 unsigned long nr_scanned;
2457
2458 for_each_evictable_lru(lru) {
2459 if (nr[lru]) {
2460 nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
2461 nr[lru] -= nr_to_scan;
2462
2463 nr_reclaimed += shrink_list(lru, nr_to_scan,
2464 lruvec, sc);
2465 }
2466 }
2467
2468 cond_resched();
2469
2470 if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
2471 continue;
2472
2473 /*
2474 * For kswapd and memcg, reclaim at least the number of pages
2475 * requested. Ensure that the anon and file LRUs are scanned
 2476 * in proportion to what was requested by get_scan_count(). We
 2477 * stop reclaiming one LRU and reduce the amount of scanning
2478 * proportional to the original scan target.
2479 */
2480 nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
2481 nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
2482
2483 /*
2484 * It's just vindictive to attack the larger once the smaller
2485 * has gone to zero. And given the way we stop scanning the
2486 * smaller below, this makes sure that we only make one nudge
2487 * towards proportionality once we've got nr_to_reclaim.
2488 */
2489 if (!nr_file || !nr_anon)
2490 break;
2491
2492 if (nr_file > nr_anon) {
2493 unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
2494 targets[LRU_ACTIVE_ANON] + 1;
2495 lru = LRU_BASE;
2496 percentage = nr_anon * 100 / scan_target;
2497 } else {
2498 unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
2499 targets[LRU_ACTIVE_FILE] + 1;
2500 lru = LRU_FILE;
2501 percentage = nr_file * 100 / scan_target;
2502 }
2503
2504 /* Stop scanning the smaller of the LRU */
2505 nr[lru] = 0;
2506 nr[lru + LRU_ACTIVE] = 0;
2507
2508 /*
2509 * Recalculate the other LRU scan count based on its original
2510 * scan target and the percentage scanning already complete
2511 */
2512 lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
2513 nr_scanned = targets[lru] - nr[lru];
2514 nr[lru] = targets[lru] * (100 - percentage) / 100;
2515 nr[lru] -= min(nr[lru], nr_scanned);
2516
2517 lru += LRU_ACTIVE;
2518 nr_scanned = targets[lru] - nr[lru];
2519 nr[lru] = targets[lru] * (100 - percentage) / 100;
2520 nr[lru] -= min(nr[lru], nr_scanned);
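		/*
		 * e.g. if the smaller (anon) side had a target of 200 pages
		 * and 150 were scanned when the reclaim goal was met (75%
		 * complete), anon scanning stops and the file side is trimmed
		 * so that it also finishes at 75% of its original target.
		 */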
2521
2522 scan_adjusted = true;
2523 }
2524 blk_finish_plug(&plug);
2525 sc->nr_reclaimed += nr_reclaimed;
2526
2527 /*
2528 * Even if we did not try to evict anon pages at all, we want to
2529 * rebalance the anon lru active/inactive ratio.
2530 */
2531 if (total_swap_pages && inactive_is_low(lruvec, LRU_INACTIVE_ANON))
2532 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
2533 sc, LRU_ACTIVE_ANON);
2534}
2535
2536/* Use reclaim/compaction for costly allocs or under memory pressure */
2537static bool in_reclaim_compaction(struct scan_control *sc)
2538{
2539 if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
2540 (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
2541 sc->priority < DEF_PRIORITY - 2))
2542 return true;
2543
2544 return false;
2545}
2546
2547/*
2548 * Reclaim/compaction is used for high-order allocation requests. It reclaims
2549 * order-0 pages before compacting the zone. should_continue_reclaim() returns
2550 * true if more pages should be reclaimed such that when the page allocator
2551 * calls try_to_compact_pages() that it will have enough free pages to succeed.
2552 * It will give up earlier than that if there is difficulty reclaiming pages.
2553 */
2554static inline bool should_continue_reclaim(struct pglist_data *pgdat,
2555 unsigned long nr_reclaimed,
2556 struct scan_control *sc)
2557{
2558 unsigned long pages_for_compaction;
2559 unsigned long inactive_lru_pages;
2560 int z;
2561
2562 /* If not in reclaim/compaction mode, stop */
2563 if (!in_reclaim_compaction(sc))
2564 return false;
2565
2566 /*
2567 * Stop if we failed to reclaim any pages from the last SWAP_CLUSTER_MAX
2568 * number of pages that were scanned. This will return to the caller
2569 * with the risk reclaim/compaction and the resulting allocation attempt
2570 * fails. In the past we have tried harder for __GFP_RETRY_MAYFAIL
2571 * allocations through requiring that the full LRU list has been scanned
2572 * first, by assuming that zero delta of sc->nr_scanned means full LRU
2573 * scan, but that approximation was wrong, and there were corner cases
2574 * where always a non-zero amount of pages were scanned.
2575 */
2576 if (!nr_reclaimed)
2577 return false;
2578
2579 /* If compaction would go ahead or the allocation would succeed, stop */
2580 for (z = 0; z <= sc->reclaim_idx; z++) {
2581 struct zone *zone = &pgdat->node_zones[z];
2582 if (!managed_zone(zone))
2583 continue;
2584
2585 switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
2586 case COMPACT_SUCCESS:
2587 case COMPACT_CONTINUE:
2588 return false;
2589 default:
2590 /* check next zone */
2591 ;
2592 }
2593 }
2594
2595 /*
2596 * If we have not reclaimed enough pages for compaction and the
2597 * inactive lists are large enough, continue reclaiming
2598 */
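	/* compact_gap() leaves headroom of twice the requested allocation size */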
2599 pages_for_compaction = compact_gap(sc->order);
2600 inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
2601 if (get_nr_swap_pages() > 0)
2602 inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
2603
2604 return inactive_lru_pages > pages_for_compaction;
2605}
2606
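/*
 * Walk every memcg in the reclaim subtree (or the whole node for global
 * reclaim), honouring memory.min/memory.low protection, and shrink each
 * one's LRU lists and slab caches.
 */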
2607static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
2608{
2609 struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
2610 struct mem_cgroup *memcg;
2611
2612 memcg = mem_cgroup_iter(target_memcg, NULL, NULL);
2613 do {
2614 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
2615 unsigned long reclaimed;
2616 unsigned long scanned;
2617
2618 /*
2619 * This loop can become CPU-bound when target memcgs
2620 * aren't eligible for reclaim - either because they
2621 * don't have any reclaimable pages, or because their
2622 * memory is explicitly protected. Avoid soft lockups.
2623 */
2624 cond_resched();
2625
2626 mem_cgroup_calculate_protection(target_memcg, memcg);
2627
2628 if (mem_cgroup_below_min(memcg)) {
2629 /*
2630 * Hard protection.
2631 * If there is no reclaimable memory, OOM.
2632 */
2633 continue;
2634 } else if (mem_cgroup_below_low(memcg)) {
2635 /*
2636 * Soft protection.
2637 * Respect the protection only as long as
2638 * there is an unprotected supply
2639 * of reclaimable memory from other cgroups.
2640 */
2641 if (!sc->memcg_low_reclaim) {
2642 sc->memcg_low_skipped = 1;
2643 continue;
2644 }
2645 memcg_memory_event(memcg, MEMCG_LOW);
2646 }
2647
2648 reclaimed = sc->nr_reclaimed;
2649 scanned = sc->nr_scanned;
2650
2651 shrink_lruvec(lruvec, sc);
2652
2653 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
2654 sc->priority);
2655
2656 /* Record the group's reclaim efficiency */
2657 vmpressure(sc->gfp_mask, memcg, false,
2658 sc->nr_scanned - scanned,
2659 sc->nr_reclaimed - reclaimed);
2660
2661 } while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL)));
2662}
2663
2664static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
2665{
2666 struct reclaim_state *reclaim_state = current->reclaim_state;
2667 unsigned long nr_reclaimed, nr_scanned;
2668 struct lruvec *target_lruvec;
2669 bool reclaimable = false;
2670 unsigned long file;
2671
2672 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
2673
2674again:
2675 memset(&sc->nr, 0, sizeof(sc->nr));
2676
2677 nr_reclaimed = sc->nr_reclaimed;
2678 nr_scanned = sc->nr_scanned;
2679
2680 /*
2681 * Determine the scan balance between anon and file LRUs.
2682 */
2683 spin_lock_irq(&pgdat->lru_lock);
2684 sc->anon_cost = target_lruvec->anon_cost;
2685 sc->file_cost = target_lruvec->file_cost;
2686 spin_unlock_irq(&pgdat->lru_lock);
2687
2688 /*
2689 * Target desirable inactive:active list ratios for the anon
2690 * and file LRU lists.
2691 */
2692 if (!sc->force_deactivate) {
2693 unsigned long refaults;
2694
2695 refaults = lruvec_page_state(target_lruvec,
2696 WORKINGSET_ACTIVATE_ANON);
2697 if (refaults != target_lruvec->refaults[0] ||
2698 inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
2699 sc->may_deactivate |= DEACTIVATE_ANON;
2700 else
2701 sc->may_deactivate &= ~DEACTIVATE_ANON;
2702
2703 /*
2704 * When refaults are being observed, it means a new
2705 * workingset is being established. Deactivate to get
2706 * rid of any stale active pages quickly.
2707 */
2708 refaults = lruvec_page_state(target_lruvec,
2709 WORKINGSET_ACTIVATE_FILE);
2710 if (refaults != target_lruvec->refaults[1] ||
2711 inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
2712 sc->may_deactivate |= DEACTIVATE_FILE;
2713 else
2714 sc->may_deactivate &= ~DEACTIVATE_FILE;
2715 } else
2716 sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
2717
2718 /*
2719 * If we have plenty of inactive file pages that aren't
2720 * thrashing, try to reclaim those first before touching
2721 * anonymous pages.
2722 */
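	/*
	 * At DEF_PRIORITY (12), "file >> sc->priority" requires at least
	 * 2^12 inactive file pages (16MB with 4K pages) before cache
	 * trimming kicks in.
	 */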
2723 file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
2724 if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
2725 sc->cache_trim_mode = 1;
2726 else
2727 sc->cache_trim_mode = 0;
2728
2729 /*
2730 * Prevent the reclaimer from falling into the cache trap: as
2731 * cache pages start out inactive, every cache fault will tip
2732 * the scan balance towards the file LRU. And as the file LRU
2733 * shrinks, so does the window for rotation from references.
2734 * This means we have a runaway feedback loop where a tiny
2735 * thrashing file LRU becomes infinitely more attractive than
2736 * anon pages. Try to detect this based on file LRU size.
2737 */
2738 if (!cgroup_reclaim(sc)) {
2739 unsigned long total_high_wmark = 0;
2740 unsigned long free, anon;
2741 int z;
2742
2743 free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
2744 file = node_page_state(pgdat, NR_ACTIVE_FILE) +
2745 node_page_state(pgdat, NR_INACTIVE_FILE);
2746
2747 for (z = 0; z < MAX_NR_ZONES; z++) {
2748 struct zone *zone = &pgdat->node_zones[z];
2749 if (!managed_zone(zone))
2750 continue;
2751
2752 total_high_wmark += high_wmark_pages(zone);
2753 }
2754
2755 /*
2756 * Consider anon: if that's low too, this isn't a
2757 * runaway file reclaim problem, but rather just
2758 * extreme pressure. Reclaim as per usual then.
2759 */
2760 anon = node_page_state(pgdat, NR_INACTIVE_ANON);
2761
2762 sc->file_is_tiny =
2763 file + free <= total_high_wmark &&
2764 !(sc->may_deactivate & DEACTIVATE_ANON) &&
2765 anon >> sc->priority;
2766 }
2767
2768 shrink_node_memcgs(pgdat, sc);
2769
2770 if (reclaim_state) {
2771 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
2772 reclaim_state->reclaimed_slab = 0;
2773 }
2774
2775 /* Record the subtree's reclaim efficiency */
2776 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
2777 sc->nr_scanned - nr_scanned,
2778 sc->nr_reclaimed - nr_reclaimed);
2779
2780 if (sc->nr_reclaimed - nr_reclaimed)
2781 reclaimable = true;
2782
2783 if (current_is_kswapd()) {
2784 /*
2785 * If reclaim is isolating dirty pages under writeback,
2786 * it implies that the long-lived page allocation rate
2787 * is exceeding the page laundering rate. Either the
2788 * global limits are not being effective at throttling
2789 * processes due to the page distribution throughout
2790 * zones or there is heavy usage of a slow backing
2791 * device. The only option is to throttle from reclaim
2792 * context which is not ideal as there is no guarantee
2793 * the dirtying process is throttled in the same way
2794 * balance_dirty_pages() manages.
2795 *
2796 * Once a node is flagged PGDAT_WRITEBACK, kswapd will
 2797 * count the number of pages under writeback flagged for
2798 * immediate reclaim and stall if any are encountered
2799 * in the nr_immediate check below.
2800 */
2801 if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken)
2802 set_bit(PGDAT_WRITEBACK, &pgdat->flags);
2803
2804 /* Allow kswapd to start writing pages during reclaim.*/
2805 if (sc->nr.unqueued_dirty == sc->nr.file_taken)
2806 set_bit(PGDAT_DIRTY, &pgdat->flags);
2807
2808 /*
2809 * If kswapd scans pages marked for immediate
2810 * reclaim and under writeback (nr_immediate), it
2811 * implies that pages are cycling through the LRU
2812 * faster than they are written so also forcibly stall.
2813 */
2814 if (sc->nr.immediate)
2815 congestion_wait(BLK_RW_ASYNC, HZ/10);
2816 }
2817
2818 /*
2819 * Tag a node/memcg as congested if all the dirty pages
2820 * scanned were backed by a congested BDI and
2821 * wait_iff_congested will stall.
2822 *
2823 * Legacy memcg will stall in page writeback so avoid forcibly
2824 * stalling in wait_iff_congested().
2825 */
2826 if ((current_is_kswapd() ||
2827 (cgroup_reclaim(sc) && writeback_throttling_sane(sc))) &&
2828 sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
2829 set_bit(LRUVEC_CONGESTED, &target_lruvec->flags);
2830
2831 /*
2832 * Stall direct reclaim for IO completions if underlying BDIs
2833 * and node is congested. Allow kswapd to continue until it
2834 * starts encountering unqueued dirty pages or cycling through
2835 * the LRU too quickly.
2836 */
2837 if (!current_is_kswapd() && current_may_throttle() &&
2838 !sc->hibernation_mode &&
2839 test_bit(LRUVEC_CONGESTED, &target_lruvec->flags))
2840 wait_iff_congested(BLK_RW_ASYNC, HZ/10);
2841
2842 if (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
2843 sc))
2844 goto again;
2845
2846 /*
2847 * Kswapd gives up on balancing particular nodes after too
2848 * many failures to reclaim anything from them and goes to
2849 * sleep. On reclaim progress, reset the failure counter. A
2850 * successful direct reclaim run will revive a dormant kswapd.
2851 */
2852 if (reclaimable)
2853 pgdat->kswapd_failures = 0;
2854}
2855
2856/*
2857 * Returns true if compaction should go ahead for a costly-order request, or
2858 * the allocation would already succeed without compaction. Return false if we
2859 * should reclaim first.
2860 */
2861static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
2862{
2863 unsigned long watermark;
2864 enum compact_result suitable;
2865
2866 suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx);
2867 if (suitable == COMPACT_SUCCESS)
2868 /* Allocation should succeed already. Don't reclaim. */
2869 return true;
2870 if (suitable == COMPACT_SKIPPED)
2871 /* Compaction cannot yet proceed. Do reclaim. */
2872 return false;
2873
2874 /*
2875 * Compaction is already possible, but it takes time to run and there
2876 * are potentially other callers using the pages just freed. So proceed
2877 * with reclaim to make a buffer of free pages available to give
2878 * compaction a reasonable chance of completing and allocating the page.
2879 * Note that we won't actually reclaim the whole buffer in one attempt
2880 * as the target watermark in should_continue_reclaim() is lower. But if
2881 * we are already above the high+gap watermark, don't reclaim at all.
2882 */
2883 watermark = high_wmark_pages(zone) + compact_gap(sc->order);
2884
2885 return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
2886}
2887
2888/*
2889 * This is the direct reclaim path, for page-allocating processes. We only
2890 * try to reclaim pages from zones which will satisfy the caller's allocation
2891 * request.
2892 *
2893 * If a zone is deemed to be full of pinned pages then just give it a light
2894 * scan then give up on it.
2895 */
2896static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
2897{
2898 struct zoneref *z;
2899 struct zone *zone;
2900 unsigned long nr_soft_reclaimed;
2901 unsigned long nr_soft_scanned;
2902 gfp_t orig_mask;
2903 pg_data_t *last_pgdat = NULL;
2904
2905 /*
2906 * If the number of buffer_heads in the machine exceeds the maximum
2907 * allowed level, force direct reclaim to scan the highmem zone as
2908 * highmem pages could be pinning lowmem pages storing buffer_heads
2909 */
2910 orig_mask = sc->gfp_mask;
2911 if (buffer_heads_over_limit) {
2912 sc->gfp_mask |= __GFP_HIGHMEM;
2913 sc->reclaim_idx = gfp_zone(sc->gfp_mask);
2914 }
2915
2916 for_each_zone_zonelist_nodemask(zone, z, zonelist,
2917 sc->reclaim_idx, sc->nodemask) {
2918 /*
 2919 * Take care that memory controller reclaim has only a small
 2920 * influence on the global LRU.
2921 */
2922 if (!cgroup_reclaim(sc)) {
2923 if (!cpuset_zone_allowed(zone,
2924 GFP_KERNEL | __GFP_HARDWALL))
2925 continue;
2926
2927 /*
2928 * If we already have plenty of memory free for
2929 * compaction in this zone, don't free any more.
2930 * Even though compaction is invoked for any
2931 * non-zero order, only frequent costly order
2932 * reclamation is disruptive enough to become a
2933 * noticeable problem, like transparent huge
2934 * page allocations.
2935 */
2936 if (IS_ENABLED(CONFIG_COMPACTION) &&
2937 sc->order > PAGE_ALLOC_COSTLY_ORDER &&
2938 compaction_ready(zone, sc)) {
2939 sc->compaction_ready = true;
2940 continue;
2941 }
2942
2943 /*
2944 * Shrink each node in the zonelist once. If the
2945 * zonelist is ordered by zone (not the default) then a
2946 * node may be shrunk multiple times but in that case
2947 * the user prefers lower zones being preserved.
2948 */
2949 if (zone->zone_pgdat == last_pgdat)
2950 continue;
2951
2952 /*
2953 * This steals pages from memory cgroups over softlimit
2954 * and returns the number of reclaimed pages and
2955 * scanned pages. This works for global memory pressure
2956 * and balancing, not for a memcg's limit.
2957 */
2958 nr_soft_scanned = 0;
2959 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat,
2960 sc->order, sc->gfp_mask,
2961 &nr_soft_scanned);
2962 sc->nr_reclaimed += nr_soft_reclaimed;
2963 sc->nr_scanned += nr_soft_scanned;
 2964 /* need some check to avoid more shrink_node() calls */
2965 }
2966
2967 /* See comment about same check for global reclaim above */
2968 if (zone->zone_pgdat == last_pgdat)
2969 continue;
2970 last_pgdat = zone->zone_pgdat;
2971 shrink_node(zone->zone_pgdat, sc);
2972 }
2973
2974 /*
2975 * Restore to original mask to avoid the impact on the caller if we
2976 * promoted it to __GFP_HIGHMEM.
2977 */
2978 sc->gfp_mask = orig_mask;
2979}
2980
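/*
 * Remember the current anon and file workingset activation counts so the
 * next reclaim cycle can tell whether the workingset is changing (new
 * refaults being activated) since this one.
 */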
2981static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
2982{
2983 struct lruvec *target_lruvec;
2984 unsigned long refaults;
2985
2986 target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
2987 refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON);
2988 target_lruvec->refaults[0] = refaults;
2989 refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_FILE);
2990 target_lruvec->refaults[1] = refaults;
2991}
2992
2993/*
2994 * This is the main entry point to direct page reclaim.
2995 *
2996 * If a full scan of the inactive list fails to free enough memory then we
2997 * are "out of memory" and something needs to be killed.
2998 *
2999 * If the caller is !__GFP_FS then the probability of a failure is reasonably
3000 * high - the zone may be full of dirty or under-writeback pages, which this
3001 * caller can't do much about. We kick the writeback threads and take explicit
3002 * naps in the hope that some of these pages can be written. But if the
3003 * allocating task holds filesystem locks which prevent writeout this might not
3004 * work, and the allocation attempt will fail.
3005 *
3006 * returns: 0, if no pages reclaimed
3007 * else, the number of pages reclaimed
3008 */
3009static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
3010 struct scan_control *sc)
3011{
3012 int initial_priority = sc->priority;
3013 pg_data_t *last_pgdat;
3014 struct zoneref *z;
3015 struct zone *zone;
3016retry:
3017 delayacct_freepages_start();
3018
3019 if (!cgroup_reclaim(sc))
3020 __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);
3021
3022 do {
3023 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
3024 sc->priority);
3025 sc->nr_scanned = 0;
3026 shrink_zones(zonelist, sc);
3027
3028 if (sc->nr_reclaimed >= sc->nr_to_reclaim)
3029 break;
3030
3031 if (sc->compaction_ready)
3032 break;
3033
3034 /*
 3035 * If we're having trouble reclaiming, start doing
3036 * writepage even in laptop mode.
3037 */
3038 if (sc->priority < DEF_PRIORITY - 2)
3039 sc->may_writepage = 1;
3040 } while (--sc->priority >= 0);
3041
3042 last_pgdat = NULL;
3043 for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx,
3044 sc->nodemask) {
3045 if (zone->zone_pgdat == last_pgdat)
3046 continue;
3047 last_pgdat = zone->zone_pgdat;
3048
3049 snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat);
3050
3051 if (cgroup_reclaim(sc)) {
3052 struct lruvec *lruvec;
3053
3054 lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup,
3055 zone->zone_pgdat);
3056 clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
3057 }
3058 }
3059
3060 delayacct_freepages_end();
3061
3062 if (sc->nr_reclaimed)
3063 return sc->nr_reclaimed;
3064
3065 /* Aborted reclaim to try compaction? don't OOM, then */
3066 if (sc->compaction_ready)
3067 return 1;
3068
3069 /*
3070 * We make inactive:active ratio decisions based on the node's
3071 * composition of memory, but a restrictive reclaim_idx or a
3072 * memory.low cgroup setting can exempt large amounts of
 3073 * memory from reclaim. Neither of these is very common, so
3074 * instead of doing costly eligibility calculations of the
3075 * entire cgroup subtree up front, we assume the estimates are
3076 * good, and retry with forcible deactivation if that fails.
3077 */
3078 if (sc->skipped_deactivate) {
3079 sc->priority = initial_priority;
3080 sc->force_deactivate = 1;
3081 sc->skipped_deactivate = 0;
3082 goto retry;
3083 }
3084
3085 /* Untapped cgroup reserves? Don't OOM, retry. */
3086 if (sc->memcg_low_skipped) {
3087 sc->priority = initial_priority;
3088 sc->force_deactivate = 0;
3089 sc->memcg_low_reclaim = 1;
3090 sc->memcg_low_skipped = 0;
3091 goto retry;
3092 }
3093
3094 return 0;
3095}
3096
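/*
 * Returns true if direct reclaim may proceed without being throttled:
 * either kswapd has already given up on this node, or the free pages in
 * the ZONE_NORMAL-and-below zones still exceed half of their combined
 * min watermarks. Wakes kswapd when throttling is about to happen.
 */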
3097static bool allow_direct_reclaim(pg_data_t *pgdat)
3098{
3099 struct zone *zone;
3100 unsigned long pfmemalloc_reserve = 0;
3101 unsigned long free_pages = 0;
3102 int i;
3103 bool wmark_ok;
3104
3105 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
3106 return true;
3107
3108 for (i = 0; i <= ZONE_NORMAL; i++) {
3109 zone = &pgdat->node_zones[i];
3110 if (!managed_zone(zone))
3111 continue;
3112
3113 if (!zone_reclaimable_pages(zone))
3114 continue;
3115
3116 pfmemalloc_reserve += min_wmark_pages(zone);
3117 free_pages += zone_page_state(zone, NR_FREE_PAGES);
3118 }
3119
3120 /* If there are no reserves (unexpected config) then do not throttle */
3121 if (!pfmemalloc_reserve)
3122 return true;
3123
3124 wmark_ok = free_pages > pfmemalloc_reserve / 2;
3125
3126 /* kswapd must be awake if processes are being throttled */
3127 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
3128 if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL)
3129 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL);
3130
3131 wake_up_interruptible(&pgdat->kswapd_wait);
3132 }
3133
3134 return wmark_ok;
3135}
3136
3137/*
3138 * Throttle direct reclaimers if backing storage is backed by the network
3139 * and the PFMEMALLOC reserve for the preferred node is getting dangerously
3140 * depleted. kswapd will continue to make progress and wake the processes
3141 * when the low watermark is reached.
3142 *
3143 * Returns true if a fatal signal was delivered during throttling. If this
3144 * happens, the page allocator should not consider triggering the OOM killer.
3145 */
3146static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
3147 nodemask_t *nodemask)
3148{
3149 struct zoneref *z;
3150 struct zone *zone;
3151 pg_data_t *pgdat = NULL;
3152
3153 /*
3154 * Kernel threads should not be throttled as they may be indirectly
3155 * responsible for cleaning pages necessary for reclaim to make forward
3156 * progress. kjournald for example may enter direct reclaim while
 3157 * committing a transaction, where throttling it could force other
3158 * processes to block on log_wait_commit().
3159 */
3160 if (current->flags & PF_KTHREAD)
3161 goto out;
3162
3163 /*
3164 * If a fatal signal is pending, this process should not throttle.
3165 * It should return quickly so it can exit and free its memory
3166 */
3167 if (fatal_signal_pending(current))
3168 goto out;
3169
3170 /*
3171 * Check if the pfmemalloc reserves are ok by finding the first node
3172 * with a usable ZONE_NORMAL or lower zone. The expectation is that
3173 * GFP_KERNEL will be required for allocating network buffers when
3174 * swapping over the network so ZONE_HIGHMEM is unusable.
3175 *
3176 * Throttling is based on the first usable node and throttled processes
3177 * wait on a queue until kswapd makes progress and wakes them. There
3178 * is an affinity then between processes waking up and where reclaim
3179 * progress has been made assuming the process wakes on the same node.
3180 * More importantly, processes running on remote nodes will not compete
3181 * for remote pfmemalloc reserves and processes on different nodes
3182 * should make reasonable progress.
3183 */
3184 for_each_zone_zonelist_nodemask(zone, z, zonelist,
3185 gfp_zone(gfp_mask), nodemask) {
3186 if (zone_idx(zone) > ZONE_NORMAL)
3187 continue;
3188
3189 /* Throttle based on the first usable node */
3190 pgdat = zone->zone_pgdat;
3191 if (allow_direct_reclaim(pgdat))
3192 goto out;
3193 break;
3194 }
3195
3196 /* If no zone was usable by the allocation flags then do not throttle */
3197 if (!pgdat)
3198 goto out;
3199
3200 /* Account for the throttling */
3201 count_vm_event(PGSCAN_DIRECT_THROTTLE);
3202
3203 /*
3204 * If the caller cannot enter the filesystem, it's possible that it
3205 * is due to the caller holding an FS lock or performing a journal
3206 * transaction in the case of a filesystem like ext[3|4]. In this case,
3207 * it is not safe to block on pfmemalloc_wait as kswapd could be
3208 * blocked waiting on the same lock. Instead, throttle for up to a
3209 * second before continuing.
3210 */
3211 if (!(gfp_mask & __GFP_FS)) {
3212 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
3213 allow_direct_reclaim(pgdat), HZ);
3214
3215 goto check_pending;
3216 }
3217
3218 /* Throttle until kswapd wakes the process */
3219 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
3220 allow_direct_reclaim(pgdat));
3221
3222check_pending:
3223 if (fatal_signal_pending(current))
3224 return true;
3225
3226out:
3227 return false;
3228}
3229
3230unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
3231 gfp_t gfp_mask, nodemask_t *nodemask)
3232{
3233 unsigned long nr_reclaimed;
3234 struct scan_control sc = {
3235 .nr_to_reclaim = SWAP_CLUSTER_MAX,
3236 .gfp_mask = current_gfp_context(gfp_mask),
3237 .reclaim_idx = gfp_zone(gfp_mask),
3238 .order = order,
3239 .nodemask = nodemask,
3240 .priority = DEF_PRIORITY,
3241 .may_writepage = !laptop_mode,
3242 .may_unmap = 1,
3243 .may_swap = 1,
3244 };
3245
3246 /*
3247 * scan_control uses s8 fields for order, priority, and reclaim_idx.
3248 * Confirm they are large enough for max values.
3249 */
3250 BUILD_BUG_ON(MAX_ORDER > S8_MAX);
3251 BUILD_BUG_ON(DEF_PRIORITY > S8_MAX);
3252 BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX);
3253
3254 /*
3255 * Do not enter reclaim if fatal signal was delivered while throttled.
3256 * 1 is returned so that the page allocator does not OOM kill at this
3257 * point.
3258 */
3259 if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
3260 return 1;
3261
3262 set_task_reclaim_state(current, &sc.reclaim_state);
3263 trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask);
3264
3265 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
3266
3267 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
3268 set_task_reclaim_state(current, NULL);
3269
3270 return nr_reclaimed;
3271}
3272
3273#ifdef CONFIG_MEMCG
3274
3275/* Only used by soft limit reclaim. Do not reuse for anything else. */
3276unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
3277 gfp_t gfp_mask, bool noswap,
3278 pg_data_t *pgdat,
3279 unsigned long *nr_scanned)
3280{
3281 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
3282 struct scan_control sc = {
3283 .nr_to_reclaim = SWAP_CLUSTER_MAX,
3284 .target_mem_cgroup = memcg,
3285 .may_writepage = !laptop_mode,
3286 .may_unmap = 1,
3287 .reclaim_idx = MAX_NR_ZONES - 1,
3288 .may_swap = !noswap,
3289 };
3290
3291 WARN_ON_ONCE(!current->reclaim_state);
3292
3293 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
3294 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
3295
3296 trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
3297 sc.gfp_mask);
3298
3299 /*
3300 * NOTE: Although we can get the priority field, using it
3301 * here is not a good idea, since it limits the pages we can scan.
 3302 * If we don't reclaim here, the shrink_node from balance_pgdat
 3303 * will pick up pages from other mem cgroups as well. We hack
3304 * the priority and make it zero.
3305 */
3306 shrink_lruvec(lruvec, &sc);
3307
3308 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
3309
3310 *nr_scanned = sc.nr_scanned;
3311
3312 return sc.nr_reclaimed;
3313}
3314
3315unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
3316 unsigned long nr_pages,
3317 gfp_t gfp_mask,
3318 bool may_swap)
3319{
3320 unsigned long nr_reclaimed;
3321 unsigned int noreclaim_flag;
3322 struct scan_control sc = {
3323 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
3324 .gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) |
3325 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
3326 .reclaim_idx = MAX_NR_ZONES - 1,
3327 .target_mem_cgroup = memcg,
3328 .priority = DEF_PRIORITY,
3329 .may_writepage = !laptop_mode,
3330 .may_unmap = 1,
3331 .may_swap = may_swap,
3332 };
3333 /*
3334 * Traverse the ZONELIST_FALLBACK zonelist of the current node to put
3335 * equal pressure on all the nodes. This is based on the assumption that
3336 * the reclaim does not bail out early.
3337 */
3338 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
3339
3340 set_task_reclaim_state(current, &sc.reclaim_state);
3341 trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask);
3342 noreclaim_flag = memalloc_noreclaim_save();
3343
3344 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
3345
3346 memalloc_noreclaim_restore(noreclaim_flag);
3347 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
3348 set_task_reclaim_state(current, NULL);
3349
3350 return nr_reclaimed;
3351}
3352#endif
3353
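/*
 * If swap is available and the inactive anon list has become too small,
 * age the active anon list of every memcg on this node so that pages get
 * a chance to be referenced before they are reclaimed.
 */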
3354static void age_active_anon(struct pglist_data *pgdat,
3355 struct scan_control *sc)
3356{
3357 struct mem_cgroup *memcg;
3358 struct lruvec *lruvec;
3359
3360 if (!total_swap_pages)
3361 return;
3362
3363 lruvec = mem_cgroup_lruvec(NULL, pgdat);
3364 if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON))
3365 return;
3366
3367 memcg = mem_cgroup_iter(NULL, NULL, NULL);
3368 do {
3369 lruvec = mem_cgroup_lruvec(memcg, pgdat);
3370 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
3371 sc, LRU_ACTIVE_ANON);
3372 memcg = mem_cgroup_iter(NULL, memcg, NULL);
3373 } while (memcg);
3374}
3375
3376static bool pgdat_watermark_boosted(pg_data_t *pgdat, int highest_zoneidx)
3377{
3378 int i;
3379 struct zone *zone;
3380
3381 /*
3382 * Check for watermark boosts top-down as the higher zones
3383 * are more likely to be boosted. Both watermarks and boosts
3384 * should not be checked at the same time as reclaim would
3385 * start prematurely when there is no boosting and a lower
3386 * zone is balanced.
3387 */
3388 for (i = highest_zoneidx; i >= 0; i--) {
3389 zone = pgdat->node_zones + i;
3390 if (!managed_zone(zone))
3391 continue;
3392
3393 if (zone->watermark_boost)
3394 return true;
3395 }
3396
3397 return false;
3398}
3399
3400/*
3401 * Returns true if there is an eligible zone balanced for the request order
3402 * and highest_zoneidx
3403 */
3404static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx)
3405{
3406 int i;
3407 unsigned long mark = -1;
3408 struct zone *zone;
3409
3410 /*
3411 * Check watermarks bottom-up as lower zones are more likely to
3412 * meet watermarks.
3413 */
3414 for (i = 0; i <= highest_zoneidx; i++) {
3415 zone = pgdat->node_zones + i;
3416
3417 if (!managed_zone(zone))
3418 continue;
3419
3420 mark = high_wmark_pages(zone);
3421 if (zone_watermark_ok_safe(zone, order, mark, highest_zoneidx))
3422 return true;
3423 }
3424
3425 /*
3426 * If a node has no populated zone within highest_zoneidx, it does not
3427 * need balancing by definition. This can happen if a zone-restricted
3428 * allocation tries to wake a remote kswapd.
3429 */
3430 if (mark == -1)
3431 return true;
3432
3433 return false;
3434}
3435
3436/* Clear pgdat state for congested, dirty or under writeback. */
3437static void clear_pgdat_congested(pg_data_t *pgdat)
3438{
3439 struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat);
3440
3441 clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
3442 clear_bit(PGDAT_DIRTY, &pgdat->flags);
3443 clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
3444}
3445
3446/*
3447 * Prepare kswapd for sleeping. This verifies that there are no processes
3448 * waiting in throttle_direct_reclaim() and that watermarks have been met.
3449 *
3450 * Returns true if kswapd is ready to sleep
3451 */
3452static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order,
3453 int highest_zoneidx)
3454{
3455 /*
3456 * The throttled processes are normally woken up in balance_pgdat() as
3457 * soon as allow_direct_reclaim() is true. But there is a potential
3458 * race between when kswapd checks the watermarks and a process gets
3459 * throttled. There is also a potential race if processes get
3460 * throttled, kswapd wakes, a large process exits thereby balancing the
3461 * zones, which causes kswapd to exit balance_pgdat() before reaching
3462 * the wake up checks. If kswapd is going to sleep, no process should
3463 * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
3464 * the wake up is premature, processes will wake kswapd and get
3465 * throttled again. The difference from wake ups in balance_pgdat() is
3466 * that here we are under prepare_to_wait().
3467 */
3468 if (waitqueue_active(&pgdat->pfmemalloc_wait))
3469 wake_up_all(&pgdat->pfmemalloc_wait);
3470
3471 /* Hopeless node, leave it to direct reclaim */
3472 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
3473 return true;
3474
3475 if (pgdat_balanced(pgdat, order, highest_zoneidx)) {
3476 clear_pgdat_congested(pgdat);
3477 return true;
3478 }
3479
3480 return false;
3481}
3482
3483/*
3484 * kswapd shrinks a node of pages that are at or below the highest usable
3485 * zone that is currently unbalanced.
3486 *
3487 * Returns true if kswapd scanned at least the requested number of pages to
3488 * reclaim or if the lack of progress was due to pages under writeback.
3489 * This is used to determine if the scanning priority needs to be raised.
3490 */
3491static bool kswapd_shrink_node(pg_data_t *pgdat,
3492 struct scan_control *sc)
3493{
3494 struct zone *zone;
3495 int z;
3496
3497 /* Reclaim a number of pages proportional to the number of zones */
3498 sc->nr_to_reclaim = 0;
3499 for (z = 0; z <= sc->reclaim_idx; z++) {
3500 zone = pgdat->node_zones + z;
3501 if (!managed_zone(zone))
3502 continue;
3503
3504 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
3505 }
3506
3507 /*
3508 * Historically care was taken to put equal pressure on all zones but
3509 * now pressure is applied based on node LRU order.
3510 */
3511 shrink_node(pgdat, sc);
3512
3513 /*
3514 * Fragmentation may mean that the system cannot be rebalanced for
3515 * high-order allocations. If twice the allocation size has been
3516 * reclaimed then recheck watermarks only at order-0 to prevent
 3517 * excessive reclaim. Assume that a process that requested a
 3518 * high-order allocation can direct reclaim/compact.
3519 */
3520 if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
3521 sc->order = 0;
3522
3523 return sc->nr_scanned >= sc->nr_to_reclaim;
3524}
3525
3526/*
3527 * For kswapd, balance_pgdat() will reclaim pages across a node from zones
3528 * that are eligible for use by the caller until at least one zone is
3529 * balanced.
3530 *
3531 * Returns the order kswapd finished reclaiming at.
3532 *
3533 * kswapd scans the zones in the highmem->normal->dma direction. It skips
3534 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
3535 * found to have free_pages <= high_wmark_pages(zone), any page in that zone
3536 * or lower is eligible for reclaim until at least one usable zone is
3537 * balanced.
3538 */
3539static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
3540{
3541 int i;
3542 unsigned long nr_soft_reclaimed;
3543 unsigned long nr_soft_scanned;
3544 unsigned long pflags;
3545 unsigned long nr_boost_reclaim;
3546 unsigned long zone_boosts[MAX_NR_ZONES] = { 0, };
3547 bool boosted;
3548 struct zone *zone;
3549 struct scan_control sc = {
3550 .gfp_mask = GFP_KERNEL,
3551 .order = order,
3552 .may_unmap = 1,
3553 };
3554
3555 set_task_reclaim_state(current, &sc.reclaim_state);
3556 psi_memstall_enter(&pflags);
3557 __fs_reclaim_acquire();
3558
3559 count_vm_event(PAGEOUTRUN);
3560
3561 /*
3562 * Account for the reclaim boost. Note that the zone boost is left in
3563 * place so that parallel allocations that are near the watermark will
3564 * stall or direct reclaim until kswapd is finished.
3565 */
3566 nr_boost_reclaim = 0;
3567 for (i = 0; i <= highest_zoneidx; i++) {
3568 zone = pgdat->node_zones + i;
3569 if (!managed_zone(zone))
3570 continue;
3571
3572 nr_boost_reclaim += zone->watermark_boost;
3573 zone_boosts[i] = zone->watermark_boost;
3574 }
3575 boosted = nr_boost_reclaim;
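
	/*
	 * "boosted" remembers whether any zone had a watermark boost when
	 * this pass started; it is checked at the "out" label so the boost
	 * consumed here can be subtracted from each zone and kcompactd can
	 * be woken once balancing is done.
	 */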
3576
3577restart:
3578 sc.priority = DEF_PRIORITY;
3579 do {
3580 unsigned long nr_reclaimed = sc.nr_reclaimed;
3581 bool raise_priority = true;
3582 bool balanced;
3583 bool ret;
3584
3585 sc.reclaim_idx = highest_zoneidx;
3586
3587 /*
3588 * If the number of buffer_heads exceeds the maximum allowed
3589 * then consider reclaiming from all zones. This has a dual
3590 * purpose -- on 64-bit systems it is expected that
3591 * buffer_heads are stripped during active rotation. On 32-bit
3592 * systems, highmem pages can pin lowmem memory and shrinking
3593 * buffers can relieve lowmem pressure. Reclaim may still not
3594 * go ahead if all eligible zones for the original allocation
3595 * request are balanced to avoid excessive reclaim from kswapd.
3596 */
3597 if (buffer_heads_over_limit) {
3598 for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
3599 zone = pgdat->node_zones + i;
3600 if (!managed_zone(zone))
3601 continue;
3602
3603 sc.reclaim_idx = i;
3604 break;
3605 }
3606 }
3607
3608 /*
3609 * If the pgdat is imbalanced then ignore boosting and preserve
3610 * the watermarks for a later time and restart. Note that the
3611 * zone watermarks will still be reset at the end of balancing
3612 * on the grounds that the normal reclaim should be enough to
3613 * re-evaluate if boosting is required when kswapd next wakes.
3614 */
3615 balanced = pgdat_balanced(pgdat, sc.order, highest_zoneidx);
3616 if (!balanced && nr_boost_reclaim) {
3617 nr_boost_reclaim = 0;
3618 goto restart;
3619 }
3620
3621 /*
3622 * If boosting is not active then only reclaim if there are no
3623 * eligible zones. Note that sc.reclaim_idx is not used as
3624 * buffer_heads_over_limit may have adjusted it.
3625 */
3626 if (!nr_boost_reclaim && balanced)
3627 goto out;
3628
3629 /* Limit the priority of boosting to avoid reclaim writeback */
3630 if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2)
3631 raise_priority = false;
3632
3633 /*
3634 * Do not write back or swap pages for boosted reclaim. The
3635 * intent is to relieve pressure, not to issue sub-optimal IO
3636 * from reclaim context. If no pages are reclaimed, the
3637 * reclaim will be aborted.
3638 */
3639 sc.may_writepage = !laptop_mode && !nr_boost_reclaim;
3640 sc.may_swap = !nr_boost_reclaim;
3641
3642 /*
3643 * Do some background aging of the anon list, to give
3644 * pages a chance to be referenced before reclaiming. All
3645 * pages are rotated regardless of classzone as this is
3646 * about consistent aging.
3647 */
3648 age_active_anon(pgdat, &sc);
3649
3650 /*
3651 * If we're getting trouble reclaiming, start doing writepage
3652 * even in laptop mode.
3653 */
3654 if (sc.priority < DEF_PRIORITY - 2)
3655 sc.may_writepage = 1;
3656
3657 /* Call soft limit reclaim before calling shrink_node. */
3658 sc.nr_scanned = 0;
3659 nr_soft_scanned = 0;
3660 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order,
3661 sc.gfp_mask, &nr_soft_scanned);
3662 sc.nr_reclaimed += nr_soft_reclaimed;
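
		/*
		 * Pages reclaimed on behalf of memcgs over their soft limit
		 * count towards this pass's progress just like pages freed by
		 * kswapd_shrink_node() below.
		 */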
3663
3664 /*
3665 * There should be no need to raise the scanning priority if
3666 * enough pages are already being scanned that the high
3667 * watermark would be met at 100% efficiency.
3668 */
3669 if (kswapd_shrink_node(pgdat, &sc))
3670 raise_priority = false;
3671
3672 /*
3673 * If the low watermark is met there is no need for processes
3674 * to be throttled on pfmemalloc_wait as they should now be
3675 * able to safely make forward progress. Wake them up.
3676 */
3677 if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
3678 allow_direct_reclaim(pgdat))
3679 wake_up_all(&pgdat->pfmemalloc_wait);
3680
3681 /* Check if kswapd should be suspending */
3682 __fs_reclaim_release();
3683 ret = try_to_freeze();
3684 __fs_reclaim_acquire();
3685 if (ret || kthread_should_stop())
3686 break;
3687
3688 /*
3689 * Raise priority if scanning rate is too low or there was no
3690 * progress in reclaiming pages
3691 */
3692 nr_reclaimed = sc.nr_reclaimed - nr_reclaimed;
3693 nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed);
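
		/*
		 * Every page reclaimed in this iteration pays down the boost
		 * budget; once nr_boost_reclaim reaches zero, writeback and
		 * swap are re-enabled at the top of the loop and the pass
		 * behaves like ordinary watermark-driven reclaim again.
		 */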
3694
3695 /*
3696 * If reclaim made no progress for a boost, stop reclaim as
3697 * IO cannot be queued and it could be an infinite loop in
3698 * extreme circumstances.
3699 */
3700 if (nr_boost_reclaim && !nr_reclaimed)
3701 break;
3702
3703 if (raise_priority || !nr_reclaimed)
3704 sc.priority--;
3705 } while (sc.priority >= 1);
3706
3707 if (!sc.nr_reclaimed)
3708 pgdat->kswapd_failures++;
3709
3710out:
3711 /* If reclaim was boosted, account for the reclaim done in this pass */
3712 if (boosted) {
3713 unsigned long flags;
3714
3715 for (i = 0; i <= highest_zoneidx; i++) {
3716 if (!zone_boosts[i])
3717 continue;
3718
3719 /* Increments are under the zone lock */
3720 zone = pgdat->node_zones + i;
3721 spin_lock_irqsave(&zone->lock, flags);
3722 zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]);
3723 spin_unlock_irqrestore(&zone->lock, flags);
3724 }
3725
3726 /*
3727 * As there is now likely space, wake up kcompactd to defragment
3728 * pageblocks.
3729 */
3730 wakeup_kcompactd(pgdat, pageblock_order, highest_zoneidx);
3731 }
3732
3733 snapshot_refaults(NULL, pgdat);
3734 __fs_reclaim_release();
3735 psi_memstall_leave(&pflags);
3736 set_task_reclaim_state(current, NULL);
3737
3738 /*
3739 * Return the order kswapd stopped reclaiming at as
3740 * prepare_kswapd_sleep() takes it into account. If another caller
3741 * entered the allocator slow path while kswapd was awake, order will
3742 * remain at the higher level.
3743 */
3744 return sc.order;
3745}
3746
3747/*
3748 * The pgdat->kswapd_highest_zoneidx is used to pass the highest zone index to
3749 * be reclaimed by kswapd from the waker. If the value is MAX_NR_ZONES, which is
3750 * not a valid index, then either kswapd is running for the first time or it could
3751 * not sleep after the previous reclaim attempt (the node is still unbalanced). In that
3752 * case return the zone index of the previous kswapd reclaim cycle.
3753 */
3754static enum zone_type kswapd_highest_zoneidx(pg_data_t *pgdat,
3755 enum zone_type prev_highest_zoneidx)
3756{
3757 enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx);
3758
3759 return curr_idx == MAX_NR_ZONES ? prev_highest_zoneidx : curr_idx;
3760}
3761
3762static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
3763 unsigned int highest_zoneidx)
3764{
3765 long remaining = 0;
3766 DEFINE_WAIT(wait);
3767
3768 if (freezing(current) || kthread_should_stop())
3769 return;
3770
3771 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
3772
3773 /*
3774 * Try to sleep for a short interval. Note that kcompactd will only be
3775 * woken if it is possible to sleep for a short interval. This is
3776 * deliberate on the assumption that if reclaim cannot keep an
3777 * eligible zone balanced that it's also unlikely that compaction will
3778 * succeed.
3779 */
3780 if (prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) {
3781 /*
3782 * Compaction records what page blocks it recently failed to
3783 * isolate pages from and skips them in future scans.
3784 * When kswapd is going to sleep, it is reasonable to assume
3785 * that isolating pages and compacting them may succeed, so reset the cache.
3786 */
3787 reset_isolation_suitable(pgdat);
3788
3789 /*
3790 * We have freed the memory, now we should compact it to make
3791 * allocation of the requested order possible.
3792 */
3793 wakeup_kcompactd(pgdat, alloc_order, highest_zoneidx);
3794
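		/*
		 * Nap for HZ/10 (about 100ms). schedule_timeout() returns the
		 * jiffies left, so "remaining" is non-zero only if kswapd was
		 * woken before the timeout expired.
		 */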
3795 remaining = schedule_timeout(HZ/10);
3796
3797 /*
3798 * If woken prematurely then reset kswapd_highest_zoneidx and
3799 * order. The values will either be from a wakeup request or
3800 * the previous request that slept prematurely.
3801 */
3802 if (remaining) {
3803 WRITE_ONCE(pgdat->kswapd_highest_zoneidx,
3804 kswapd_highest_zoneidx(pgdat,
3805 highest_zoneidx));
3806
3807 if (READ_ONCE(pgdat->kswapd_order) < reclaim_order)
3808 WRITE_ONCE(pgdat->kswapd_order, reclaim_order);
3809 }
3810
3811 finish_wait(&pgdat->kswapd_wait, &wait);
3812 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
3813 }
3814
3815 /*
3816 * After a short sleep, check if it was a premature sleep. If not, then
3817 * go fully to sleep until explicitly woken up.
3818 */
3819 if (!remaining &&
3820 prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) {
3821 trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
3822
3823 /*
3824 * vmstat counters are not perfectly accurate and the estimated
3825 * value for counters such as NR_FREE_PAGES can deviate from the
3826 * true value by nr_online_cpus * threshold. To avoid the zone
3827 * watermarks being breached while under pressure, we reduce the
3828 * per-cpu vmstat threshold while kswapd is awake and restore
3829 * them before going back to sleep.
3830 */
3831 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
3832
3833 if (!kthread_should_stop())
3834 schedule();
3835
3836 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
3837 } else {
3838 if (remaining)
3839 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
3840 else
3841 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
3842 }
3843 finish_wait(&pgdat->kswapd_wait, &wait);
3844}
3845
3846/*
3847 * The background pageout daemon, started as a kernel thread
3848 * from the init process.
3849 *
3850 * This basically trickles out pages so that we have _some_
3851 * free memory available even if there is no other activity
3852 * that frees anything up. This is needed for things like routing
3853 * etc, where we otherwise might have all activity going on in
3854 * asynchronous contexts that cannot page things out.
3855 *
3856 * If there are applications that are active memory-allocators
3857 * (most normal use), this basically shouldn't matter.
3858 */
3859static int kswapd(void *p)
3860{
3861 unsigned int alloc_order, reclaim_order;
3862 unsigned int highest_zoneidx = MAX_NR_ZONES - 1;
3863 pg_data_t *pgdat = (pg_data_t*)p;
3864 struct task_struct *tsk = current;
3865 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
3866
3867 if (!cpumask_empty(cpumask))
3868 set_cpus_allowed_ptr(tsk, cpumask);
3869
3870 /*
3871 * Tell the memory management that we're a "memory allocator",
3872 * and that if we need more memory we should get access to it
3873 * regardless (see "__alloc_pages()"). "kswapd" should
3874 * never get caught in the normal page freeing logic.
3875 *
3876 * (Kswapd normally doesn't need memory anyway, but sometimes
3877 * you need a small amount of memory in order to be able to
3878 * page out something else, and this flag essentially protects
3879 * us from recursively trying to free more memory as we're
3880 * trying to free the first piece of memory in the first place).
3881 */
3882 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
3883 set_freezable();
3884
3885 WRITE_ONCE(pgdat->kswapd_order, 0);
3886 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);
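
	/*
	 * kswapd_order and kswapd_highest_zoneidx act as a small mailbox:
	 * wakeup_kswapd() raises them on behalf of allocators, and kswapd
	 * reads and then clears them before each balancing attempt below.
	 */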
3887 for ( ; ; ) {
3888 bool ret;
3889
3890 alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
3891 highest_zoneidx = kswapd_highest_zoneidx(pgdat,
3892 highest_zoneidx);
3893
3894kswapd_try_sleep:
3895 kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
3896 highest_zoneidx);
3897
3898 /* Read the new order and highest_zoneidx */
3899 alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
3900 highest_zoneidx = kswapd_highest_zoneidx(pgdat,
3901 highest_zoneidx);
3902 WRITE_ONCE(pgdat->kswapd_order, 0);
3903 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);
3904
3905 ret = try_to_freeze();
3906 if (kthread_should_stop())
3907 break;
3908
3909 /*
3910 * We can speed up thawing tasks if we don't call balance_pgdat
3911 * after returning from the refrigerator
3912 */
3913 if (ret)
3914 continue;
3915
3916 /*
3917 * Reclaim begins at the requested order but if a high-order
3918 * reclaim fails then kswapd falls back to reclaiming for
3919 * order-0. If that happens, kswapd will consider sleeping
3920 * for the order it finished reclaiming at (reclaim_order)
3921 * but kcompactd is woken to compact for the original
3922 * request (alloc_order).
3923 */
3924 trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx,
3925 alloc_order);
3926 reclaim_order = balance_pgdat(pgdat, alloc_order,
3927 highest_zoneidx);
3928 if (reclaim_order < alloc_order)
3929 goto kswapd_try_sleep;
3930 }
3931
3932 tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
3933
3934 return 0;
3935}
3936
3937/*
3938 * A zone is low on free memory or too fragmented for high-order memory. If
3939 * kswapd should reclaim (direct reclaim is deferred), wake it up for the zone's
3940 * pgdat. It will wake up kcompactd after reclaiming memory. If kswapd reclaim
3941 * has failed or is not needed, still wake up kcompactd if only compaction is
3942 * needed.
3943 */
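/*
 * A minimal sketch of the expected call, assuming the usual caller in the
 * allocator slow path (e.g. wake_all_kswapds() in mm/page_alloc.c):
 *
 *	wakeup_kswapd(zone, gfp_mask, order, ac->highest_zoneidx);
 *
 * i.e. the preferred zone, the allocation's gfp mask and order, and the
 * highest zone index the allocation is allowed to use.
 */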
3944void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
3945 enum zone_type highest_zoneidx)
3946{
3947 pg_data_t *pgdat;
3948 enum zone_type curr_idx;
3949
3950 if (!managed_zone(zone))
3951 return;
3952
3953 if (!cpuset_zone_allowed(zone, gfp_flags))
3954 return;
3955
3956 pgdat = zone->zone_pgdat;
3957 curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx);
3958
3959 if (curr_idx == MAX_NR_ZONES || curr_idx < highest_zoneidx)
3960 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx);
3961
3962 if (READ_ONCE(pgdat->kswapd_order) < order)
3963 WRITE_ONCE(pgdat->kswapd_order, order);
3964
3965 if (!waitqueue_active(&pgdat->kswapd_wait))
3966 return;
3967
3968 /* Hopeless node, leave it to direct reclaim if possible */
3969 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ||
3970 (pgdat_balanced(pgdat, order, highest_zoneidx) &&
3971 !pgdat_watermark_boosted(pgdat, highest_zoneidx))) {
3972 /*
3973 * There may be plenty of free memory available, but it's too
3974 * fragmented for high-order allocations. Wake up kcompactd
3975 * and rely on compaction_suitable() to determine if it's
3976 * needed. If it fails, it will defer subsequent attempts to
3977 * ratelimit its work.
3978 */
3979 if (!(gfp_flags & __GFP_DIRECT_RECLAIM))
3980 wakeup_kcompactd(pgdat, order, highest_zoneidx);
3981 return;
3982 }
3983
3984 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order,
3985 gfp_flags);
3986 wake_up_interruptible(&pgdat->kswapd_wait);
3987}
3988
3989#ifdef CONFIG_HIBERNATION
3990/*
3991 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
3992 * freed pages.
3993 *
3994 * Rather than trying to age LRUs the aim is to preserve the overall
3995 * LRU order by reclaiming preferentially
3996 * inactive > active > active referenced > active mapped
3997 */
3998unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
3999{
4000 struct scan_control sc = {
4001 .nr_to_reclaim = nr_to_reclaim,
4002 .gfp_mask = GFP_HIGHUSER_MOVABLE,
4003 .reclaim_idx = MAX_NR_ZONES - 1,
4004 .priority = DEF_PRIORITY,
4005 .may_writepage = 1,
4006 .may_unmap = 1,
4007 .may_swap = 1,
4008 .hibernation_mode = 1,
4009 };
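
	/*
	 * Hibernation reclaim is deliberately unconstrained: every zone is
	 * eligible, mapped pages may be unmapped, and both writeback and
	 * swap are allowed, since the only goal is to free nr_to_reclaim
	 * pages before the hibernation image is written.
	 */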
4010 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
4011 unsigned long nr_reclaimed;
4012 unsigned int noreclaim_flag;
4013
4014 fs_reclaim_acquire(sc.gfp_mask);
4015 noreclaim_flag = memalloc_noreclaim_save();
4016 set_task_reclaim_state(current, &sc.reclaim_state);
4017
4018 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
4019
4020 set_task_reclaim_state(current, NULL);
4021 memalloc_noreclaim_restore(noreclaim_flag);
4022 fs_reclaim_release(sc.gfp_mask);
4023
4024 return nr_reclaimed;
4025}
4026#endif /* CONFIG_HIBERNATION */
4027
4028/*
4029 * This kswapd start function will be called by init and node-hot-add.
4030 * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
4031 */
4032int kswapd_run(int nid)
4033{
4034 pg_data_t *pgdat = NODE_DATA(nid);
4035 int ret = 0;
4036
4037 if (pgdat->kswapd)
4038 return 0;
4039
4040 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
4041 if (IS_ERR(pgdat->kswapd)) {
4042 /* failure at boot is fatal */
4043 BUG_ON(system_state < SYSTEM_RUNNING);
4044 pr_err("Failed to start kswapd on node %d\n", nid);
4045 ret = PTR_ERR(pgdat->kswapd);
4046 pgdat->kswapd = NULL;
4047 }
4048 return ret;
4049}
4050
4051/*
4052 * Called by memory hotplug when all memory in a node is offlined. Caller must
4053 * hold mem_hotplug_begin/end().
4054 */
4055void kswapd_stop(int nid)
4056{
4057 struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
4058
4059 if (kswapd) {
4060 kthread_stop(kswapd);
4061 NODE_DATA(nid)->kswapd = NULL;
4062 }
4063}
4064
4065static int __init kswapd_init(void)
4066{
4067 int nid;
4068
4069 swap_setup();
4070 for_each_node_state(nid, N_MEMORY)
4071 kswapd_run(nid);
4072 return 0;
4073}
4074
4075module_init(kswapd_init)
4076
4077#ifdef CONFIG_NUMA
4078/*
4079 * Node reclaim mode
4080 *
4081 * If non-zero call node_reclaim when the number of free pages falls below
4082 * the watermarks.
4083 */
4084int node_reclaim_mode __read_mostly;
4085
4086#define RECLAIM_WRITE (1<<0) /* Writeout pages during reclaim */
4087#define RECLAIM_UNMAP (1<<1) /* Unmap pages during reclaim */
4088
4089/*
4090 * Priority for NODE_RECLAIM. This determines the fraction of pages
4091 * of a node considered in each node_reclaim pass. A priority of 4
4092 * scans 1/16th of the pages.
4093 */
4094#define NODE_RECLAIM_PRIORITY 4
4095
4096/*
4097 * Percentage of pages in a zone that must be unmapped for node_reclaim to
4098 * occur.
4099 */
4100int sysctl_min_unmapped_ratio = 1;
4101
4102/*
4103 * If the number of slab pages in a zone grows beyond this percentage then
4104 * slab reclaim needs to occur.
4105 */
4106int sysctl_min_slab_ratio = 5;
4107
4108static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
4109{
4110 unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
4111 unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
4112 node_page_state(pgdat, NR_ACTIVE_FILE);
4113
4114 /*
4115 * It's possible for there to be more file mapped pages than
4116 * accounted for by the pages on the file LRU lists because
4117 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
4118 */
4119 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
4120}
4121
4122/* Work out how many page cache pages we can reclaim in this reclaim_mode */
4123static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
4124{
4125 unsigned long nr_pagecache_reclaimable;
4126 unsigned long delta = 0;
4127
4128 /*
4129 * If RECLAIM_UNMAP is set, then all file pages are considered
4130 * potentially reclaimable. Otherwise, we have to worry about
4131 * pages like swapcache, and node_unmapped_file_pages() provides
4132 * a better estimate.
4133 */
4134 if (node_reclaim_mode & RECLAIM_UNMAP)
4135 nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
4136 else
4137 nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);
4138
4139 /* If we can't clean pages, remove dirty pages from consideration */
4140 if (!(node_reclaim_mode & RECLAIM_WRITE))
4141 delta += node_page_state(pgdat, NR_FILE_DIRTY);
4142
4143 /* Watch for any possible underflows due to delta */
4144 if (unlikely(delta > nr_pagecache_reclaimable))
4145 delta = nr_pagecache_reclaimable;
4146
4147 return nr_pagecache_reclaimable - delta;
4148}
4149
4150/*
4151 * Try to free up some pages from this node through reclaim.
4152 */
4153static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
4154{
4155 /* Minimum pages needed in order to stay on node */
4156 const unsigned long nr_pages = 1 << order;
4157 struct task_struct *p = current;
4158 unsigned int noreclaim_flag;
4159 struct scan_control sc = {
4160 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
4161 .gfp_mask = current_gfp_context(gfp_mask),
4162 .order = order,
4163 .priority = NODE_RECLAIM_PRIORITY,
4164 .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
4165 .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
4166 .may_swap = 1,
4167 .reclaim_idx = gfp_zone(gfp_mask),
4168 };
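
	/*
	 * With NODE_RECLAIM_PRIORITY (4) as the starting priority, each
	 * shrink_node() pass below scans roughly total >> 4, i.e. 1/16th,
	 * of the node's LRU pages, and the target is never less than
	 * SWAP_CLUSTER_MAX pages even for a small order.
	 */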
4169
4170 trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
4171 sc.gfp_mask);
4172
4173 cond_resched();
4174 fs_reclaim_acquire(sc.gfp_mask);
4175 /*
4176 * We need to be able to allocate from the reserves for RECLAIM_UNMAP
4177 * and we also need to be able to write out pages for RECLAIM_WRITE
4178 * and RECLAIM_UNMAP.
4179 */
4180 noreclaim_flag = memalloc_noreclaim_save();
4181 p->flags |= PF_SWAPWRITE;
4182 set_task_reclaim_state(p, &sc.reclaim_state);
4183
4184 if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) {
4185 /*
4186 * Free memory by calling shrink node with increasing
4187 * priorities until we have enough memory freed.
4188 */
4189 do {
4190 shrink_node(pgdat, &sc);
4191 } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
4192 }
4193
4194 set_task_reclaim_state(p, NULL);
4195 current->flags &= ~PF_SWAPWRITE;
4196 memalloc_noreclaim_restore(noreclaim_flag);
4197 fs_reclaim_release(sc.gfp_mask);
4198
4199 trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed);
4200
4201 return sc.nr_reclaimed >= nr_pages;
4202}
4203
4204int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
4205{
4206 int ret;
4207
4208 /*
4209 * Node reclaim reclaims unmapped file backed pages and
4210 * slab pages if we are over the defined limits.
4211 *
4212 * A small portion of unmapped file backed pages is needed for
4213 * file I/O otherwise pages read by file I/O will be immediately
4214 * thrown out if the node is overallocated. So we do not reclaim
4215 * if less than a specified percentage of the node is used by
4216 * unmapped file backed pages.
4217 */
4218 if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
4219 node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <=
4220 pgdat->min_slab_pages)
4221 return NODE_RECLAIM_FULL;
4222
4223 /*
4224 * Do not scan if the allocation should not be delayed.
4225 */
4226 if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
4227 return NODE_RECLAIM_NOSCAN;
4228
4229 /*
4230 * Only run node reclaim on the local node or on nodes that do not
4231 * have associated processors. This will favor the local processor
4232 * over remote processors and spread off-node memory allocations
4233 * as widely as possible.
4234 */
4235 if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
4236 return NODE_RECLAIM_NOSCAN;
4237
4238 if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
4239 return NODE_RECLAIM_NOSCAN;
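
	/*
	 * PGDAT_RECLAIM_LOCKED serialises node reclaim: only one task may
	 * reclaim a given node at a time, everyone else returns
	 * NODE_RECLAIM_NOSCAN immediately.
	 */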
4240
4241 ret = __node_reclaim(pgdat, gfp_mask, order);
4242 clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
4243
4244 if (!ret)
4245 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
4246
4247 return ret;
4248}
4249#endif
4250
4251/**
4252 * check_move_unevictable_pages - check pages for evictability and move to
4253 * appropriate zone lru list
4254 * @pvec: pagevec with lru pages to check
4255 *
4256 * Checks pages for evictability and, if an evictable page is in the unevictable
4257 * lru list, moves it to the appropriate evictable lru list. This function
4258 * should only be used for lru pages.
4259 */
4260void check_move_unevictable_pages(struct pagevec *pvec)
4261{
4262 struct lruvec *lruvec;
4263 struct pglist_data *pgdat = NULL;
4264 int pgscanned = 0;
4265 int pgrescued = 0;
4266 int i;
4267
4268 for (i = 0; i < pvec->nr; i++) {
4269 struct page *page = pvec->pages[i];
4270 struct pglist_data *pagepgdat = page_pgdat(page);
4271 int nr_pages;
4272
4273 if (PageTransTail(page))
4274 continue;
4275
4276 nr_pages = thp_nr_pages(page);
4277 pgscanned += nr_pages;
4278
4279 if (pagepgdat != pgdat) {
4280 if (pgdat)
4281 spin_unlock_irq(&pgdat->lru_lock);
4282 pgdat = pagepgdat;
4283 spin_lock_irq(&pgdat->lru_lock);
4284 }
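
		/*
		 * The pagevec may hold pages from several nodes, so the
		 * lru_lock is switched whenever the node changes; the final
		 * node's lock is released after the loop.
		 */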
4285 lruvec = mem_cgroup_page_lruvec(page, pgdat);
4286
4287 if (!PageLRU(page) || !PageUnevictable(page))
4288 continue;
4289
4290 if (page_evictable(page)) {
4291 enum lru_list lru = page_lru_base_type(page);
4292
4293 VM_BUG_ON_PAGE(PageActive(page), page);
4294 ClearPageUnevictable(page);
4295 del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
4296 add_page_to_lru_list(page, lruvec, lru);
4297 pgrescued += nr_pages;
4298 }
4299 }
4300
4301 if (pgdat) {
4302 __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
4303 __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
4304 spin_unlock_irq(&pgdat->lru_lock);
4305 }
4306}
4307EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
1/*
2 * linux/mm/vmscan.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 *
6 * Swap reorganised 29.12.95, Stephen Tweedie.
7 * kswapd added: 7.1.96 sct
8 * Removed kswapd_ctl limits, and swap out as many pages as needed
9 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
10 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
11 * Multiqueue VM started 5.8.00, Rik van Riel.
12 */
13
14#include <linux/mm.h>
15#include <linux/module.h>
16#include <linux/gfp.h>
17#include <linux/kernel_stat.h>
18#include <linux/swap.h>
19#include <linux/pagemap.h>
20#include <linux/init.h>
21#include <linux/highmem.h>
22#include <linux/vmpressure.h>
23#include <linux/vmstat.h>
24#include <linux/file.h>
25#include <linux/writeback.h>
26#include <linux/blkdev.h>
27#include <linux/buffer_head.h> /* for try_to_release_page(),
28 buffer_heads_over_limit */
29#include <linux/mm_inline.h>
30#include <linux/backing-dev.h>
31#include <linux/rmap.h>
32#include <linux/topology.h>
33#include <linux/cpu.h>
34#include <linux/cpuset.h>
35#include <linux/compaction.h>
36#include <linux/notifier.h>
37#include <linux/rwsem.h>
38#include <linux/delay.h>
39#include <linux/kthread.h>
40#include <linux/freezer.h>
41#include <linux/memcontrol.h>
42#include <linux/delayacct.h>
43#include <linux/sysctl.h>
44#include <linux/oom.h>
45#include <linux/prefetch.h>
46
47#include <asm/tlbflush.h>
48#include <asm/div64.h>
49
50#include <linux/swapops.h>
51#include <linux/balloon_compaction.h>
52
53#include "internal.h"
54
55#define CREATE_TRACE_POINTS
56#include <trace/events/vmscan.h>
57
58struct scan_control {
59 /* Incremented by the number of inactive pages that were scanned */
60 unsigned long nr_scanned;
61
62 /* Number of pages freed so far during a call to shrink_zones() */
63 unsigned long nr_reclaimed;
64
65 /* How many pages shrink_list() should reclaim */
66 unsigned long nr_to_reclaim;
67
68 unsigned long hibernation_mode;
69
70 /* This context's GFP mask */
71 gfp_t gfp_mask;
72
73 int may_writepage;
74
75 /* Can mapped pages be reclaimed? */
76 int may_unmap;
77
78 /* Can pages be swapped as part of reclaim? */
79 int may_swap;
80
81 int order;
82
83 /* Scan (total_size >> priority) pages at once */
84 int priority;
85
86 /*
87 * The memory cgroup that hit its limit and as a result is the
88 * primary target of this reclaim invocation.
89 */
90 struct mem_cgroup *target_mem_cgroup;
91
92 /*
93 * Nodemask of nodes allowed by the caller. If NULL, all nodes
94 * are scanned.
95 */
96 nodemask_t *nodemask;
97};
98
99#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
100
101#ifdef ARCH_HAS_PREFETCH
102#define prefetch_prev_lru_page(_page, _base, _field) \
103 do { \
104 if ((_page)->lru.prev != _base) { \
105 struct page *prev; \
106 \
107 prev = lru_to_page(&(_page->lru)); \
108 prefetch(&prev->_field); \
109 } \
110 } while (0)
111#else
112#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
113#endif
114
115#ifdef ARCH_HAS_PREFETCHW
116#define prefetchw_prev_lru_page(_page, _base, _field) \
117 do { \
118 if ((_page)->lru.prev != _base) { \
119 struct page *prev; \
120 \
121 prev = lru_to_page(&(_page->lru)); \
122 prefetchw(&prev->_field); \
123 } \
124 } while (0)
125#else
126#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
127#endif
128
129/*
130 * From 0 .. 100. Higher means more swappy.
131 */
132int vm_swappiness = 60;
133unsigned long vm_total_pages; /* The total number of pages which the VM controls */
134
135static LIST_HEAD(shrinker_list);
136static DECLARE_RWSEM(shrinker_rwsem);
137
138#ifdef CONFIG_MEMCG
139static bool global_reclaim(struct scan_control *sc)
140{
141 return !sc->target_mem_cgroup;
142}
143#else
144static bool global_reclaim(struct scan_control *sc)
145{
146 return true;
147}
148#endif
149
150static unsigned long zone_reclaimable_pages(struct zone *zone)
151{
152 int nr;
153
154 nr = zone_page_state(zone, NR_ACTIVE_FILE) +
155 zone_page_state(zone, NR_INACTIVE_FILE);
156
157 if (get_nr_swap_pages() > 0)
158 nr += zone_page_state(zone, NR_ACTIVE_ANON) +
159 zone_page_state(zone, NR_INACTIVE_ANON);
160
161 return nr;
162}
163
164bool zone_reclaimable(struct zone *zone)
165{
166 return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
167}
168
169static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
170{
171 if (!mem_cgroup_disabled())
172 return mem_cgroup_get_lru_size(lruvec, lru);
173
174 return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
175}
176
177/*
178 * Add a shrinker callback to be called from the vm.
179 */
180int register_shrinker(struct shrinker *shrinker)
181{
182 size_t size = sizeof(*shrinker->nr_deferred);
183
184 /*
185 * If we only have one possible node in the system anyway, save
186 * ourselves the trouble and disable NUMA aware behavior. This way we
187 * will save memory and some small loop time later.
188 */
189 if (nr_node_ids == 1)
190 shrinker->flags &= ~SHRINKER_NUMA_AWARE;
191
192 if (shrinker->flags & SHRINKER_NUMA_AWARE)
193 size *= nr_node_ids;
194
195 shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
196 if (!shrinker->nr_deferred)
197 return -ENOMEM;
198
199 down_write(&shrinker_rwsem);
200 list_add_tail(&shrinker->list, &shrinker_list);
201 up_write(&shrinker_rwsem);
202 return 0;
203}
204EXPORT_SYMBOL(register_shrinker);
205
206/*
207 * Remove one
208 */
209void unregister_shrinker(struct shrinker *shrinker)
210{
211 down_write(&shrinker_rwsem);
212 list_del(&shrinker->list);
213 up_write(&shrinker_rwsem);
214 kfree(shrinker->nr_deferred);
215}
216EXPORT_SYMBOL(unregister_shrinker);
217
218#define SHRINK_BATCH 128
219
220static unsigned long
221shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
222 unsigned long nr_pages_scanned, unsigned long lru_pages)
223{
224 unsigned long freed = 0;
225 unsigned long long delta;
226 long total_scan;
227 long freeable;
228 long nr;
229 long new_nr;
230 int nid = shrinkctl->nid;
231 long batch_size = shrinker->batch ? shrinker->batch
232 : SHRINK_BATCH;
233
234 freeable = shrinker->count_objects(shrinker, shrinkctl);
235 if (freeable == 0)
236 return 0;
237
238 /*
239 * copy the current shrinker scan count into a local variable
240 * and zero it so that other concurrent shrinker invocations
241 * don't also do this scanning work.
242 */
243 nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
244
245 total_scan = nr;
246 delta = (4 * nr_pages_scanned) / shrinker->seeks;
247 delta *= freeable;
248 do_div(delta, lru_pages + 1);
249 total_scan += delta;
250 if (total_scan < 0) {
251 printk(KERN_ERR
252 "shrink_slab: %pF negative objects to delete nr=%ld\n",
253 shrinker->scan_objects, total_scan);
254 total_scan = freeable;
255 }
256
257 /*
258 * We need to avoid excessive windup on filesystem shrinkers
259 * due to large numbers of GFP_NOFS allocations causing the
260 * shrinkers to return -1 all the time. This results in a large
261 * nr being built up so when a shrink that can do some work
262 * comes along it empties the entire cache due to nr >>>
263 * freeable. This is bad for sustaining a working set in
264 * memory.
265 *
266 * Hence only allow the shrinker to scan the entire cache when
267 * a large delta change is calculated directly.
268 */
269 if (delta < freeable / 4)
270 total_scan = min(total_scan, freeable / 2);
271
272 /*
273 * Avoid risking looping forever due to too large nr value:
274 * never try to free more than twice the estimate number of
275 * freeable entries.
276 */
277 if (total_scan > freeable * 2)
278 total_scan = freeable * 2;
279
280 trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
281 nr_pages_scanned, lru_pages,
282 freeable, delta, total_scan);
283
284 /*
285 * Normally, we should not scan less than batch_size objects in one
286 * pass to avoid too frequent shrinker calls, but if the slab has less
287 * than batch_size objects in total and we are really tight on memory,
288 * we will try to reclaim all available objects, otherwise we can end
289 * up failing allocations although there are plenty of reclaimable
290 * objects spread over several slabs with usage less than the
291 * batch_size.
292 *
293 * We detect the "tight on memory" situations by looking at the total
294 * number of objects we want to scan (total_scan). If it is greater
295 * than the total number of objects on slab (freeable), we must be
296 * scanning at high prio and therefore should try to reclaim as much as
297 * possible.
298 */
299 while (total_scan >= batch_size ||
300 total_scan >= freeable) {
301 unsigned long ret;
302 unsigned long nr_to_scan = min(batch_size, total_scan);
303
304 shrinkctl->nr_to_scan = nr_to_scan;
305 ret = shrinker->scan_objects(shrinker, shrinkctl);
306 if (ret == SHRINK_STOP)
307 break;
308 freed += ret;
309
310 count_vm_events(SLABS_SCANNED, nr_to_scan);
311 total_scan -= nr_to_scan;
312
313 cond_resched();
314 }
315
316 /*
317 * move the unused scan count back into the shrinker in a
318 * manner that handles concurrent updates. If we exhausted the
319 * scan, there is no need to do an update.
320 */
321 if (total_scan > 0)
322 new_nr = atomic_long_add_return(total_scan,
323 &shrinker->nr_deferred[nid]);
324 else
325 new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
326
327 trace_mm_shrink_slab_end(shrinker, freed, nr, new_nr);
328 return freed;
329}
330
331/*
332 * Call the shrink functions to age shrinkable caches
333 *
334 * Here we assume it costs one seek to replace a lru page and that it also
335 * takes a seek to recreate a cache object. With this in mind we age equal
336 * percentages of the lru and ageable caches. This should balance the seeks
337 * generated by these structures.
338 *
339 * If the vm encountered mapped pages on the LRU it increase the pressure on
340 * slab to avoid swapping.
341 *
342 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
343 *
344 * `lru_pages' represents the number of on-LRU pages in all the zones which
345 * are eligible for the caller's allocation attempt. It is used for balancing
346 * slab reclaim versus page reclaim.
347 *
348 * Returns the number of slab objects which we shrunk.
349 */
350unsigned long shrink_slab(struct shrink_control *shrinkctl,
351 unsigned long nr_pages_scanned,
352 unsigned long lru_pages)
353{
354 struct shrinker *shrinker;
355 unsigned long freed = 0;
356
357 if (nr_pages_scanned == 0)
358 nr_pages_scanned = SWAP_CLUSTER_MAX;
359
360 if (!down_read_trylock(&shrinker_rwsem)) {
361 /*
362 * If we would return 0, our callers would understand that we
363 * have nothing else to shrink and give up trying. By returning
364 * 1 we keep it going and assume we'll be able to shrink next
365 * time.
366 */
367 freed = 1;
368 goto out;
369 }
370
371 list_for_each_entry(shrinker, &shrinker_list, list) {
372 if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) {
373 shrinkctl->nid = 0;
374 freed += shrink_slab_node(shrinkctl, shrinker,
375 nr_pages_scanned, lru_pages);
376 continue;
377 }
378
379 for_each_node_mask(shrinkctl->nid, shrinkctl->nodes_to_scan) {
380 if (node_online(shrinkctl->nid))
381 freed += shrink_slab_node(shrinkctl, shrinker,
382 nr_pages_scanned, lru_pages);
383
384 }
385 }
386 up_read(&shrinker_rwsem);
387out:
388 cond_resched();
389 return freed;
390}
391
392static inline int is_page_cache_freeable(struct page *page)
393{
394 /*
395 * A freeable page cache page is referenced only by the caller
396 * that isolated the page, the page cache radix tree and
397 * optional buffer heads at page->private.
398 */
399 return page_count(page) - page_has_private(page) == 2;
400}
401
402static int may_write_to_queue(struct backing_dev_info *bdi,
403 struct scan_control *sc)
404{
405 if (current->flags & PF_SWAPWRITE)
406 return 1;
407 if (!bdi_write_congested(bdi))
408 return 1;
409 if (bdi == current->backing_dev_info)
410 return 1;
411 return 0;
412}
413
414/*
415 * We detected a synchronous write error writing a page out. Probably
416 * -ENOSPC. We need to propagate that into the address_space for a subsequent
417 * fsync(), msync() or close().
418 *
419 * The tricky part is that after writepage we cannot touch the mapping: nothing
420 * prevents it from being freed up. But we have a ref on the page and once
421 * that page is locked, the mapping is pinned.
422 *
423 * We're allowed to run sleeping lock_page() here because we know the caller has
424 * __GFP_FS.
425 */
426static void handle_write_error(struct address_space *mapping,
427 struct page *page, int error)
428{
429 lock_page(page);
430 if (page_mapping(page) == mapping)
431 mapping_set_error(mapping, error);
432 unlock_page(page);
433}
434
435/* possible outcome of pageout() */
436typedef enum {
437 /* failed to write page out, page is locked */
438 PAGE_KEEP,
439 /* move page to the active list, page is locked */
440 PAGE_ACTIVATE,
441 /* page has been sent to the disk successfully, page is unlocked */
442 PAGE_SUCCESS,
443 /* page is clean and locked */
444 PAGE_CLEAN,
445} pageout_t;
446
447/*
448 * pageout is called by shrink_page_list() for each dirty page.
449 * Calls ->writepage().
450 */
451static pageout_t pageout(struct page *page, struct address_space *mapping,
452 struct scan_control *sc)
453{
454 /*
455 * If the page is dirty, only perform writeback if that write
456 * will be non-blocking. To prevent this allocation from being
457 * stalled by pagecache activity. But note that there may be
458 * stalls if we need to run get_block(). We could test
459 * PagePrivate for that.
460 *
461 * If this process is currently in __generic_file_aio_write() against
462 * this page's queue, we can perform writeback even if that
463 * will block.
464 *
465 * If the page is swapcache, write it back even if that would
466 * block, for some throttling. This happens by accident, because
467 * swap_backing_dev_info is bust: it doesn't reflect the
468 * congestion state of the swapdevs. Easy to fix, if needed.
469 */
470 if (!is_page_cache_freeable(page))
471 return PAGE_KEEP;
472 if (!mapping) {
473 /*
474 * Some data journaling orphaned pages can have
475 * page->mapping == NULL while being dirty with clean buffers.
476 */
477 if (page_has_private(page)) {
478 if (try_to_free_buffers(page)) {
479 ClearPageDirty(page);
480 printk("%s: orphaned page\n", __func__);
481 return PAGE_CLEAN;
482 }
483 }
484 return PAGE_KEEP;
485 }
486 if (mapping->a_ops->writepage == NULL)
487 return PAGE_ACTIVATE;
488 if (!may_write_to_queue(mapping->backing_dev_info, sc))
489 return PAGE_KEEP;
490
491 if (clear_page_dirty_for_io(page)) {
492 int res;
493 struct writeback_control wbc = {
494 .sync_mode = WB_SYNC_NONE,
495 .nr_to_write = SWAP_CLUSTER_MAX,
496 .range_start = 0,
497 .range_end = LLONG_MAX,
498 .for_reclaim = 1,
499 };
500
501 SetPageReclaim(page);
502 res = mapping->a_ops->writepage(page, &wbc);
503 if (res < 0)
504 handle_write_error(mapping, page, res);
505 if (res == AOP_WRITEPAGE_ACTIVATE) {
506 ClearPageReclaim(page);
507 return PAGE_ACTIVATE;
508 }
509
510 if (!PageWriteback(page)) {
511 /* synchronous write or broken a_ops? */
512 ClearPageReclaim(page);
513 }
514 trace_mm_vmscan_writepage(page, trace_reclaim_flags(page));
515 inc_zone_page_state(page, NR_VMSCAN_WRITE);
516 return PAGE_SUCCESS;
517 }
518
519 return PAGE_CLEAN;
520}
521
522/*
523 * Same as remove_mapping, but if the page is removed from the mapping, it
524 * gets returned with a refcount of 0.
525 */
526static int __remove_mapping(struct address_space *mapping, struct page *page,
527 bool reclaimed)
528{
529 BUG_ON(!PageLocked(page));
530 BUG_ON(mapping != page_mapping(page));
531
532 spin_lock_irq(&mapping->tree_lock);
533 /*
534 * The non racy check for a busy page.
535 *
536 * Must be careful with the order of the tests. When someone has
537 * a ref to the page, it may be possible that they dirty it then
538 * drop the reference. So if PageDirty is tested before page_count
539 * here, then the following race may occur:
540 *
541 * get_user_pages(&page);
542 * [user mapping goes away]
543 * write_to(page);
544 * !PageDirty(page) [good]
545 * SetPageDirty(page);
546 * put_page(page);
547 * !page_count(page) [good, discard it]
548 *
549 * [oops, our write_to data is lost]
550 *
551 * Reversing the order of the tests ensures such a situation cannot
552 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
553 * load is not satisfied before that of page->_count.
554 *
555 * Note that if SetPageDirty is always performed via set_page_dirty,
556 * and thus under tree_lock, then this ordering is not required.
557 */
558 if (!page_freeze_refs(page, 2))
559 goto cannot_free;
560 /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
561 if (unlikely(PageDirty(page))) {
562 page_unfreeze_refs(page, 2);
563 goto cannot_free;
564 }
565
566 if (PageSwapCache(page)) {
567 swp_entry_t swap = { .val = page_private(page) };
568 __delete_from_swap_cache(page);
569 spin_unlock_irq(&mapping->tree_lock);
570 swapcache_free(swap, page);
571 } else {
572 void (*freepage)(struct page *);
573 void *shadow = NULL;
574
575 freepage = mapping->a_ops->freepage;
576 /*
577 * Remember a shadow entry for reclaimed file cache in
578 * order to detect refaults, thus thrashing, later on.
579 *
580 * But don't store shadows in an address space that is
581 * already exiting. This is not just an optizimation,
582 * inode reclaim needs to empty out the radix tree or
583 * the nodes are lost. Don't plant shadows behind its
584 * back.
585 */
586 if (reclaimed && page_is_file_cache(page) &&
587 !mapping_exiting(mapping))
588 shadow = workingset_eviction(mapping, page);
589 __delete_from_page_cache(page, shadow);
590 spin_unlock_irq(&mapping->tree_lock);
591 mem_cgroup_uncharge_cache_page(page);
592
593 if (freepage != NULL)
594 freepage(page);
595 }
596
597 return 1;
598
599cannot_free:
600 spin_unlock_irq(&mapping->tree_lock);
601 return 0;
602}
603
604/*
605 * Attempt to detach a locked page from its ->mapping. If it is dirty or if
606 * someone else has a ref on the page, abort and return 0. If it was
607 * successfully detached, return 1. Assumes the caller has a single ref on
608 * this page.
609 */
610int remove_mapping(struct address_space *mapping, struct page *page)
611{
612 if (__remove_mapping(mapping, page, false)) {
613 /*
614 * Unfreezing the refcount with 1 rather than 2 effectively
615 * drops the pagecache ref for us without requiring another
616 * atomic operation.
617 */
618 page_unfreeze_refs(page, 1);
619 return 1;
620 }
621 return 0;
622}
623
624/**
625 * putback_lru_page - put previously isolated page onto appropriate LRU list
626 * @page: page to be put back to appropriate lru list
627 *
628 * Add previously isolated @page to appropriate LRU list.
629 * Page may still be unevictable for other reasons.
630 *
631 * lru_lock must not be held, interrupts must be enabled.
632 */
633void putback_lru_page(struct page *page)
634{
635 bool is_unevictable;
636 int was_unevictable = PageUnevictable(page);
637
638 VM_BUG_ON_PAGE(PageLRU(page), page);
639
640redo:
641 ClearPageUnevictable(page);
642
643 if (page_evictable(page)) {
644 /*
645 * For evictable pages, we can use the cache.
646 * In event of a race, worst case is we end up with an
647 * unevictable page on [in]active list.
648 * We know how to handle that.
649 */
650 is_unevictable = false;
651 lru_cache_add(page);
652 } else {
653 /*
654 * Put unevictable pages directly on zone's unevictable
655 * list.
656 */
657 is_unevictable = true;
658 add_page_to_unevictable_list(page);
659 /*
660 * When racing with an mlock or AS_UNEVICTABLE clearing
661 * (page is unlocked) make sure that if the other thread
662 * does not observe our setting of PG_lru and fails
663 * isolation/check_move_unevictable_pages,
664 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
665 * the page back to the evictable list.
666 *
667 * The other side is TestClearPageMlocked() or shmem_lock().
668 */
669 smp_mb();
670 }
671
672 /*
673 * page's status can change while we move it among lru. If an evictable
674 * page is on unevictable list, it never be freed. To avoid that,
675 * check after we added it to the list, again.
676 */
677 if (is_unevictable && page_evictable(page)) {
678 if (!isolate_lru_page(page)) {
679 put_page(page);
680 goto redo;
681 }
682 /* This means someone else dropped this page from LRU
683 * So, it will be freed or putback to LRU again. There is
684 * nothing to do here.
685 */
686 }
687
688 if (was_unevictable && !is_unevictable)
689 count_vm_event(UNEVICTABLE_PGRESCUED);
690 else if (!was_unevictable && is_unevictable)
691 count_vm_event(UNEVICTABLE_PGCULLED);
692
693 put_page(page); /* drop ref from isolate */
694}
695
696enum page_references {
697 PAGEREF_RECLAIM,
698 PAGEREF_RECLAIM_CLEAN,
699 PAGEREF_KEEP,
700 PAGEREF_ACTIVATE,
701};
702
703static enum page_references page_check_references(struct page *page,
704 struct scan_control *sc)
705{
706 int referenced_ptes, referenced_page;
707 unsigned long vm_flags;
708
709 referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
710 &vm_flags);
711 referenced_page = TestClearPageReferenced(page);
712
713 /*
714 * Mlock lost the isolation race with us. Let try_to_unmap()
715 * move the page to the unevictable list.
716 */
717 if (vm_flags & VM_LOCKED)
718 return PAGEREF_RECLAIM;
719
720 if (referenced_ptes) {
721 if (PageSwapBacked(page))
722 return PAGEREF_ACTIVATE;
723 /*
724 * All mapped pages start out with page table
725 * references from the instantiating fault, so we need
726 * to look twice if a mapped file page is used more
727 * than once.
728 *
729 * Mark it and spare it for another trip around the
730 * inactive list. Another page table reference will
731 * lead to its activation.
732 *
733 * Note: the mark is set for activated pages as well
734 * so that recently deactivated but used pages are
735 * quickly recovered.
736 */
737 SetPageReferenced(page);
738
739 if (referenced_page || referenced_ptes > 1)
740 return PAGEREF_ACTIVATE;
741
742 /*
743 * Activate file-backed executable pages after first usage.
744 */
745 if (vm_flags & VM_EXEC)
746 return PAGEREF_ACTIVATE;
747
748 return PAGEREF_KEEP;
749 }
750
751 /* Reclaim if clean, defer dirty pages to writeback */
752 if (referenced_page && !PageSwapBacked(page))
753 return PAGEREF_RECLAIM_CLEAN;
754
755 return PAGEREF_RECLAIM;
756}
757
758/* Check if a page is dirty or under writeback */
759static void page_check_dirty_writeback(struct page *page,
760 bool *dirty, bool *writeback)
761{
762 struct address_space *mapping;
763
764 /*
765 * Anonymous pages are not handled by flushers and must be written
766 * from reclaim context. Do not stall reclaim based on them
767 */
768 if (!page_is_file_cache(page)) {
769 *dirty = false;
770 *writeback = false;
771 return;
772 }
773
774 /* By default assume that the page flags are accurate */
775 *dirty = PageDirty(page);
776 *writeback = PageWriteback(page);
777
778 /* Verify dirty/writeback state if the filesystem supports it */
779 if (!page_has_private(page))
780 return;
781
782 mapping = page_mapping(page);
783 if (mapping && mapping->a_ops->is_dirty_writeback)
784 mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
785}
786
787/*
788 * shrink_page_list() returns the number of reclaimed pages
789 */
790static unsigned long shrink_page_list(struct list_head *page_list,
791 struct zone *zone,
792 struct scan_control *sc,
793 enum ttu_flags ttu_flags,
794 unsigned long *ret_nr_dirty,
795 unsigned long *ret_nr_unqueued_dirty,
796 unsigned long *ret_nr_congested,
797 unsigned long *ret_nr_writeback,
798 unsigned long *ret_nr_immediate,
799 bool force_reclaim)
800{
801 LIST_HEAD(ret_pages);
802 LIST_HEAD(free_pages);
803 int pgactivate = 0;
804 unsigned long nr_unqueued_dirty = 0;
805 unsigned long nr_dirty = 0;
806 unsigned long nr_congested = 0;
807 unsigned long nr_reclaimed = 0;
808 unsigned long nr_writeback = 0;
809 unsigned long nr_immediate = 0;
810
811 cond_resched();
812
813 mem_cgroup_uncharge_start();
814 while (!list_empty(page_list)) {
815 struct address_space *mapping;
816 struct page *page;
817 int may_enter_fs;
818 enum page_references references = PAGEREF_RECLAIM_CLEAN;
819 bool dirty, writeback;
820
821 cond_resched();
822
823 page = lru_to_page(page_list);
824 list_del(&page->lru);
825
826 if (!trylock_page(page))
827 goto keep;
828
829 VM_BUG_ON_PAGE(PageActive(page), page);
830 VM_BUG_ON_PAGE(page_zone(page) != zone, page);
831
832 sc->nr_scanned++;
833
834 if (unlikely(!page_evictable(page)))
835 goto cull_mlocked;
836
837 if (!sc->may_unmap && page_mapped(page))
838 goto keep_locked;
839
840 /* Double the slab pressure for mapped and swapcache pages */
841 if (page_mapped(page) || PageSwapCache(page))
842 sc->nr_scanned++;
843
844 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
845 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
846
847 /*
848 * The number of dirty pages determines if a zone is marked
849 * reclaim_congested which affects wait_iff_congested. kswapd
850 * will stall and start writing pages if the tail of the LRU
851 * is all dirty unqueued pages.
852 */
853 page_check_dirty_writeback(page, &dirty, &writeback);
854 if (dirty || writeback)
855 nr_dirty++;
856
857 if (dirty && !writeback)
858 nr_unqueued_dirty++;
859
860 /*
861 * Treat this page as congested if the underlying BDI is or if
862 * pages are cycling through the LRU so quickly that the
863 * pages marked for immediate reclaim are making it to the
864 * end of the LRU a second time.
865 */
866 mapping = page_mapping(page);
867 if ((mapping && bdi_write_congested(mapping->backing_dev_info)) ||
868 (writeback && PageReclaim(page)))
869 nr_congested++;
870
871 /*
872 * If a page at the tail of the LRU is under writeback, there
873 * are three cases to consider.
874 *
875 * 1) If reclaim is encountering an excessive number of pages
876 * under writeback and this page is both under writeback and
877 * PageReclaim then it indicates that pages are being queued
878 * for IO but are being recycled through the LRU before the
879 * IO can complete. Waiting on the page itself risks an
880 * indefinite stall if it is impossible to writeback the
881 * page due to IO error or disconnected storage so instead
882 * note that the LRU is being scanned too quickly and the
883 * caller can stall after page list has been processed.
884 *
885 * 2) Global reclaim encounters a page, memcg encounters a
886 * page that is not marked for immediate reclaim or
887 * the caller does not have __GFP_IO. In this case mark
888 * the page for immediate reclaim and continue scanning.
889 *
890 * __GFP_IO is checked because a loop driver thread might
891 * enter reclaim, and deadlock if it waits on a page for
892 * which it is needed to do the write (loop masks off
893 * __GFP_IO|__GFP_FS for this reason); but more thought
894 * would probably show more reasons.
895 *
896 * Don't require __GFP_FS, since we're not going into the
897 * FS, just waiting on its writeback completion. Worryingly,
898 * ext4 gfs2 and xfs allocate pages with
899 * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing
900 * may_enter_fs here is liable to OOM on them.
901 *
902 * 3) memcg encounters a page that is not already marked
903 * PageReclaim. memcg does not have any dirty pages
904 * throttling so we could easily OOM just because too many
905 * pages are in writeback and there is nothing else to
906 * reclaim. Wait for the writeback to complete.
907 */
908 if (PageWriteback(page)) {
909 /* Case 1 above */
910 if (current_is_kswapd() &&
911 PageReclaim(page) &&
912 zone_is_reclaim_writeback(zone)) {
913 nr_immediate++;
914 goto keep_locked;
915
916 /* Case 2 above */
917 } else if (global_reclaim(sc) ||
918 !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
919 /*
920 * This is slightly racy - end_page_writeback()
921 * might have just cleared PageReclaim, then
922 * setting PageReclaim here end up interpreted
923 * as PageReadahead - but that does not matter
924 * enough to care. What we do want is for this
925 * page to have PageReclaim set next time memcg
926 * reclaim reaches the tests above, so it will
927 * then wait_on_page_writeback() to avoid OOM;
928 * and it's also appropriate in global reclaim.
929 */
930 SetPageReclaim(page);
931 nr_writeback++;
932
933 goto keep_locked;
934
935 /* Case 3 above */
936 } else {
937 wait_on_page_writeback(page);
938 }
939 }
940
941 if (!force_reclaim)
942 references = page_check_references(page, sc);
943
944 switch (references) {
945 case PAGEREF_ACTIVATE:
946 goto activate_locked;
947 case PAGEREF_KEEP:
948 goto keep_locked;
949 case PAGEREF_RECLAIM:
950 case PAGEREF_RECLAIM_CLEAN:
951 ; /* try to reclaim the page below */
952 }
953
954 /*
955 * Anonymous process memory has backing store?
956 * Try to allocate it some swap space here.
957 */
958 if (PageAnon(page) && !PageSwapCache(page)) {
959 if (!(sc->gfp_mask & __GFP_IO))
960 goto keep_locked;
961 if (!add_to_swap(page, page_list))
962 goto activate_locked;
963 may_enter_fs = 1;
964
965 /* Adding to swap updated mapping */
966 mapping = page_mapping(page);
967 }
968
969 /*
970 * The page is mapped into the page tables of one or more
971 * processes. Try to unmap it here.
972 */
973 if (page_mapped(page) && mapping) {
974 switch (try_to_unmap(page, ttu_flags)) {
975 case SWAP_FAIL:
976 goto activate_locked;
977 case SWAP_AGAIN:
978 goto keep_locked;
979 case SWAP_MLOCK:
980 goto cull_mlocked;
981 case SWAP_SUCCESS:
982 ; /* try to free the page below */
983 }
984 }
985
986 if (PageDirty(page)) {
987 /*
988 * Only kswapd can writeback filesystem pages to
989 * avoid risk of stack overflow but only writeback
990 * if many dirty pages have been encountered.
991 */
992 if (page_is_file_cache(page) &&
993 (!current_is_kswapd() ||
994 !zone_is_reclaim_dirty(zone))) {
995 /*
996 * Immediately reclaim when written back.
997 * Similar in principal to deactivate_page()
998 * except we already have the page isolated
999 * and know it's dirty
1000 */
1001 inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
1002 SetPageReclaim(page);
1003
1004 goto keep_locked;
1005 }
1006
1007 if (references == PAGEREF_RECLAIM_CLEAN)
1008 goto keep_locked;
1009 if (!may_enter_fs)
1010 goto keep_locked;
1011 if (!sc->may_writepage)
1012 goto keep_locked;
1013
1014 /* Page is dirty, try to write it out here */
1015 switch (pageout(page, mapping, sc)) {
1016 case PAGE_KEEP:
1017 goto keep_locked;
1018 case PAGE_ACTIVATE:
1019 goto activate_locked;
1020 case PAGE_SUCCESS:
1021 if (PageWriteback(page))
1022 goto keep;
1023 if (PageDirty(page))
1024 goto keep;
1025
1026 /*
1027 * A synchronous write - probably a ramdisk. Go
1028 * ahead and try to reclaim the page.
1029 */
1030 if (!trylock_page(page))
1031 goto keep;
1032 if (PageDirty(page) || PageWriteback(page))
1033 goto keep_locked;
1034 mapping = page_mapping(page);
1035 case PAGE_CLEAN:
1036 ; /* try to free the page below */
1037 }
1038 }
1039
1040 /*
1041 * If the page has buffers, try to free the buffer mappings
1042 * associated with this page. If we succeed we try to free
1043 * the page as well.
1044 *
1045 * We do this even if the page is PageDirty().
1046 * try_to_release_page() does not perform I/O, but it is
1047 * possible for a page to have PageDirty set, but it is actually
1048 * clean (all its buffers are clean). This happens if the
1049 * buffers were written out directly, with submit_bh(). ext3
1050 * will do this, as well as the blockdev mapping.
1051 * try_to_release_page() will discover that cleanness and will
1052 * drop the buffers and mark the page clean - it can be freed.
1053 *
1054 * Rarely, pages can have buffers and no ->mapping. These are
1055 * the pages which were not successfully invalidated in
1056 * truncate_complete_page(). We try to drop those buffers here
1057 * and if that worked, and the page is no longer mapped into
1058 * process address space (page_count == 1) it can be freed.
1059 * Otherwise, leave the page on the LRU so it is swappable.
1060 */
1061 if (page_has_private(page)) {
1062 if (!try_to_release_page(page, sc->gfp_mask))
1063 goto activate_locked;
1064 if (!mapping && page_count(page) == 1) {
1065 unlock_page(page);
1066 if (put_page_testzero(page))
1067 goto free_it;
1068 else {
1069 /*
1070 * rare race with speculative reference.
1071 * the speculative reference will free
1072 * this page shortly, so we may
1073 * increment nr_reclaimed here (and
1074 * leave it off the LRU).
1075 */
1076 nr_reclaimed++;
1077 continue;
1078 }
1079 }
1080 }
1081
1082 if (!mapping || !__remove_mapping(mapping, page, true))
1083 goto keep_locked;
1084
1085 /*
1086 * At this point, we have no other references and there is
1087 * no way to pick any more up (removed from LRU, removed
1088 * from pagecache). Can use non-atomic bitops now (and
1089 * we obviously don't have to worry about waking up a process
1090 * waiting on the page lock, because there are no references).
1091 */
1092 __clear_page_locked(page);
1093free_it:
1094 nr_reclaimed++;
1095
1096 /*
1097 * Is there a need to periodically free_page_list? It would
1098 * appear not, as the counts should be low
1099 */
1100 list_add(&page->lru, &free_pages);
1101 continue;
1102
1103cull_mlocked:
1104 if (PageSwapCache(page))
1105 try_to_free_swap(page);
1106 unlock_page(page);
1107 putback_lru_page(page);
1108 continue;
1109
1110activate_locked:
1111 /* Not a candidate for swapping, so reclaim swap space. */
1112 if (PageSwapCache(page) && vm_swap_full())
1113 try_to_free_swap(page);
1114 VM_BUG_ON_PAGE(PageActive(page), page);
1115 SetPageActive(page);
1116 pgactivate++;
1117keep_locked:
1118 unlock_page(page);
1119keep:
1120 list_add(&page->lru, &ret_pages);
1121 VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
1122 }
1123
1124 free_hot_cold_page_list(&free_pages, 1);
1125
1126 list_splice(&ret_pages, page_list);
1127 count_vm_events(PGACTIVATE, pgactivate);
1128 mem_cgroup_uncharge_end();
1129 *ret_nr_dirty += nr_dirty;
1130 *ret_nr_congested += nr_congested;
1131 *ret_nr_unqueued_dirty += nr_unqueued_dirty;
1132 *ret_nr_writeback += nr_writeback;
1133 *ret_nr_immediate += nr_immediate;
1134 return nr_reclaimed;
1135}
1136
1137unsigned long reclaim_clean_pages_from_list(struct zone *zone,
1138 struct list_head *page_list)
1139{
1140 struct scan_control sc = {
1141 .gfp_mask = GFP_KERNEL,
1142 .priority = DEF_PRIORITY,
1143 .may_unmap = 1,
1144 };
1145 unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5;
1146 struct page *page, *next;
1147 LIST_HEAD(clean_pages);
1148
1149 list_for_each_entry_safe(page, next, page_list, lru) {
1150 if (page_is_file_cache(page) && !PageDirty(page) &&
1151 !isolated_balloon_page(page)) {
1152 ClearPageActive(page);
1153 list_move(&page->lru, &clean_pages);
1154 }
1155 }
1156
1157 ret = shrink_page_list(&clean_pages, zone, &sc,
1158 TTU_UNMAP|TTU_IGNORE_ACCESS,
1159 &dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
1160 list_splice(&clean_pages, page_list);
1161 mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
1162 return ret;
1163}
1164
1165/*
1166 * Attempt to remove the specified page from its LRU. Only take this page
1167 * if it is of the appropriate PageActive status. Pages which are being
1168 * freed elsewhere are also ignored.
1169 *
1170 * page: page to consider
1171 * mode: one of the LRU isolation modes defined above
1172 *
1173 * returns 0 on success, -ve errno on failure.
1174 */
1175int __isolate_lru_page(struct page *page, isolate_mode_t mode)
1176{
1177 int ret = -EINVAL;
1178
1179 /* Only take pages on the LRU. */
1180 if (!PageLRU(page))
1181 return ret;
1182
1183 /* Compaction should not handle unevictable pages but CMA can do so */
1184 if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
1185 return ret;
1186
1187 ret = -EBUSY;
1188
1189 /*
1190 * To minimise LRU disruption, the caller can indicate that it only
1191 * wants to isolate pages it will be able to operate on without
1192 * blocking - clean pages for the most part.
1193 *
1194 * ISOLATE_CLEAN means that only clean pages should be isolated. This
1195 * is used by reclaim when it cannot write to backing storage
1196 *
1197 * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants pages
1198 * that it is possible to migrate without blocking
1199 */
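/*
 * For example, shrink_inactive_list() and shrink_active_list() below
 * add ISOLATE_CLEAN when sc->may_writepage is clear and
 * ISOLATE_UNMAPPED when sc->may_unmap is clear.
 */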
1200 if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
1201 /* All the caller can do on PageWriteback is block */
1202 if (PageWriteback(page))
1203 return ret;
1204
1205 if (PageDirty(page)) {
1206 struct address_space *mapping;
1207
1208 /* ISOLATE_CLEAN means only clean pages */
1209 if (mode & ISOLATE_CLEAN)
1210 return ret;
1211
1212 /*
1213 * Only pages without mappings or that have a
1214 * ->migratepage callback are possible to migrate
1215 * without blocking
1216 */
1217 mapping = page_mapping(page);
1218 if (mapping && !mapping->a_ops->migratepage)
1219 return ret;
1220 }
1221 }
1222
1223 if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
1224 return ret;
1225
1226 if (likely(get_page_unless_zero(page))) {
1227 /*
1228 * Be careful not to clear PageLRU until after we're
1229 * sure the page is not being freed elsewhere -- the
1230 * page release code relies on it.
1231 */
1232 ClearPageLRU(page);
1233 ret = 0;
1234 }
1235
1236 return ret;
1237}
1238
1239/*
1240 * zone->lru_lock is heavily contended. Some of the functions that
1241 * shrink the lists perform better by taking out a batch of pages
1242 * and working on them outside the LRU lock.
1243 *
1244 * For pagecache intensive workloads, this function is the hottest
1245 * spot in the kernel (apart from copy_*_user functions).
1246 *
1247 * Appropriate locks must be held before calling this function.
1248 *
1249 * @nr_to_scan: The number of pages to look through on the list.
1250 * @lruvec: The LRU vector to pull pages from.
1251 * @dst: The temp list to put pages on to.
1252 * @nr_scanned: The number of pages that were scanned.
1253 * @sc: The scan_control struct for this reclaim session
1254 * @mode: One of the LRU isolation modes
1255 * @lru: LRU list id for isolating
1256 *
1257 * returns how many pages were moved onto *@dst.
1258 */
1259static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1260 struct lruvec *lruvec, struct list_head *dst,
1261 unsigned long *nr_scanned, struct scan_control *sc,
1262 isolate_mode_t mode, enum lru_list lru)
1263{
1264 struct list_head *src = &lruvec->lists[lru];
1265 unsigned long nr_taken = 0;
1266 unsigned long scan;
1267
1268 for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
1269 struct page *page;
1270 int nr_pages;
1271
1272 page = lru_to_page(src);
1273 prefetchw_prev_lru_page(page, src, flags);
1274
1275 VM_BUG_ON_PAGE(!PageLRU(page), page);
1276
1277 switch (__isolate_lru_page(page, mode)) {
1278 case 0:
1279 nr_pages = hpage_nr_pages(page);
1280 mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
1281 list_move(&page->lru, dst);
1282 nr_taken += nr_pages;
1283 break;
1284
1285 case -EBUSY:
1286 /* else it is being freed elsewhere */
1287 list_move(&page->lru, src);
1288 continue;
1289
1290 default:
1291 BUG();
1292 }
1293 }
1294
1295 *nr_scanned = scan;
1296 trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,
1297 nr_taken, mode, is_file_lru(lru));
1298 return nr_taken;
1299}
1300
1301/**
1302 * isolate_lru_page - tries to isolate a page from its LRU list
1303 * @page: page to isolate from its LRU list
1304 *
1305 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
1306 * vmstat statistic corresponding to whatever LRU list the page was on.
1307 *
1308 * Returns 0 if the page was removed from an LRU list.
1309 * Returns -EBUSY if the page was not on an LRU list.
1310 *
1311 * The returned page will have PageLRU() cleared. If it was found on
1312 * the active list, it will have PageActive set. If it was found on
1313 * the unevictable list, it will have the PageUnevictable bit set. That flag
1314 * may need to be cleared by the caller before letting the page go.
1315 *
1316 * The vmstat statistic corresponding to the list on which the page was
1317 * found will be decremented.
1318 *
1319 * Restrictions:
1320 * (1) Must be called with an elevated refcount on the page. This is a
1321 * fundamental difference from isolate_lru_pages (which is called
1322 * without a stable reference).
1323 * (2) the lru_lock must not be held.
1324 * (3) interrupts must be enabled.
1325 */
1326int isolate_lru_page(struct page *page)
1327{
1328 int ret = -EBUSY;
1329
1330 VM_BUG_ON_PAGE(!page_count(page), page);
1331
1332 if (PageLRU(page)) {
1333 struct zone *zone = page_zone(page);
1334 struct lruvec *lruvec;
1335
1336 spin_lock_irq(&zone->lru_lock);
1337 lruvec = mem_cgroup_page_lruvec(page, zone);
1338 if (PageLRU(page)) {
1339 int lru = page_lru(page);
1340 get_page(page);
1341 ClearPageLRU(page);
1342 del_page_from_lru_list(page, lruvec, lru);
1343 ret = 0;
1344 }
1345 spin_unlock_irq(&zone->lru_lock);
1346 }
1347 return ret;
1348}
1349
1350/*
1351 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
1352 * then get rescheduled. When there is a massive number of tasks doing page
1353 * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
1354 * the LRU list will go small and be scanned faster than necessary, leading to
1355 * unnecessary swapping, thrashing and OOM.
1356 */
1357static int too_many_isolated(struct zone *zone, int file,
1358 struct scan_control *sc)
1359{
1360 unsigned long inactive, isolated;
1361
1362 if (current_is_kswapd())
1363 return 0;
1364
1365 if (!global_reclaim(sc))
1366 return 0;
1367
1368 if (file) {
1369 inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1370 isolated = zone_page_state(zone, NR_ISOLATED_FILE);
1371 } else {
1372 inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1373 isolated = zone_page_state(zone, NR_ISOLATED_ANON);
1374 }
1375
1376 /*
1377 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
1378 * won't get blocked by normal direct-reclaimers, forming a circular
1379 * deadlock.
1380 */
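/*
 * For example, with 8000 inactive file pages a GFP_KERNEL direct
 * reclaimer is throttled once more than 1000 file pages are isolated,
 * while a GFP_NOFS reclaimer is only throttled past 8000.
 */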
1381 if ((sc->gfp_mask & GFP_IOFS) == GFP_IOFS)
1382 inactive >>= 3;
1383
1384 return isolated > inactive;
1385}
1386
1387static noinline_for_stack void
1388putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
1389{
1390 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1391 struct zone *zone = lruvec_zone(lruvec);
1392 LIST_HEAD(pages_to_free);
1393
1394 /*
1395 * Put back any unfreeable pages.
1396 */
1397 while (!list_empty(page_list)) {
1398 struct page *page = lru_to_page(page_list);
1399 int lru;
1400
1401 VM_BUG_ON_PAGE(PageLRU(page), page);
1402 list_del(&page->lru);
1403 if (unlikely(!page_evictable(page))) {
1404 spin_unlock_irq(&zone->lru_lock);
1405 putback_lru_page(page);
1406 spin_lock_irq(&zone->lru_lock);
1407 continue;
1408 }
1409
1410 lruvec = mem_cgroup_page_lruvec(page, zone);
1411
1412 SetPageLRU(page);
1413 lru = page_lru(page);
1414 add_page_to_lru_list(page, lruvec, lru);
1415
1416 if (is_active_lru(lru)) {
1417 int file = is_file_lru(lru);
1418 int numpages = hpage_nr_pages(page);
1419 reclaim_stat->recent_rotated[file] += numpages;
1420 }
1421 if (put_page_testzero(page)) {
1422 __ClearPageLRU(page);
1423 __ClearPageActive(page);
1424 del_page_from_lru_list(page, lruvec, lru);
1425
1426 if (unlikely(PageCompound(page))) {
1427 spin_unlock_irq(&zone->lru_lock);
1428 (*get_compound_page_dtor(page))(page);
1429 spin_lock_irq(&zone->lru_lock);
1430 } else
1431 list_add(&page->lru, &pages_to_free);
1432 }
1433 }
1434
1435 /*
1436 * To save our caller's stack, now use input list for pages to free.
1437 */
1438 list_splice(&pages_to_free, page_list);
1439}
1440
1441/*
1442 * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
1443 * of reclaimed pages
1444 */
1445static noinline_for_stack unsigned long
1446shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
1447 struct scan_control *sc, enum lru_list lru)
1448{
1449 LIST_HEAD(page_list);
1450 unsigned long nr_scanned;
1451 unsigned long nr_reclaimed = 0;
1452 unsigned long nr_taken;
1453 unsigned long nr_dirty = 0;
1454 unsigned long nr_congested = 0;
1455 unsigned long nr_unqueued_dirty = 0;
1456 unsigned long nr_writeback = 0;
1457 unsigned long nr_immediate = 0;
1458 isolate_mode_t isolate_mode = 0;
1459 int file = is_file_lru(lru);
1460 struct zone *zone = lruvec_zone(lruvec);
1461 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1462
1463 while (unlikely(too_many_isolated(zone, file, sc))) {
1464 congestion_wait(BLK_RW_ASYNC, HZ/10);
1465
1466 /* We are about to die and free our memory. Return now. */
1467 if (fatal_signal_pending(current))
1468 return SWAP_CLUSTER_MAX;
1469 }
1470
1471 lru_add_drain();
1472
1473 if (!sc->may_unmap)
1474 isolate_mode |= ISOLATE_UNMAPPED;
1475 if (!sc->may_writepage)
1476 isolate_mode |= ISOLATE_CLEAN;
1477
1478 spin_lock_irq(&zone->lru_lock);
1479
1480 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
1481 &nr_scanned, sc, isolate_mode, lru);
1482
1483 __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
1484 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
1485
1486 if (global_reclaim(sc)) {
1487 zone->pages_scanned += nr_scanned;
1488 if (current_is_kswapd())
1489 __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
1490 else
1491 __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned);
1492 }
1493 spin_unlock_irq(&zone->lru_lock);
1494
1495 if (nr_taken == 0)
1496 return 0;
1497
1498 nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
1499 &nr_dirty, &nr_unqueued_dirty, &nr_congested,
1500 &nr_writeback, &nr_immediate,
1501 false);
1502
1503 spin_lock_irq(&zone->lru_lock);
1504
1505 reclaim_stat->recent_scanned[file] += nr_taken;
1506
1507 if (global_reclaim(sc)) {
1508 if (current_is_kswapd())
1509 __count_zone_vm_events(PGSTEAL_KSWAPD, zone,
1510 nr_reclaimed);
1511 else
1512 __count_zone_vm_events(PGSTEAL_DIRECT, zone,
1513 nr_reclaimed);
1514 }
1515
1516 putback_inactive_pages(lruvec, &page_list);
1517
1518 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
1519
1520 spin_unlock_irq(&zone->lru_lock);
1521
1522 free_hot_cold_page_list(&page_list, 1);
1523
1524 /*
1525 * If reclaim is isolating dirty pages under writeback, it implies
1526 * that the long-lived page allocation rate is exceeding the page
1527 * laundering rate. Either the global limits are not being effective
1528 * at throttling processes due to the page distribution throughout
1529 * zones or there is heavy usage of a slow backing device. The
1530 * only option is to throttle from reclaim context which is not ideal
1531 * as there is no guarantee the dirtying process is throttled in the
1532 * same way balance_dirty_pages() manages.
1533 *
1534 * Once a zone is flagged ZONE_WRITEBACK, kswapd will count the number
1535 * of pages under writeback flagged for immediate reclaim and stall if any
1536 * are encountered in the nr_immediate check below.
1537 */
1538 if (nr_writeback && nr_writeback == nr_taken)
1539 zone_set_flag(zone, ZONE_WRITEBACK);
1540
1541 /*
1542 * memcg will stall in page writeback so only consider forcibly
1543 * stalling for global reclaim
1544 */
1545 if (global_reclaim(sc)) {
1546 /*
1547 * Tag a zone as congested if all the dirty pages scanned were
1548 * backed by a congested BDI and wait_iff_congested will stall.
1549 */
1550 if (nr_dirty && nr_dirty == nr_congested)
1551 zone_set_flag(zone, ZONE_CONGESTED);
1552
1553 /*
1554 * If dirty pages are scanned that are not queued for IO, it
1555 * implies that flushers are not keeping up. In this case, flag
1556 * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing
1557 * pages from reclaim context. It will forcibly stall in the
1558 * next check.
1559 */
1560 if (nr_unqueued_dirty == nr_taken)
1561 zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
1562
1563 /*
1564 * In addition, if kswapd scans pages marked for
1565 * immediate reclaim and under writeback (nr_immediate), it
1566 * implies that pages are cycling through the LRU faster than
1567 * they are written so also forcibly stall.
1568 */
1569 if (nr_unqueued_dirty == nr_taken || nr_immediate)
1570 congestion_wait(BLK_RW_ASYNC, HZ/10);
1571 }
1572
1573 /*
1574 * Stall direct reclaim for IO completions if underlying BDIs or zone
1575 * is congested. Allow kswapd to continue until it starts encountering
1576 * unqueued dirty pages or cycling through the LRU too quickly.
1577 */
1578 if (!sc->hibernation_mode && !current_is_kswapd())
1579 wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
1580
1581 trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
1582 zone_idx(zone),
1583 nr_scanned, nr_reclaimed,
1584 sc->priority,
1585 trace_shrink_flags(file));
1586 return nr_reclaimed;
1587}
1588
1589/*
1590 * This moves pages from the active list to the inactive list.
1591 *
1592 * We move them the other way if the page is referenced by one or more
1593 * processes, from rmap.
1594 *
1595 * If the pages are mostly unmapped, the processing is fast and it is
1596 * appropriate to hold zone->lru_lock across the whole operation. But if
1597 * the pages are mapped, the processing is slow (page_referenced()) so we
1598 * should drop zone->lru_lock around each page. It's impossible to balance
1599 * this, so instead we remove the pages from the LRU while processing them.
1600 * It is safe to rely on PG_active against the non-LRU pages in here because
1601 * nobody will play with that bit on a non-LRU page.
1602 *
1603 * The downside is that we have to touch page->_count against each page.
1604 * But we had to alter page->flags anyway.
1605 */
1606
1607static void move_active_pages_to_lru(struct lruvec *lruvec,
1608 struct list_head *list,
1609 struct list_head *pages_to_free,
1610 enum lru_list lru)
1611{
1612 struct zone *zone = lruvec_zone(lruvec);
1613 unsigned long pgmoved = 0;
1614 struct page *page;
1615 int nr_pages;
1616
1617 while (!list_empty(list)) {
1618 page = lru_to_page(list);
1619 lruvec = mem_cgroup_page_lruvec(page, zone);
1620
1621 VM_BUG_ON_PAGE(PageLRU(page), page);
1622 SetPageLRU(page);
1623
1624 nr_pages = hpage_nr_pages(page);
1625 mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
1626 list_move(&page->lru, &lruvec->lists[lru]);
1627 pgmoved += nr_pages;
1628
1629 if (put_page_testzero(page)) {
1630 __ClearPageLRU(page);
1631 __ClearPageActive(page);
1632 del_page_from_lru_list(page, lruvec, lru);
1633
1634 if (unlikely(PageCompound(page))) {
1635 spin_unlock_irq(&zone->lru_lock);
1636 (*get_compound_page_dtor(page))(page);
1637 spin_lock_irq(&zone->lru_lock);
1638 } else
1639 list_add(&page->lru, pages_to_free);
1640 }
1641 }
1642 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
1643 if (!is_active_lru(lru))
1644 __count_vm_events(PGDEACTIVATE, pgmoved);
1645}
1646
1647static void shrink_active_list(unsigned long nr_to_scan,
1648 struct lruvec *lruvec,
1649 struct scan_control *sc,
1650 enum lru_list lru)
1651{
1652 unsigned long nr_taken;
1653 unsigned long nr_scanned;
1654 unsigned long vm_flags;
1655 LIST_HEAD(l_hold); /* The pages which were snipped off */
1656 LIST_HEAD(l_active);
1657 LIST_HEAD(l_inactive);
1658 struct page *page;
1659 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1660 unsigned long nr_rotated = 0;
1661 isolate_mode_t isolate_mode = 0;
1662 int file = is_file_lru(lru);
1663 struct zone *zone = lruvec_zone(lruvec);
1664
1665 lru_add_drain();
1666
1667 if (!sc->may_unmap)
1668 isolate_mode |= ISOLATE_UNMAPPED;
1669 if (!sc->may_writepage)
1670 isolate_mode |= ISOLATE_CLEAN;
1671
1672 spin_lock_irq(&zone->lru_lock);
1673
1674 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
1675 &nr_scanned, sc, isolate_mode, lru);
1676 if (global_reclaim(sc))
1677 zone->pages_scanned += nr_scanned;
1678
1679 reclaim_stat->recent_scanned[file] += nr_taken;
1680
1681 __count_zone_vm_events(PGREFILL, zone, nr_scanned);
1682 __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
1683 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
1684 spin_unlock_irq(&zone->lru_lock);
1685
1686 while (!list_empty(&l_hold)) {
1687 cond_resched();
1688 page = lru_to_page(&l_hold);
1689 list_del(&page->lru);
1690
1691 if (unlikely(!page_evictable(page))) {
1692 putback_lru_page(page);
1693 continue;
1694 }
1695
1696 if (unlikely(buffer_heads_over_limit)) {
1697 if (page_has_private(page) && trylock_page(page)) {
1698 if (page_has_private(page))
1699 try_to_release_page(page, 0);
1700 unlock_page(page);
1701 }
1702 }
1703
1704 if (page_referenced(page, 0, sc->target_mem_cgroup,
1705 &vm_flags)) {
1706 nr_rotated += hpage_nr_pages(page);
1707 /*
1708 * Identify referenced, file-backed active pages and
1709 * give them one more trip around the active list, so
1710 * that executable code gets better chances to stay in
1711 * memory under moderate memory pressure. Anon pages
1712 * are not likely to be evicted by use-once streaming
1713 * IO, plus JVM can create lots of anon VM_EXEC pages,
1714 * so we ignore them here.
1715 */
1716 if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
1717 list_add(&page->lru, &l_active);
1718 continue;
1719 }
1720 }
1721
1722 ClearPageActive(page); /* we are de-activating */
1723 list_add(&page->lru, &l_inactive);
1724 }
1725
1726 /*
1727 * Move pages back to the lru list.
1728 */
1729 spin_lock_irq(&zone->lru_lock);
1730 /*
1731 * Count referenced pages from currently used mappings as rotated,
1732 * even though only some of them are actually re-activated. This
1733 * helps balance scan pressure between file and anonymous pages in
1734 * get_scan_ratio.
1735 */
1736 reclaim_stat->recent_rotated[file] += nr_rotated;
1737
1738 move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
1739 move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
1740 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
1741 spin_unlock_irq(&zone->lru_lock);
1742
1743 free_hot_cold_page_list(&l_hold, 1);
1744}
1745
1746#ifdef CONFIG_SWAP
1747static int inactive_anon_is_low_global(struct zone *zone)
1748{
1749 unsigned long active, inactive;
1750
1751 active = zone_page_state(zone, NR_ACTIVE_ANON);
1752 inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1753
1754 if (inactive * zone->inactive_ratio < active)
1755 return 1;
1756
1757 return 0;
1758}
1759
1760/**
1761 * inactive_anon_is_low - check if anonymous pages need to be deactivated
1762 * @lruvec: LRU vector to check
1763 *
1764 * Returns true if the zone does not have enough inactive anon pages,
1765 * meaning some active anon pages need to be deactivated.
1766 */
1767static int inactive_anon_is_low(struct lruvec *lruvec)
1768{
1769 /*
1770 * If we don't have swap space, anonymous page deactivation
1771 * is pointless.
1772 */
1773 if (!total_swap_pages)
1774 return 0;
1775
1776 if (!mem_cgroup_disabled())
1777 return mem_cgroup_inactive_anon_is_low(lruvec);
1778
1779 return inactive_anon_is_low_global(lruvec_zone(lruvec));
1780}
1781#else
1782static inline int inactive_anon_is_low(struct lruvec *lruvec)
1783{
1784 return 0;
1785}
1786#endif
1787
1788/**
1789 * inactive_file_is_low - check if file pages need to be deactivated
1790 * @lruvec: LRU vector to check
1791 *
1792 * When the system is doing streaming IO, memory pressure here
1793 * ensures that active file pages get deactivated, until more
1794 * than half of the file pages are on the inactive list.
1795 *
1796 * Once we get to that situation, protect the system's working
1797 * set from being evicted by disabling active file page aging.
1798 *
1799 * This uses a different ratio than the anonymous pages, because
1800 * the page cache uses a use-once replacement algorithm.
1801 */
1802static int inactive_file_is_low(struct lruvec *lruvec)
1803{
1804 unsigned long inactive;
1805 unsigned long active;
1806
1807 inactive = get_lru_size(lruvec, LRU_INACTIVE_FILE);
1808 active = get_lru_size(lruvec, LRU_ACTIVE_FILE);
1809
1810 return active > inactive;
1811}
1812
1813static int inactive_list_is_low(struct lruvec *lruvec, enum lru_list lru)
1814{
1815 if (is_file_lru(lru))
1816 return inactive_file_is_low(lruvec);
1817 else
1818 return inactive_anon_is_low(lruvec);
1819}
1820
1821static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1822 struct lruvec *lruvec, struct scan_control *sc)
1823{
1824 if (is_active_lru(lru)) {
1825 if (inactive_list_is_low(lruvec, lru))
1826 shrink_active_list(nr_to_scan, lruvec, sc, lru);
1827 return 0;
1828 }
1829
1830 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
1831}
1832
1833static int vmscan_swappiness(struct scan_control *sc)
1834{
1835 if (global_reclaim(sc))
1836 return vm_swappiness;
1837 return mem_cgroup_swappiness(sc->target_mem_cgroup);
1838}
1839
1840enum scan_balance {
1841 SCAN_EQUAL,
1842 SCAN_FRACT,
1843 SCAN_ANON,
1844 SCAN_FILE,
1845};
1846
1847/*
1848 * Determine how aggressively the anon and file LRU lists should be
1849 * scanned. The relative value of each set of LRU lists is determined
1850 * by looking at the fraction of the scanned pages that we rotated back
1851 * onto the active list instead of evicting.
1852 *
1853 * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
1854 * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
1855 */
1856static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
1857 unsigned long *nr)
1858{
1859 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1860 u64 fraction[2];
1861 u64 denominator = 0; /* gcc */
1862 struct zone *zone = lruvec_zone(lruvec);
1863 unsigned long anon_prio, file_prio;
1864 enum scan_balance scan_balance;
1865 unsigned long anon, file;
1866 bool force_scan = false;
1867 unsigned long ap, fp;
1868 enum lru_list lru;
1869
1870 /*
1871 * If the zone or memcg is small, nr[l] can be 0. This
1872 * results in no scanning on this priority and a potential
1873 * priority drop. Global direct reclaim can go to the next
1874 * zone and tends to have no problems. Global kswapd is for
1875 * zone balancing and it needs to scan a minimum amount. When
1876 * reclaiming for a memcg, a priority drop can cause high
1877 * latencies, so it's better to scan a minimum amount there as
1878 * well.
1879 */
1880 if (current_is_kswapd() && !zone_reclaimable(zone))
1881 force_scan = true;
1882 if (!global_reclaim(sc))
1883 force_scan = true;
1884
1885 /* If we have no swap space, do not bother scanning anon pages. */
1886 if (!sc->may_swap || (get_nr_swap_pages() <= 0)) {
1887 scan_balance = SCAN_FILE;
1888 goto out;
1889 }
1890
1891 /*
1892 * Global reclaim will swap to prevent OOM even with no
1893 * swappiness, but memcg users want to use this knob to
1894 * disable swapping for individual groups completely when
1895 * using the memory controller's swap limit feature would be
1896 * too expensive.
1897 */
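/*
 * In other words, limit reclaim for a memcg with swappiness 0 never
 * scans that group's anon LRUs here, no matter the pressure.
 */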
1898 if (!global_reclaim(sc) && !vmscan_swappiness(sc)) {
1899 scan_balance = SCAN_FILE;
1900 goto out;
1901 }
1902
1903 /*
1904 * Do not apply any pressure balancing cleverness when the
1905 * system is close to OOM, scan both anon and file equally
1906 * (unless the swappiness setting disagrees with swapping).
1907 */
1908 if (!sc->priority && vmscan_swappiness(sc)) {
1909 scan_balance = SCAN_EQUAL;
1910 goto out;
1911 }
1912
1913 anon = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
1914 get_lru_size(lruvec, LRU_INACTIVE_ANON);
1915 file = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
1916 get_lru_size(lruvec, LRU_INACTIVE_FILE);
1917
1918 /*
1919 * Prevent the reclaimer from falling into the cache trap: as
1920 * cache pages start out inactive, every cache fault will tip
1921 * the scan balance towards the file LRU. And as the file LRU
1922 * shrinks, so does the window for rotation from references.
1923 * This means we have a runaway feedback loop where a tiny
1924 * thrashing file LRU becomes infinitely more attractive than
1925 * anon pages. Try to detect this based on file LRU size.
1926 */
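/*
 * For example, with a high watermark of 10000 pages, a zone down to
 * 3000 file pages and 5000 free pages is scanned anon-only below.
 */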
1927 if (global_reclaim(sc)) {
1928 unsigned long free = zone_page_state(zone, NR_FREE_PAGES);
1929
1930 if (unlikely(file + free <= high_wmark_pages(zone))) {
1931 scan_balance = SCAN_ANON;
1932 goto out;
1933 }
1934 }
1935
1936 /*
1937 * There is enough inactive page cache, do not reclaim
1938 * anything from the anonymous working set right now.
1939 */
1940 if (!inactive_file_is_low(lruvec)) {
1941 scan_balance = SCAN_FILE;
1942 goto out;
1943 }
1944
1945 scan_balance = SCAN_FRACT;
1946
1947 /*
1948 * With swappiness at 100, anonymous and file have the same priority.
1949 * This scanning priority is essentially the inverse of IO cost.
1950 */
1951 anon_prio = vmscan_swappiness(sc);
1952 file_prio = 200 - anon_prio;
1953
1954 /*
1955 * OK, so we have swap space and a fair amount of page cache
1956 * pages. We use the recently rotated / recently scanned
1957 * ratios to determine how valuable each cache is.
1958 *
1959 * Because workloads change over time (and to avoid overflow)
1960 * we keep these statistics as a floating average, which ends
1961 * up weighing recent references more than old ones.
1962 *
1963 * anon in [0], file in [1]
1964 */
1965 spin_lock_irq(&zone->lru_lock);
1966 if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
1967 reclaim_stat->recent_scanned[0] /= 2;
1968 reclaim_stat->recent_rotated[0] /= 2;
1969 }
1970
1971 if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
1972 reclaim_stat->recent_scanned[1] /= 2;
1973 reclaim_stat->recent_rotated[1] /= 2;
1974 }
1975
1976 /*
1977 * The amount of pressure on anon vs file pages is inversely
1978 * proportional to the fraction of recently scanned pages on
1979 * each list that were recently referenced and in active use.
1980 */
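/*
 * Rough example with the default swappiness of 60 (file_prio 140):
 * recent_scanned/recent_rotated of 1000/500 for anon and 1000/100 for
 * file gives ap ~ 120 and fp ~ 1388, i.e. file pages receive roughly
 * 92% of the SCAN_FRACT scan pressure.
 */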
1981 ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
1982 ap /= reclaim_stat->recent_rotated[0] + 1;
1983
1984 fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
1985 fp /= reclaim_stat->recent_rotated[1] + 1;
1986 spin_unlock_irq(&zone->lru_lock);
1987
1988 fraction[0] = ap;
1989 fraction[1] = fp;
1990 denominator = ap + fp + 1;
1991out:
1992 for_each_evictable_lru(lru) {
1993 int file = is_file_lru(lru);
1994 unsigned long size;
1995 unsigned long scan;
1996
1997 size = get_lru_size(lruvec, lru);
1998 scan = size >> sc->priority;
1999
2000 if (!scan && force_scan)
2001 scan = min(size, SWAP_CLUSTER_MAX);
2002
2003 switch (scan_balance) {
2004 case SCAN_EQUAL:
2005 /* Scan lists relative to size */
2006 break;
2007 case SCAN_FRACT:
2008 /*
2009 * Scan types proportional to swappiness and
2010 * their relative recent reclaim efficiency.
2011 */
2012 scan = div64_u64(scan * fraction[file], denominator);
2013 break;
2014 case SCAN_FILE:
2015 case SCAN_ANON:
2016 /* Scan one type exclusively */
2017 if ((scan_balance == SCAN_FILE) != file)
2018 scan = 0;
2019 break;
2020 default:
2021 /* Look ma, no brain */
2022 BUG();
2023 }
2024 nr[lru] = scan;
2025 }
2026}
2027
2028/*
2029 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
2030 */
2031static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
2032{
2033 unsigned long nr[NR_LRU_LISTS];
2034 unsigned long targets[NR_LRU_LISTS];
2035 unsigned long nr_to_scan;
2036 enum lru_list lru;
2037 unsigned long nr_reclaimed = 0;
2038 unsigned long nr_to_reclaim = sc->nr_to_reclaim;
2039 struct blk_plug plug;
2040 bool scan_adjusted = false;
2041
2042 get_scan_count(lruvec, sc, nr);
2043
2044 /* Record the original scan target for proportional adjustments later */
2045 memcpy(targets, nr, sizeof(nr));
2046
2047 blk_start_plug(&plug);
2048 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
2049 nr[LRU_INACTIVE_FILE]) {
2050 unsigned long nr_anon, nr_file, percentage;
2051 unsigned long nr_scanned;
2052
2053 for_each_evictable_lru(lru) {
2054 if (nr[lru]) {
2055 nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
2056 nr[lru] -= nr_to_scan;
2057
2058 nr_reclaimed += shrink_list(lru, nr_to_scan,
2059 lruvec, sc);
2060 }
2061 }
2062
2063 if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
2064 continue;
2065
2066 /*
2067 * For global direct reclaim, reclaim only the number of pages
2068 * requested. Less care is taken to scan proportionally as it
2069 * is more important to minimise direct reclaim stall latency
2070 * than it is to properly age the LRU lists.
2071 */
2072 if (global_reclaim(sc) && !current_is_kswapd())
2073 break;
2074
2075 /*
2076 * For kswapd and memcg, reclaim at least the number of pages
2077 * requested. Ensure that the anon and file LRUs shrink
2078 * proportionally to what was requested by get_scan_count(). We
2079 * stop reclaiming one LRU and reduce the amount of scanning
2080 * proportional to the original scan target.
2081 */
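/*
 * Illustratively: if get_scan_count() asked for 100 anon and 1000 file
 * pages and the reclaim goal is met after ~60 of each were scanned,
 * the anon side (the smaller remaining target) is dropped and the file
 * counts below are rewound so that file scanning also stops at roughly
 * 60% of its original target.
 */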
2082 nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
2083 nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
2084
2085 if (nr_file > nr_anon) {
2086 unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
2087 targets[LRU_ACTIVE_ANON] + 1;
2088 lru = LRU_BASE;
2089 percentage = nr_anon * 100 / scan_target;
2090 } else {
2091 unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
2092 targets[LRU_ACTIVE_FILE] + 1;
2093 lru = LRU_FILE;
2094 percentage = nr_file * 100 / scan_target;
2095 }
2096
2097 /* Stop scanning the smaller of the LRU */
2098 nr[lru] = 0;
2099 nr[lru + LRU_ACTIVE] = 0;
2100
2101 /*
2102 * Recalculate the other LRU scan count based on its original
2103 * scan target and the percentage scanning already complete
2104 */
2105 lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
2106 nr_scanned = targets[lru] - nr[lru];
2107 nr[lru] = targets[lru] * (100 - percentage) / 100;
2108 nr[lru] -= min(nr[lru], nr_scanned);
2109
2110 lru += LRU_ACTIVE;
2111 nr_scanned = targets[lru] - nr[lru];
2112 nr[lru] = targets[lru] * (100 - percentage) / 100;
2113 nr[lru] -= min(nr[lru], nr_scanned);
2114
2115 scan_adjusted = true;
2116 }
2117 blk_finish_plug(&plug);
2118 sc->nr_reclaimed += nr_reclaimed;
2119
2120 /*
2121 * Even if we did not try to evict anon pages at all, we want to
2122 * rebalance the anon lru active/inactive ratio.
2123 */
2124 if (inactive_anon_is_low(lruvec))
2125 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
2126 sc, LRU_ACTIVE_ANON);
2127
2128 throttle_vm_writeout(sc->gfp_mask);
2129}
2130
2131/* Use reclaim/compaction for costly allocs or under memory pressure */
2132static bool in_reclaim_compaction(struct scan_control *sc)
2133{
2134 if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
2135 (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
2136 sc->priority < DEF_PRIORITY - 2))
2137 return true;
2138
2139 return false;
2140}
2141
2142/*
2143 * Reclaim/compaction is used for high-order allocation requests. It reclaims
2144 * order-0 pages before compacting the zone. should_continue_reclaim() returns
2145 * true if more pages should be reclaimed such that when the page allocator
2146 * calls try_to_compact_zone() it will have enough free pages to succeed.
2147 * It will give up earlier than that if there is difficulty reclaiming pages.
2148 */
2149static inline bool should_continue_reclaim(struct zone *zone,
2150 unsigned long nr_reclaimed,
2151 unsigned long nr_scanned,
2152 struct scan_control *sc)
2153{
2154 unsigned long pages_for_compaction;
2155 unsigned long inactive_lru_pages;
2156
2157 /* If not in reclaim/compaction mode, stop */
2158 if (!in_reclaim_compaction(sc))
2159 return false;
2160
2161 /* Consider stopping depending on scan and reclaim activity */
2162 if (sc->gfp_mask & __GFP_REPEAT) {
2163 /*
2164 * For __GFP_REPEAT allocations, stop reclaiming if the
2165 * full LRU list has been scanned and we are still failing
2166 * to reclaim pages. This full LRU scan is potentially
2167 * expensive but a __GFP_REPEAT caller really wants to succeed
2168 */
2169 if (!nr_reclaimed && !nr_scanned)
2170 return false;
2171 } else {
2172 /*
2173 * For non-__GFP_REPEAT allocations which can presumably
2174 * fail without consequence, stop if we failed to reclaim
2175 * any pages from the last SWAP_CLUSTER_MAX number of
2176 * pages that were scanned. This will return to the
2177 * caller faster at the risk that reclaim/compaction and
2178 * the resulting allocation attempt will fail
2179 */
2180 if (!nr_reclaimed)
2181 return false;
2182 }
2183
2184 /*
2185 * If we have not reclaimed enough pages for compaction and the
2186 * inactive lists are large enough, continue reclaiming
2187 */
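/*
 * E.g. an order-4 request keeps reclaiming until at least 32 pages
 * (2 << 4) have been reclaimed, provided the inactive lists hold more
 * than that.
 */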
2188 pages_for_compaction = (2UL << sc->order);
2189 inactive_lru_pages = zone_page_state(zone, NR_INACTIVE_FILE);
2190 if (get_nr_swap_pages() > 0)
2191 inactive_lru_pages += zone_page_state(zone, NR_INACTIVE_ANON);
2192 if (sc->nr_reclaimed < pages_for_compaction &&
2193 inactive_lru_pages > pages_for_compaction)
2194 return true;
2195
2196 /* If compaction would go ahead or the allocation would succeed, stop */
2197 switch (compaction_suitable(zone, sc->order)) {
2198 case COMPACT_PARTIAL:
2199 case COMPACT_CONTINUE:
2200 return false;
2201 default:
2202 return true;
2203 }
2204}
2205
2206static void shrink_zone(struct zone *zone, struct scan_control *sc)
2207{
2208 unsigned long nr_reclaimed, nr_scanned;
2209
2210 do {
2211 struct mem_cgroup *root = sc->target_mem_cgroup;
2212 struct mem_cgroup_reclaim_cookie reclaim = {
2213 .zone = zone,
2214 .priority = sc->priority,
2215 };
2216 struct mem_cgroup *memcg;
2217
2218 nr_reclaimed = sc->nr_reclaimed;
2219 nr_scanned = sc->nr_scanned;
2220
2221 memcg = mem_cgroup_iter(root, NULL, &reclaim);
2222 do {
2223 struct lruvec *lruvec;
2224
2225 lruvec = mem_cgroup_zone_lruvec(zone, memcg);
2226
2227 shrink_lruvec(lruvec, sc);
2228
2229 /*
2230 * Direct reclaim and kswapd have to scan all memory
2231 * cgroups to fulfill the overall scan target for the
2232 * zone.
2233 *
2234 * Limit reclaim, on the other hand, only cares about
2235 * nr_to_reclaim pages to be reclaimed and it will
2236 * retry with decreasing priority if one round over the
2237 * whole hierarchy is not sufficient.
2238 */
2239 if (!global_reclaim(sc) &&
2240 sc->nr_reclaimed >= sc->nr_to_reclaim) {
2241 mem_cgroup_iter_break(root, memcg);
2242 break;
2243 }
2244 memcg = mem_cgroup_iter(root, memcg, &reclaim);
2245 } while (memcg);
2246
2247 vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
2248 sc->nr_scanned - nr_scanned,
2249 sc->nr_reclaimed - nr_reclaimed);
2250
2251 } while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
2252 sc->nr_scanned - nr_scanned, sc));
2253}
2254
2255/* Returns true if compaction should go ahead for a high-order request */
2256static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
2257{
2258 unsigned long balance_gap, watermark;
2259 bool watermark_ok;
2260
2261 /* Do not consider compaction for orders reclaim is meant to satisfy */
2262 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER)
2263 return false;
2264
2265 /*
2266 * Compaction takes time to run and there are potentially other
2267 * callers using the pages just freed. Continue reclaiming until
2268 * there is a buffer of free pages available to give compaction
2269 * a reasonable chance of completing and allocating the page
2270 */
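/*
 * For example, an order-9 (THP sized) request on a zone with a high
 * watermark of 2000 pages and a balance gap of 1000 keeps reclaiming
 * until roughly 2000 + 1000 + 1024 free pages are available.
 */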
2271 balance_gap = min(low_wmark_pages(zone),
2272 (zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
2273 KSWAPD_ZONE_BALANCE_GAP_RATIO);
2274 watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order);
2275 watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0);
2276
2277 /*
2278 * If compaction is deferred, reclaim up to a point where
2279 * compaction will have a chance of success when re-enabled
2280 */
2281 if (compaction_deferred(zone, sc->order))
2282 return watermark_ok;
2283
2284 /* If compaction is not ready to start, keep reclaiming */
2285 if (!compaction_suitable(zone, sc->order))
2286 return false;
2287
2288 return watermark_ok;
2289}
2290
2291/*
2292 * This is the direct reclaim path, for page-allocating processes. We only
2293 * try to reclaim pages from zones which will satisfy the caller's allocation
2294 * request.
2295 *
2296 * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
2297 * Because:
2298 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
2299 * allocation or
2300 * b) The target zone may be at high_wmark_pages(zone) but the lower zones
2301 * must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
2302 * zone defense algorithm.
2303 *
2304 * If a zone is deemed to be full of pinned pages then just give it a light
2305 * scan then give up on it.
2306 *
2307 * This function returns true if a zone is being reclaimed for a costly
2308 * high-order allocation and compaction is ready to begin. This indicates to
2309 * the caller that it should consider retrying the allocation instead of
2310 * further reclaim.
2311 */
2312static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
2313{
2314 struct zoneref *z;
2315 struct zone *zone;
2316 unsigned long nr_soft_reclaimed;
2317 unsigned long nr_soft_scanned;
2318 unsigned long lru_pages = 0;
2319 bool aborted_reclaim = false;
2320 struct reclaim_state *reclaim_state = current->reclaim_state;
2321 gfp_t orig_mask;
2322 struct shrink_control shrink = {
2323 .gfp_mask = sc->gfp_mask,
2324 };
2325 enum zone_type requested_highidx = gfp_zone(sc->gfp_mask);
2326
2327 /*
2328 * If the number of buffer_heads in the machine exceeds the maximum
2329 * allowed level, force direct reclaim to scan the highmem zone as
2330 * highmem pages could be pinning lowmem pages storing buffer_heads
2331 */
2332 orig_mask = sc->gfp_mask;
2333 if (buffer_heads_over_limit)
2334 sc->gfp_mask |= __GFP_HIGHMEM;
2335
2336 nodes_clear(shrink.nodes_to_scan);
2337
2338 for_each_zone_zonelist_nodemask(zone, z, zonelist,
2339 gfp_zone(sc->gfp_mask), sc->nodemask) {
2340 if (!populated_zone(zone))
2341 continue;
2342 /*
2343 * Take care that memory controller reclaiming has only a small
2344 * influence on the global LRU.
2345 */
2346 if (global_reclaim(sc)) {
2347 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2348 continue;
2349
2350 lru_pages += zone_reclaimable_pages(zone);
2351 node_set(zone_to_nid(zone), shrink.nodes_to_scan);
2352
2353 if (sc->priority != DEF_PRIORITY &&
2354 !zone_reclaimable(zone))
2355 continue; /* Let kswapd poll it */
2356 if (IS_ENABLED(CONFIG_COMPACTION)) {
2357 /*
2358 * If we already have plenty of memory free for
2359 * compaction in this zone, don't free any more.
2360 * Even though compaction is invoked for any
2361 * non-zero order, only frequent costly order
2362 * reclamation is disruptive enough to become a
2363 * noticeable problem, like transparent huge
2364 * page allocations.
2365 */
2366 if ((zonelist_zone_idx(z) <= requested_highidx)
2367 && compaction_ready(zone, sc)) {
2368 aborted_reclaim = true;
2369 continue;
2370 }
2371 }
2372 /*
2373 * This steals pages from memory cgroups over softlimit
2374 * and returns the number of reclaimed pages and
2375 * scanned pages. This works for global memory pressure
2376 * and balancing, not for a memcg's limit.
2377 */
2378 nr_soft_scanned = 0;
2379 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
2380 sc->order, sc->gfp_mask,
2381 &nr_soft_scanned);
2382 sc->nr_reclaimed += nr_soft_reclaimed;
2383 sc->nr_scanned += nr_soft_scanned;
2384 /* need some check to avoid more shrink_zone() */
2385 }
2386
2387 shrink_zone(zone, sc);
2388 }
2389
2390 /*
2391 * Don't shrink slabs when reclaiming memory from over limit cgroups
2392 * but do shrink slab at least once when aborting reclaim for
2393 * compaction to avoid unevenly scanning file/anon LRU pages over slab
2394 * pages.
2395 */
2396 if (global_reclaim(sc)) {
2397 shrink_slab(&shrink, sc->nr_scanned, lru_pages);
2398 if (reclaim_state) {
2399 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
2400 reclaim_state->reclaimed_slab = 0;
2401 }
2402 }
2403
2404 /*
2405 * Restore to original mask to avoid the impact on the caller if we
2406 * promoted it to __GFP_HIGHMEM.
2407 */
2408 sc->gfp_mask = orig_mask;
2409
2410 return aborted_reclaim;
2411}
2412
2413/* All zones in zonelist are unreclaimable? */
2414static bool all_unreclaimable(struct zonelist *zonelist,
2415 struct scan_control *sc)
2416{
2417 struct zoneref *z;
2418 struct zone *zone;
2419
2420 for_each_zone_zonelist_nodemask(zone, z, zonelist,
2421 gfp_zone(sc->gfp_mask), sc->nodemask) {
2422 if (!populated_zone(zone))
2423 continue;
2424 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2425 continue;
2426 if (zone_reclaimable(zone))
2427 return false;
2428 }
2429
2430 return true;
2431}
2432
2433/*
2434 * This is the main entry point to direct page reclaim.
2435 *
2436 * If a full scan of the inactive list fails to free enough memory then we
2437 * are "out of memory" and something needs to be killed.
2438 *
2439 * If the caller is !__GFP_FS then the probability of a failure is reasonably
2440 * high - the zone may be full of dirty or under-writeback pages, which this
2441 * caller can't do much about. We kick the writeback threads and take explicit
2442 * naps in the hope that some of these pages can be written. But if the
2443 * allocating task holds filesystem locks which prevent writeout this might not
2444 * work, and the allocation attempt will fail.
2445 *
2446 * returns: 0, if no pages reclaimed
2447 * else, the number of pages reclaimed
2448 */
2449static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2450 struct scan_control *sc)
2451{
2452 unsigned long total_scanned = 0;
2453 unsigned long writeback_threshold;
2454 bool aborted_reclaim;
2455
2456 delayacct_freepages_start();
2457
2458 if (global_reclaim(sc))
2459 count_vm_event(ALLOCSTALL);
2460
2461 do {
2462 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
2463 sc->priority);
2464 sc->nr_scanned = 0;
2465 aborted_reclaim = shrink_zones(zonelist, sc);
2466
2467 total_scanned += sc->nr_scanned;
2468 if (sc->nr_reclaimed >= sc->nr_to_reclaim)
2469 goto out;
2470
2471 /*
2472 * If we're getting trouble reclaiming, start doing
2473 * writepage even in laptop mode.
2474 */
2475 if (sc->priority < DEF_PRIORITY - 2)
2476 sc->may_writepage = 1;
2477
2478 /*
2479 * Try to write back as many pages as we just scanned. This
2480 * tends to cause slow streaming writers to write data to the
2481 * disk smoothly, at the dirtying rate, which is nice. But
2482 * that's undesirable in laptop mode, where we *want* lumpy
2483 * writeout. So in laptop mode, write out the whole world.
2484 */
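/*
 * The threshold below is 1.5x the reclaim target, e.g. 48 pages when
 * nr_to_reclaim is SWAP_CLUSTER_MAX (32).
 */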
2485 writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
2486 if (total_scanned > writeback_threshold) {
2487 wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
2488 WB_REASON_TRY_TO_FREE_PAGES);
2489 sc->may_writepage = 1;
2490 }
2491 } while (--sc->priority >= 0 && !aborted_reclaim);
2492
2493out:
2494 delayacct_freepages_end();
2495
2496 if (sc->nr_reclaimed)
2497 return sc->nr_reclaimed;
2498
2499 /*
2500 * As hibernation is going on, kswapd is frozen so that it can't mark
2501 * the zone into all_unreclaimable. Thus bypassing all_unreclaimable
2502 * check.
2503 */
2504 if (oom_killer_disabled)
2505 return 0;
2506
2507 /* Aborted reclaim to try compaction? don't OOM, then */
2508 if (aborted_reclaim)
2509 return 1;
2510
2511 /* top priority shrink_zones still had more to do? don't OOM, then */
2512 if (global_reclaim(sc) && !all_unreclaimable(zonelist, sc))
2513 return 1;
2514
2515 return 0;
2516}
2517
2518static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
2519{
2520 struct zone *zone;
2521 unsigned long pfmemalloc_reserve = 0;
2522 unsigned long free_pages = 0;
2523 int i;
2524 bool wmark_ok;
2525
2526 for (i = 0; i <= ZONE_NORMAL; i++) {
2527 zone = &pgdat->node_zones[i];
2528 pfmemalloc_reserve += min_wmark_pages(zone);
2529 free_pages += zone_page_state(zone, NR_FREE_PAGES);
2530 }
2531
2532 wmark_ok = free_pages > pfmemalloc_reserve / 2;
2533
2534 /* kswapd must be awake if processes are being throttled */
2535 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
2536 pgdat->classzone_idx = min(pgdat->classzone_idx,
2537 (enum zone_type)ZONE_NORMAL);
2538 wake_up_interruptible(&pgdat->kswapd_wait);
2539 }
2540
2541 return wmark_ok;
2542}
2543
2544/*
2545 * Throttle direct reclaimers if backing storage is backed by the network
2546 * and the PFMEMALLOC reserve for the preferred node is getting dangerously
2547 * depleted. kswapd will continue to make progress and wake the processes
2548 * when the low watermark is reached.
2549 *
2550 * Returns true if a fatal signal was delivered during throttling. If this
2551 * happens, the page allocator should not consider triggering the OOM killer.
2552 */
2553static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
2554 nodemask_t *nodemask)
2555{
2556 struct zone *zone;
2557 int high_zoneidx = gfp_zone(gfp_mask);
2558 pg_data_t *pgdat;
2559
2560 /*
2561 * Kernel threads should not be throttled as they may be indirectly
2562 * responsible for cleaning pages necessary for reclaim to make forward
2563 * progress. kjournald for example may enter direct reclaim while
2564 * committing a transaction where throttling it could force other
2565 * processes to block on log_wait_commit().
2566 */
2567 if (current->flags & PF_KTHREAD)
2568 goto out;
2569
2570 /*
2571 * If a fatal signal is pending, this process should not throttle.
2572 * It should return quickly so it can exit and free its memory
2573 */
2574 if (fatal_signal_pending(current))
2575 goto out;
2576
2577 /* Check if the pfmemalloc reserves are ok */
2578 first_zones_zonelist(zonelist, high_zoneidx, NULL, &zone);
2579 pgdat = zone->zone_pgdat;
2580 if (pfmemalloc_watermark_ok(pgdat))
2581 goto out;
2582
2583 /* Account for the throttling */
2584 count_vm_event(PGSCAN_DIRECT_THROTTLE);
2585
2586 /*
2587 * If the caller cannot enter the filesystem, it's possible that it
2588 * is due to the caller holding an FS lock or performing a journal
2589 * transaction in the case of a filesystem like ext[3|4]. In this case,
2590 * it is not safe to block on pfmemalloc_wait as kswapd could be
2591 * blocked waiting on the same lock. Instead, throttle for up to a
2592 * second before continuing.
2593 */
2594 if (!(gfp_mask & __GFP_FS)) {
2595 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
2596 pfmemalloc_watermark_ok(pgdat), HZ);
2597
2598 goto check_pending;
2599 }
2600
2601 /* Throttle until kswapd wakes the process */
2602 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
2603 pfmemalloc_watermark_ok(pgdat));
2604
2605check_pending:
2606 if (fatal_signal_pending(current))
2607 return true;
2608
2609out:
2610 return false;
2611}
2612
2613unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
2614 gfp_t gfp_mask, nodemask_t *nodemask)
2615{
2616 unsigned long nr_reclaimed;
2617 struct scan_control sc = {
2618 .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
2619 .may_writepage = !laptop_mode,
2620 .nr_to_reclaim = SWAP_CLUSTER_MAX,
2621 .may_unmap = 1,
2622 .may_swap = 1,
2623 .order = order,
2624 .priority = DEF_PRIORITY,
2625 .target_mem_cgroup = NULL,
2626 .nodemask = nodemask,
2627 };
2628
2629 /*
2630 * Do not enter reclaim if fatal signal was delivered while throttled.
2631 * 1 is returned so that the page allocator does not OOM kill at this
2632 * point.
2633 */
2634 if (throttle_direct_reclaim(gfp_mask, zonelist, nodemask))
2635 return 1;
2636
2637 trace_mm_vmscan_direct_reclaim_begin(order,
2638 sc.may_writepage,
2639 gfp_mask);
2640
2641 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
2642
2643 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
2644
2645 return nr_reclaimed;
2646}
2647
2648#ifdef CONFIG_MEMCG
2649
2650unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
2651 gfp_t gfp_mask, bool noswap,
2652 struct zone *zone,
2653 unsigned long *nr_scanned)
2654{
2655 struct scan_control sc = {
2656 .nr_scanned = 0,
2657 .nr_to_reclaim = SWAP_CLUSTER_MAX,
2658 .may_writepage = !laptop_mode,
2659 .may_unmap = 1,
2660 .may_swap = !noswap,
2661 .order = 0,
2662 .priority = 0,
2663 .target_mem_cgroup = memcg,
2664 };
2665 struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
2666
2667 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2668 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
2669
2670 trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
2671 sc.may_writepage,
2672 sc.gfp_mask);
2673
2674 /*
2675 * NOTE: Although we can get the priority field, using it
2676 * here is not a good idea, since it limits the pages we can scan.
2677 * if we don't reclaim here, the shrink_zone from balance_pgdat
2678 * If we don't reclaim here, the shrink_zone from balance_pgdat
2679 * will pick up pages from other mem cgroups as well. We hack
2680 */
2681 shrink_lruvec(lruvec, &sc);
2682
2683 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
2684
2685 *nr_scanned = sc.nr_scanned;
2686 return sc.nr_reclaimed;
2687}
2688
2689unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
2690 gfp_t gfp_mask,
2691 bool noswap)
2692{
2693 struct zonelist *zonelist;
2694 unsigned long nr_reclaimed;
2695 int nid;
2696 struct scan_control sc = {
2697 .may_writepage = !laptop_mode,
2698 .may_unmap = 1,
2699 .may_swap = !noswap,
2700 .nr_to_reclaim = SWAP_CLUSTER_MAX,
2701 .order = 0,
2702 .priority = DEF_PRIORITY,
2703 .target_mem_cgroup = memcg,
2704 .nodemask = NULL, /* we don't care about the placement */
2705 .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2706 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
2707 };
2708
2709 /*
2710 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
2711 * care about where we get pages from. So the node where we start the
2712 * scan does not need to be the current node.
2713 */
2714 nid = mem_cgroup_select_victim_node(memcg);
2715
2716 zonelist = NODE_DATA(nid)->node_zonelists;
2717
2718 trace_mm_vmscan_memcg_reclaim_begin(0,
2719 sc.may_writepage,
2720 sc.gfp_mask);
2721
2722 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
2723
2724 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
2725
2726 return nr_reclaimed;
2727}
2728#endif
2729
2730static void age_active_anon(struct zone *zone, struct scan_control *sc)
2731{
2732 struct mem_cgroup *memcg;
2733
2734 if (!total_swap_pages)
2735 return;
2736
2737 memcg = mem_cgroup_iter(NULL, NULL, NULL);
2738 do {
2739 struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
2740
2741 if (inactive_anon_is_low(lruvec))
2742 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
2743 sc, LRU_ACTIVE_ANON);
2744
2745 memcg = mem_cgroup_iter(NULL, memcg, NULL);
2746 } while (memcg);
2747}
2748
2749static bool zone_balanced(struct zone *zone, int order,
2750 unsigned long balance_gap, int classzone_idx)
2751{
2752 if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
2753 balance_gap, classzone_idx, 0))
2754 return false;
2755
2756 if (IS_ENABLED(CONFIG_COMPACTION) && order &&
2757 !compaction_suitable(zone, order))
2758 return false;
2759
2760 return true;
2761}
2762
2763/*
2764 * pgdat_balanced() is used when checking if a node is balanced.
2765 *
2766 * For order-0, all zones must be balanced!
2767 *
2768 * For high-order allocations only zones that meet watermarks and are in a
2769 * zone allowed by the callers classzone_idx are added to balanced_pages. The
2770 * total of balanced pages must be at least 25% of the zones allowed by
2771 * classzone_idx for the node to be considered balanced. Forcing all zones to
2772 * be balanced for high orders can cause excessive reclaim when there are
2773 * imbalanced zones.
2774 * The choice of 25% is due to
2775 * o a 16M DMA zone that is balanced will not balance a zone on any
2776 * reasonably sized machine
2777 * o On all other machines, the top zone must be at least a reasonable
2778 * percentage of the middle zones. For example, on 32-bit x86, highmem
2779 * would need to be at least 256M for it to balance a whole node.
2780 * Similarly, on x86-64 the Normal zone would need to be at least 1G
2781 * to balance a node on its own. These seemed like reasonable ratios.
2782 */
2783static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
2784{
2785 unsigned long managed_pages = 0;
2786 unsigned long balanced_pages = 0;
2787 int i;
2788
2789 /* Check the watermark levels */
2790 for (i = 0; i <= classzone_idx; i++) {
2791 struct zone *zone = pgdat->node_zones + i;
2792
2793 if (!populated_zone(zone))
2794 continue;
2795
2796 managed_pages += zone->managed_pages;
2797
2798 /*
2799 * A special case here:
2800 *
2801 * balance_pgdat() skips over all_unreclaimable after
2802 * DEF_PRIORITY. Effectively, it considers them balanced so
2803 * they must be considered balanced here as well!
2804 */
2805 if (!zone_reclaimable(zone)) {
2806 balanced_pages += zone->managed_pages;
2807 continue;
2808 }
2809
2810 if (zone_balanced(zone, order, 0, i))
2811 balanced_pages += zone->managed_pages;
2812 else if (!order)
2813 return false;
2814 }
2815
2816 if (order)
2817 return balanced_pages >= (managed_pages >> 2);
2818 else
2819 return true;
2820}
2821
2822/*
2823 * Prepare kswapd for sleeping. This verifies that there are no processes
2824 * waiting in throttle_direct_reclaim() and that watermarks have been met.
2825 *
2826 * Returns true if kswapd is ready to sleep
2827 */
2828static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
2829 int classzone_idx)
2830{
2831 /* If a direct reclaimer woke kswapd within HZ/10, it's premature */
2832 if (remaining)
2833 return false;
2834
2835 /*
2836 * There is a potential race between when kswapd checks its watermarks
2837 * and a process gets throttled. There is also a potential race if
2838 * processes get throttled, kswapd wakes, and a large process exits,
2839 * thereby balancing the zones and causing kswapd to miss a wakeup. If
2840 * kswapd is going to sleep, no process should be sleeping on
2841 * pfmemalloc_wait, so wake them now if necessary. If need be, the woken
2842 * processes will wake kswapd and get throttled again.
2843 */
2844 if (waitqueue_active(&pgdat->pfmemalloc_wait)) {
2845 wake_up(&pgdat->pfmemalloc_wait);
2846 return false;
2847 }
2848
2849 return pgdat_balanced(pgdat, order, classzone_idx);
2850}
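/*
 * Example of the "remaining" argument (a sketch of the caller below):
 * kswapd_try_to_sleep() first calls this with remaining == 0 before napping
 * for HZ/10. If that nap is cut short by a wakeup, remaining is non-zero on
 * the second call, the check above fails, and kswapd goes straight back to
 * reclaiming instead of sleeping fully.
 */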
2851
2852/*
2853 * kswapd shrinks the zone by the number of pages required to reach
2854 * the high watermark.
2855 *
2856 * Returns true if kswapd scanned at least the requested number of pages to
2857 * reclaim or if the lack of progress was due to pages under writeback.
2858 * This is used to determine if the scanning priority needs to be raised.
2859 */
2860static bool kswapd_shrink_zone(struct zone *zone,
2861 int classzone_idx,
2862 struct scan_control *sc,
2863 unsigned long lru_pages,
2864 unsigned long *nr_attempted)
2865{
2866 int testorder = sc->order;
2867 unsigned long balance_gap;
2868 struct reclaim_state *reclaim_state = current->reclaim_state;
2869 struct shrink_control shrink = {
2870 .gfp_mask = sc->gfp_mask,
2871 };
2872 bool lowmem_pressure;
2873
2874 /* Reclaim above the high watermark. */
2875 sc->nr_to_reclaim = max(SWAP_CLUSTER_MAX, high_wmark_pages(zone));
2876
2877 /*
2878 * Kswapd reclaims only single pages with compaction enabled. Trying
2879 * too hard to reclaim until contiguous free pages have become
2880 * available can hurt performance by evicting too much useful data
2881 * from memory. Do not reclaim more than needed for compaction.
2882 */
2883 if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
2884 compaction_suitable(zone, sc->order) !=
2885 COMPACT_SKIPPED)
2886 testorder = 0;
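/*
 * Illustrative case (a sketch): for an order-9 THP-style request where
 * compaction_suitable() reports the zone has enough free memory to compact,
 * testorder is reset to 0 so the watermark check below is done at order-0;
 * producing contiguous pages is then left to compaction rather than reclaim.
 */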
2887
2888 /*
2889 * We put equal pressure on every zone, unless one zone has way too
2890 * many pages free already. The "too many pages" is defined as the
2891 * high wmark plus a "gap" where the gap is either the low
2892 * watermark or 1% of the zone, whichever is smaller.
2893 */
2894 balance_gap = min(low_wmark_pages(zone),
2895 (zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
2896 KSWAPD_ZONE_BALANCE_GAP_RATIO);
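/*
 * Illustrative arithmetic (assuming KSWAPD_ZONE_BALANCE_GAP_RATIO is 100,
 * its historical value): for a zone with 1048576 managed pages (4G of 4K
 * pages) the 1% term is (1048576 + 99) / 100 = 10486 pages, so balance_gap
 * is the smaller of that and low_wmark_pages(zone).
 */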
2897
2898 /*
2899 * If there is no low memory pressure or the zone is balanced then no
2900 * reclaim is necessary
2901 */
2902 lowmem_pressure = (buffer_heads_over_limit && is_highmem(zone));
2903 if (!lowmem_pressure && zone_balanced(zone, testorder,
2904 balance_gap, classzone_idx))
2905 return true;
2906
2907 shrink_zone(zone, sc);
2908 nodes_clear(shrink.nodes_to_scan);
2909 node_set(zone_to_nid(zone), shrink.nodes_to_scan);
2910
2911 reclaim_state->reclaimed_slab = 0;
2912 shrink_slab(&shrink, sc->nr_scanned, lru_pages);
2913 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
2914
2915 /* Account for the number of pages attempted to reclaim */
2916 *nr_attempted += sc->nr_to_reclaim;
2917
2918 zone_clear_flag(zone, ZONE_WRITEBACK);
2919
2920 /*
2921 * If a zone reaches its high watermark, consider it to be no longer
2922 * congested. It's possible there are dirty pages backed by congested
2923 * BDIs but as pressure is relieved, speculatively avoid congestion
2924 * waits.
2925 */
2926 if (zone_reclaimable(zone) &&
2927 zone_balanced(zone, testorder, 0, classzone_idx)) {
2928 zone_clear_flag(zone, ZONE_CONGESTED);
2929 zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
2930 }
2931
2932 return sc->nr_scanned >= sc->nr_to_reclaim;
2933}
2934
2935/*
2936 * For kswapd, balance_pgdat() will work across all this node's zones until
2937 * they are all at high_wmark_pages(zone).
2938 *
2939 * Returns the final order kswapd was reclaiming at
2940 *
2941 * There is special handling here for zones which are full of pinned pages.
2942 * This can happen if the pages are all mlocked, or if they are all used by
2943 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb.
2944 * What we do is to detect the case where all pages in the zone have been
2945 * scanned twice and there has been zero successful reclaim. Mark the zone as
2946 * dead and from now on, only perform a short scan. Basically we're polling
2947 * the zone for when the problem goes away.
2948 *
2949 * kswapd scans the zones in the highmem->normal->dma direction. It skips
2950 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
2951 * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
2952 * lower zones regardless of the number of free pages in the lower zones. This
2953 * interoperates with the page allocator fallback scheme to ensure that aging
2954 * of pages is balanced across the zones.
2955 */
2956static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
2957 int *classzone_idx)
2958{
2959 int i;
2960 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
2961 unsigned long nr_soft_reclaimed;
2962 unsigned long nr_soft_scanned;
2963 struct scan_control sc = {
2964 .gfp_mask = GFP_KERNEL,
2965 .priority = DEF_PRIORITY,
2966 .may_unmap = 1,
2967 .may_swap = 1,
2968 .may_writepage = !laptop_mode,
2969 .order = order,
2970 .target_mem_cgroup = NULL,
2971 };
2972 count_vm_event(PAGEOUTRUN);
2973
2974 do {
2975 unsigned long lru_pages = 0;
2976 unsigned long nr_attempted = 0;
2977 bool raise_priority = true;
2978 bool pgdat_needs_compaction = (order > 0);
2979
2980 sc.nr_reclaimed = 0;
2981
2982 /*
2983 * Scan in the highmem->dma direction for the highest
2984 * zone which needs scanning
2985 */
2986 for (i = pgdat->nr_zones - 1; i >= 0; i--) {
2987 struct zone *zone = pgdat->node_zones + i;
2988
2989 if (!populated_zone(zone))
2990 continue;
2991
2992 if (sc.priority != DEF_PRIORITY &&
2993 !zone_reclaimable(zone))
2994 continue;
2995
2996 /*
2997 * Do some background aging of the anon list, to give
2998 * pages a chance to be referenced before reclaiming.
2999 */
3000 age_active_anon(zone, &sc);
3001
3002 /*
3003 * If the number of buffer_heads in the machine
3004 * exceeds the maximum allowed level and this node
3005 * has a highmem zone, force kswapd to reclaim from
3006 * it to relieve lowmem pressure.
3007 */
3008 if (buffer_heads_over_limit && is_highmem_idx(i)) {
3009 end_zone = i;
3010 break;
3011 }
3012
3013 if (!zone_balanced(zone, order, 0, 0)) {
3014 end_zone = i;
3015 break;
3016 } else {
3017 /*
3018 * If balanced, clear the dirty and congested
3019 * flags
3020 */
3021 zone_clear_flag(zone, ZONE_CONGESTED);
3022 zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
3023 }
3024 }
3025
3026 if (i < 0)
3027 goto out;
3028
3029 for (i = 0; i <= end_zone; i++) {
3030 struct zone *zone = pgdat->node_zones + i;
3031
3032 if (!populated_zone(zone))
3033 continue;
3034
3035 lru_pages += zone_reclaimable_pages(zone);
3036
3037 /*
3038 * If any zone is currently balanced then kswapd will
3039 * not call compaction as it is expected that the
3040 * necessary pages are already available.
3041 */
3042 if (pgdat_needs_compaction &&
3043 zone_watermark_ok(zone, order,
3044 low_wmark_pages(zone),
3045 *classzone_idx, 0))
3046 pgdat_needs_compaction = false;
3047 }
3048
3049 /*
3050 * If we're having trouble reclaiming, start doing writepage
3051 * even in laptop mode.
3052 */
3053 if (sc.priority < DEF_PRIORITY - 2)
3054 sc.may_writepage = 1;
3055
3056 /*
3057 * Now scan the zone in the dma->highmem direction, stopping
3058 * at the last zone which needs scanning.
3059 *
3060 * We do this because the page allocator works in the opposite
3061 * direction. This prevents the page allocator from allocating
3062 * pages behind kswapd's direction of progress, which would
3063 * cause too much scanning of the lower zones.
3064 */
3065 for (i = 0; i <= end_zone; i++) {
3066 struct zone *zone = pgdat->node_zones + i;
3067
3068 if (!populated_zone(zone))
3069 continue;
3070
3071 if (sc.priority != DEF_PRIORITY &&
3072 !zone_reclaimable(zone))
3073 continue;
3074
3075 sc.nr_scanned = 0;
3076
3077 nr_soft_scanned = 0;
3078 /*
3079 * Call soft limit reclaim before calling shrink_zone.
3080 */
3081 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
3082 order, sc.gfp_mask,
3083 &nr_soft_scanned);
3084 sc.nr_reclaimed += nr_soft_reclaimed;
3085
3086 /*
3087 * There should be no need to raise the scanning
3088 * priority if enough pages are already being scanned
3089 * that the high watermark would be met at 100%
3090 * efficiency.
3091 */
3092 if (kswapd_shrink_zone(zone, end_zone, &sc,
3093 lru_pages, &nr_attempted))
3094 raise_priority = false;
3095 }
3096
3097 /*
3098 * If the low watermark is met there is no need for processes
3099 * to be throttled on pfmemalloc_wait as they should now be
3100 * able to safely make forward progress. Wake them.
3101 */
3102 if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
3103 pfmemalloc_watermark_ok(pgdat))
3104 wake_up(&pgdat->pfmemalloc_wait);
3105
3106 /*
3107 * Fragmentation may mean that the system cannot be rebalanced
3108 * for high-order allocations in all zones. If twice the
3109 * allocation size has been reclaimed and the zones are still
3110 * not balanced then recheck the watermarks at order-0 to
3111 * prevent kswapd reclaiming excessively. Assume that a
3112 * process that requested a high-order allocation can direct reclaim/compact.
3113 */
3114 if (order && sc.nr_reclaimed >= 2UL << order)
3115 order = sc.order = 0;
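/*
 * Illustrative numbers: for an order-3 request this bails out to order-0
 * once 2UL << 3 = 16 pages have been reclaimed in this pass, leaving any
 * remaining contiguity work to direct reclaim/compaction.
 */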
3116
3117 /* Check if kswapd should be suspending */
3118 if (try_to_freeze() || kthread_should_stop())
3119 break;
3120
3121 /*
3122 * Compact if necessary and kswapd is reclaiming at least the
3123 * high watermark number of pages as requested.
3124 */
3125 if (pgdat_needs_compaction && sc.nr_reclaimed > nr_attempted)
3126 compact_pgdat(pgdat, order);
3127
3128 /*
3129 * Raise priority if scanning rate is too low or there was no
3130 * progress in reclaiming pages
3131 */
3132 if (raise_priority || !sc.nr_reclaimed)
3133 sc.priority--;
3134 } while (sc.priority >= 1 &&
3135 !pgdat_balanced(pgdat, order, *classzone_idx));
3136
3137out:
3138 /*
3139 * Return the order we were reclaiming at so that prepare_kswapd_sleep()
3140 * can base its decision on it. However, if another caller entered the
3141 * allocator slow path while kswapd was awake, order will remain at the
3142 * higher level.
3143 */
3144 *classzone_idx = end_zone;
3145 return order;
3146}
3147
3148static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
3149{
3150 long remaining = 0;
3151 DEFINE_WAIT(wait);
3152
3153 if (freezing(current) || kthread_should_stop())
3154 return;
3155
3156 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
3157
3158 /* Try to sleep for a short interval */
3159 if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
3160 remaining = schedule_timeout(HZ/10);
3161 finish_wait(&pgdat->kswapd_wait, &wait);
3162 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
3163 }
3164
3165 /*
3166 * After a short sleep, check if it was a premature sleep. If not, then
3167 * go fully to sleep until explicitly woken up.
3168 */
3169 if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
3170 trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
3171
3172 /*
3173 * vmstat counters are not perfectly accurate and the estimated
3174 * value for counters such as NR_FREE_PAGES can deviate from the
3175 * true value by nr_online_cpus * threshold. To avoid the zone
3176 * watermarks being breached while under pressure, we reduce the
3177 * per-cpu vmstat threshold while kswapd is awake and restore
3178 * them before going back to sleep.
3179 */
3180 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
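/*
 * Illustrative bound (a sketch; the threshold value is made up): with 64
 * online CPUs and a per-cpu stat threshold of 32, a counter such as
 * NR_FREE_PAGES can be off by up to 64 * 32 = 2048 pages, which is why the
 * per-cpu threshold is tightened while kswapd is awake and restored here
 * before it sleeps.
 */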
3181
3182 /*
3183 * Compaction records what page blocks it recently failed to
3184 * isolate pages from and skips them in the future scanning.
3185 * When kswapd is going to sleep, it is reasonable to assume
3186 * that page isolation and compaction may now succeed, so reset the cache.
3187 */
3188 reset_isolation_suitable(pgdat);
3189
3190 if (!kthread_should_stop())
3191 schedule();
3192
3193 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
3194 } else {
3195 if (remaining)
3196 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
3197 else
3198 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
3199 }
3200 finish_wait(&pgdat->kswapd_wait, &wait);
3201}
3202
3203/*
3204 * The background pageout daemon, started as a kernel thread
3205 * from the init process.
3206 *
3207 * This basically trickles out pages so that we have _some_
3208 * free memory available even if there is no other activity
3209 * that frees anything up. This is needed for things like routing
3210 * etc, where we otherwise might have all activity going on in
3211 * asynchronous contexts that cannot page things out.
3212 *
3213 * If there are applications that are active memory-allocators
3214 * (most normal use), this basically shouldn't matter.
3215 */
3216static int kswapd(void *p)
3217{
3218 unsigned long order, new_order;
3219 unsigned balanced_order;
3220 int classzone_idx, new_classzone_idx;
3221 int balanced_classzone_idx;
3222 pg_data_t *pgdat = (pg_data_t*)p;
3223 struct task_struct *tsk = current;
3224
3225 struct reclaim_state reclaim_state = {
3226 .reclaimed_slab = 0,
3227 };
3228 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
3229
3230 lockdep_set_current_reclaim_state(GFP_KERNEL);
3231
3232 if (!cpumask_empty(cpumask))
3233 set_cpus_allowed_ptr(tsk, cpumask);
3234 current->reclaim_state = &reclaim_state;
3235
3236 /*
3237 * Tell the memory management that we're a "memory allocator",
3238 * and that if we need more memory we should get access to it
3239 * regardless (see "__alloc_pages()"). "kswapd" should
3240 * never get caught in the normal page freeing logic.
3241 *
3242 * (Kswapd normally doesn't need memory anyway, but sometimes
3243 * you need a small amount of memory in order to be able to
3244 * page out something else, and this flag essentially protects
3245 * us from recursively trying to free more memory as we're
3246 * trying to free the first piece of memory in the first place).
3247 */
3248 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
3249 set_freezable();
3250
3251 order = new_order = 0;
3252 balanced_order = 0;
3253 classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
3254 balanced_classzone_idx = classzone_idx;
3255 for ( ; ; ) {
3256 bool ret;
3257
3258 /*
3259 * If the last balance_pgdat was unsuccessful it's unlikely a
3260 * new request of a similar or harder type will succeed soon
3261 * so consider going to sleep on the basis of the order we reclaimed at.
3262 */
3263 if (balanced_classzone_idx >= new_classzone_idx &&
3264 balanced_order == new_order) {
3265 new_order = pgdat->kswapd_max_order;
3266 new_classzone_idx = pgdat->classzone_idx;
3267 pgdat->kswapd_max_order = 0;
3268 pgdat->classzone_idx = pgdat->nr_zones - 1;
3269 }
3270
3271 if (order < new_order || classzone_idx > new_classzone_idx) {
3272 /*
3273 * Don't sleep if someone wants a larger 'order'
3274 * allocation or has tighter zone constraints
3275 */
3276 order = new_order;
3277 classzone_idx = new_classzone_idx;
3278 } else {
3279 kswapd_try_to_sleep(pgdat, balanced_order,
3280 balanced_classzone_idx);
3281 order = pgdat->kswapd_max_order;
3282 classzone_idx = pgdat->classzone_idx;
3283 new_order = order;
3284 new_classzone_idx = classzone_idx;
3285 pgdat->kswapd_max_order = 0;
3286 pgdat->classzone_idx = pgdat->nr_zones - 1;
3287 }
3288
3289 ret = try_to_freeze();
3290 if (kthread_should_stop())
3291 break;
3292
3293 /*
3294 * We can speed up thawing tasks if we don't call balance_pgdat
3295 * after returning from the refrigerator
3296 */
3297 if (!ret) {
3298 trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
3299 balanced_classzone_idx = classzone_idx;
3300 balanced_order = balance_pgdat(pgdat, order,
3301 &balanced_classzone_idx);
3302 }
3303 }
3304
3305 current->reclaim_state = NULL;
3306 return 0;
3307}
3308
3309/*
3310 * A zone is low on free memory, so wake its kswapd task to service it.
3311 */
3312void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
3313{
3314 pg_data_t *pgdat;
3315
3316 if (!populated_zone(zone))
3317 return;
3318
3319 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
3320 return;
3321 pgdat = zone->zone_pgdat;
3322 if (pgdat->kswapd_max_order < order) {
3323 pgdat->kswapd_max_order = order;
3324 pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx);
3325 }
3326 if (!waitqueue_active(&pgdat->kswapd_wait))
3327 return;
3328 if (zone_balanced(zone, order, 0, 0))
3329 return;
3330
3331 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
3332 wake_up_interruptible(&pgdat->kswapd_wait);
3333}
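/*
 * Typical usage (illustrative; the real call sites live in the page
 * allocator slow path):
 *
 *	wakeup_kswapd(preferred_zone, order, zone_idx(preferred_zone));
 *
 * The call is cheap when kswapd is already awake or the zone is balanced,
 * so callers do not need to check the watermarks themselves first.
 */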
3334
3335#ifdef CONFIG_HIBERNATION
3336/*
3337 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
3338 * freed pages.
3339 *
3340 * Rather than trying to age LRUs the aim is to preserve the overall
3341 * LRU order by reclaiming preferentially
3342 * inactive > active > active referenced > active mapped
3343 */
3344unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
3345{
3346 struct reclaim_state reclaim_state;
3347 struct scan_control sc = {
3348 .gfp_mask = GFP_HIGHUSER_MOVABLE,
3349 .may_swap = 1,
3350 .may_unmap = 1,
3351 .may_writepage = 1,
3352 .nr_to_reclaim = nr_to_reclaim,
3353 .hibernation_mode = 1,
3354 .order = 0,
3355 .priority = DEF_PRIORITY,
3356 };
3357 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
3358 struct task_struct *p = current;
3359 unsigned long nr_reclaimed;
3360
3361 p->flags |= PF_MEMALLOC;
3362 lockdep_set_current_reclaim_state(sc.gfp_mask);
3363 reclaim_state.reclaimed_slab = 0;
3364 p->reclaim_state = &reclaim_state;
3365
3366 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
3367
3368 p->reclaim_state = NULL;
3369 lockdep_clear_current_reclaim_state();
3370 p->flags &= ~PF_MEMALLOC;
3371
3372 return nr_reclaimed;
3373}
3374#endif /* CONFIG_HIBERNATION */
3375
3376 /* It's optimal to keep kswapds on the same CPUs as their memory, but
3377  * not required for correctness. So if the last CPU in a node goes
3378  * away, we get changed to run anywhere; as the first one comes back,
3379  * restore their CPU bindings. */
3380static int cpu_callback(struct notifier_block *nfb, unsigned long action,
3381 void *hcpu)
3382{
3383 int nid;
3384
3385 if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
3386 for_each_node_state(nid, N_MEMORY) {
3387 pg_data_t *pgdat = NODE_DATA(nid);
3388 const struct cpumask *mask;
3389
3390 mask = cpumask_of_node(pgdat->node_id);
3391
3392 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
3393 /* One of our CPUs online: restore mask */
3394 set_cpus_allowed_ptr(pgdat->kswapd, mask);
3395 }
3396 }
3397 return NOTIFY_OK;
3398}
3399
3400/*
3401 * This kswapd start function will be called by init and node-hot-add.
3402 * On node-hot-add, kswapd will be moved to the proper CPUs if CPUs are hot-added.
3403 */
3404int kswapd_run(int nid)
3405{
3406 pg_data_t *pgdat = NODE_DATA(nid);
3407 int ret = 0;
3408
3409 if (pgdat->kswapd)
3410 return 0;
3411
3412 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
3413 if (IS_ERR(pgdat->kswapd)) {
3414 /* failure at boot is fatal */
3415 BUG_ON(system_state == SYSTEM_BOOTING);
3416 pr_err("Failed to start kswapd on node %d\n", nid);
3417 ret = PTR_ERR(pgdat->kswapd);
3418 pgdat->kswapd = NULL;
3419 }
3420 return ret;
3421}
3422
3423/*
3424 * Called by memory hotplug when all memory in a node is offlined. Caller must
3425 * hold lock_memory_hotplug().
3426 */
3427void kswapd_stop(int nid)
3428{
3429 struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
3430
3431 if (kswapd) {
3432 kthread_stop(kswapd);
3433 NODE_DATA(nid)->kswapd = NULL;
3434 }
3435}
3436
3437static int __init kswapd_init(void)
3438{
3439 int nid;
3440
3441 swap_setup();
3442 for_each_node_state(nid, N_MEMORY)
3443 kswapd_run(nid);
3444 hotcpu_notifier(cpu_callback, 0);
3445 return 0;
3446}
3447
3448module_init(kswapd_init)
3449
3450#ifdef CONFIG_NUMA
3451/*
3452 * Zone reclaim mode
3453 *
3454 * If non-zero call zone_reclaim when the number of free pages falls below
3455 * the watermarks.
3456 */
3457int zone_reclaim_mode __read_mostly;
3458
3459#define RECLAIM_OFF 0
3460#define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */
3461#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
3462#define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */
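/*
 * Example (illustrative only): the bits above are combined through the
 * vm.zone_reclaim_mode sysctl, e.g.
 *
 *	echo 1 > /proc/sys/vm/zone_reclaim_mode	  (RECLAIM_ZONE only)
 *	echo 3 > /proc/sys/vm/zone_reclaim_mode	  (RECLAIM_ZONE | RECLAIM_WRITE)
 *	echo 7 > /proc/sys/vm/zone_reclaim_mode	  (also swap pages out)
 */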
3463
3464/*
3465 * Priority for ZONE_RECLAIM. This determines the fraction of a zone's
3466 * pages scanned in each zone_reclaim pass. A priority of 4 scans
3467 * 1/16th of the zone.
3468 */
3469#define ZONE_RECLAIM_PRIORITY 4
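/*
 * Sketch of the arithmetic: with the pages scanned per pass proportional to
 * the zone size shifted right by the priority, priority 4 gives
 * size >> 4 = 1/16th of the zone. The retry loop in __zone_reclaim() below
 * lowers the priority, scanning larger fractions, until enough pages have
 * been reclaimed.
 */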
3470
3471/*
3472 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
3473 * occur.
3474 */
3475int sysctl_min_unmapped_ratio = 1;
3476
3477/*
3478 * If the number of slab pages in a zone grows beyond this percentage then
3479 * slab reclaim needs to occur.
3480 */
3481int sysctl_min_slab_ratio = 5;
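/*
 * Both ratios are runtime tunables (illustrative example):
 *
 *	echo 5 > /proc/sys/vm/min_unmapped_ratio
 *	echo 10 > /proc/sys/vm/min_slab_ratio
 *
 * The percentages are converted elsewhere into per-zone page counts
 * (zone->min_unmapped_pages, zone->min_slab_pages) that the checks below
 * compare against.
 */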
3482
3483static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
3484{
3485 unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
3486 unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
3487 zone_page_state(zone, NR_ACTIVE_FILE);
3488
3489 /*
3490 * It's possible for there to be more file mapped pages than
3491 * accounted for by the pages on the file LRU lists because
3492 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
3493 */
3494 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
3495}
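/*
 * Illustrative numbers (not from any real system): with 1000 pages on the
 * file LRUs but 1200 FILE_MAPPED pages (the excess being tmpfs pages that
 * live on the anon LRUs), the subtraction would underflow, so the helper
 * clamps the result to 0 rather than reporting a bogus huge value.
 */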
3496
3497/* Work out how many page cache pages we can reclaim in this reclaim_mode */
3498static long zone_pagecache_reclaimable(struct zone *zone)
3499{
3500 long nr_pagecache_reclaimable;
3501 long delta = 0;
3502
3503 /*
3504 * If RECLAIM_SWAP is set, then all file pages are considered
3505 * potentially reclaimable. Otherwise, we have to worry about
3506 * pages like swapcache and zone_unmapped_file_pages() provides
3507 * a better estimate
3508 */
3509 if (zone_reclaim_mode & RECLAIM_SWAP)
3510 nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
3511 else
3512 nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
3513
3514 /* If we can't clean pages, remove dirty pages from consideration */
3515 if (!(zone_reclaim_mode & RECLAIM_WRITE))
3516 delta += zone_page_state(zone, NR_FILE_DIRTY);
3517
3518 /* Watch for any possible underflows due to delta */
3519 if (unlikely(delta > nr_pagecache_reclaimable))
3520 delta = nr_pagecache_reclaimable;
3521
3522 return nr_pagecache_reclaimable - delta;
3523}
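/*
 * Example (a sketch): with zone_reclaim_mode set to RECLAIM_ZONE only, 5000
 * unmapped file pages on the LRUs and 800 dirty file pages in the zone,
 * this reports 5000 - 800 = 4200 reclaimable pagecache pages, since without
 * RECLAIM_WRITE the dirty pages cannot be cleaned here.
 */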
3524
3525/*
3526 * Try to free up some pages from this zone through reclaim.
3527 */
3528static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
3529{
3530 /* Minimum pages needed in order to stay on node */
3531 const unsigned long nr_pages = 1 << order;
3532 struct task_struct *p = current;
3533 struct reclaim_state reclaim_state;
3534 struct scan_control sc = {
3535 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
3536 .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
3537 .may_swap = 1,
3538 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
3539 .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
3540 .order = order,
3541 .priority = ZONE_RECLAIM_PRIORITY,
3542 };
3543 struct shrink_control shrink = {
3544 .gfp_mask = sc.gfp_mask,
3545 };
3546 unsigned long nr_slab_pages0, nr_slab_pages1;
3547
3548 cond_resched();
3549 /*
3550 * We need to be able to allocate from the reserves for RECLAIM_SWAP
3551 * and we also need to be able to write out pages for RECLAIM_WRITE
3552 * and RECLAIM_SWAP.
3553 */
3554 p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
3555 lockdep_set_current_reclaim_state(gfp_mask);
3556 reclaim_state.reclaimed_slab = 0;
3557 p->reclaim_state = &reclaim_state;
3558
3559 if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
3560 /*
3561 * Free memory by calling shrink_zone() with increasing
3562 * priorities until we have enough memory freed.
3563 */
3564 do {
3565 shrink_zone(zone, &sc);
3566 } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
3567 }
3568
3569 nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
3570 if (nr_slab_pages0 > zone->min_slab_pages) {
3571 /*
3572 * shrink_slab() does not currently allow us to determine how
3573 * many pages were freed in this zone. So we take the current
3574 * number of slab pages and shake the slab until it is reduced
3575 * by the same nr_pages that we used for reclaiming unmapped
3576 * pages.
3577 */
3578 nodes_clear(shrink.nodes_to_scan);
3579 node_set(zone_to_nid(zone), shrink.nodes_to_scan);
3580 for (;;) {
3581 unsigned long lru_pages = zone_reclaimable_pages(zone);
3582
3583 /* No reclaimable slab or very low memory pressure */
3584 if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages))
3585 break;
3586
3587 /* Freed enough memory */
3588 nr_slab_pages1 = zone_page_state(zone,
3589 NR_SLAB_RECLAIMABLE);
3590 if (nr_slab_pages1 + nr_pages <= nr_slab_pages0)
3591 break;
3592 }
3593
3594 /*
3595 * Update nr_reclaimed by the number of slab pages we
3596 * reclaimed from this zone.
3597 */
3598 nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
3599 if (nr_slab_pages1 < nr_slab_pages0)
3600 sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1;
3601 }
3602
3603 p->reclaim_state = NULL;
3604 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
3605 lockdep_clear_current_reclaim_state();
3606 return sc.nr_reclaimed >= nr_pages;
3607}
3608
3609int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
3610{
3611 int node_id;
3612 int ret;
3613
3614 /*
3615 * Zone reclaim reclaims unmapped file backed pages and
3616 * slab pages if we are over the defined limits.
3617 *
3618 * A small portion of unmapped file backed pages is needed for
3619 * file I/O otherwise pages read by file I/O will be immediately
3620 * thrown out if the zone is overallocated. So we do not reclaim
3621 * if less than a specified percentage of the zone is used by
3622 * unmapped file backed pages.
3623 */
3624 if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
3625 zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
3626 return ZONE_RECLAIM_FULL;
3627
3628 if (!zone_reclaimable(zone))
3629 return ZONE_RECLAIM_FULL;
3630
3631 /*
3632 * Do not scan if the allocation should not be delayed.
3633 */
3634 if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
3635 return ZONE_RECLAIM_NOSCAN;
3636
3637 /*
3638 * Only run zone reclaim on the local zone or on zones that do not
3639 * have associated processors. This will favor the local processor
3640 * over remote processors and spread off node memory allocations
3641 * as widely as possible.
3642 */
3643 node_id = zone_to_nid(zone);
3644 if (node_state(node_id, N_CPU) && node_id != numa_node_id())
3645 return ZONE_RECLAIM_NOSCAN;
3646
3647 if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
3648 return ZONE_RECLAIM_NOSCAN;
3649
3650 ret = __zone_reclaim(zone, gfp_mask, order);
3651 zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
3652
3653 if (!ret)
3654 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
3655
3656 return ret;
3657}
3658#endif
3659
3660/*
3661 * page_evictable - test whether a page is evictable
3662 * @page: the page to test
3663 *
3664 * Test whether page is evictable--i.e., should be placed on active/inactive
3665 * lists vs unevictable list.
3666 *
3667 * Reasons page might not be evictable:
3668 * (1) page's mapping marked unevictable
3669 * (2) page is part of an mlocked VMA
3670 *
3671 */
3672int page_evictable(struct page *page)
3673{
3674 return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
3675}
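/*
 * Example (a sketch): a page of a SHM_LOCKed shmem segment, whose mapping
 * has been marked unevictable via mapping_set_unevictable(), or any page in
 * an mlock()ed VMA, fails this test and is kept on the unevictable list
 * instead of being cycled through the active/inactive lists.
 */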
3676
3677#ifdef CONFIG_SHMEM
3678/**
3679 * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
3680 * @pages: array of pages to check
3681 * @nr_pages: number of pages to check
3682 *
3683 * Checks pages for evictability and moves them to the appropriate lru list.
3684 *
3685 * This function is only used for SysV IPC SHM_UNLOCK.
3686 */
3687void check_move_unevictable_pages(struct page **pages, int nr_pages)
3688{
3689 struct lruvec *lruvec;
3690 struct zone *zone = NULL;
3691 int pgscanned = 0;
3692 int pgrescued = 0;
3693 int i;
3694
3695 for (i = 0; i < nr_pages; i++) {
3696 struct page *page = pages[i];
3697 struct zone *pagezone;
3698
3699 pgscanned++;
3700 pagezone = page_zone(page);
3701 if (pagezone != zone) {
3702 if (zone)
3703 spin_unlock_irq(&zone->lru_lock);
3704 zone = pagezone;
3705 spin_lock_irq(&zone->lru_lock);
3706 }
3707 lruvec = mem_cgroup_page_lruvec(page, zone);
3708
3709 if (!PageLRU(page) || !PageUnevictable(page))
3710 continue;
3711
3712 if (page_evictable(page)) {
3713 enum lru_list lru = page_lru_base_type(page);
3714
3715 VM_BUG_ON_PAGE(PageActive(page), page);
3716 ClearPageUnevictable(page);
3717 del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
3718 add_page_to_lru_list(page, lruvec, lru);
3719 pgrescued++;
3720 }
3721 }
3722
3723 if (zone) {
3724 __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
3725 __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
3726 spin_unlock_irq(&zone->lru_lock);
3727 }
3728}
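/*
 * Usage sketch (illustrative only, loosely modelled on the SHM_UNLOCK path
 * mentioned above): the caller gathers the segment's pages in batches and
 * feeds each batch in, e.g.
 *
 *	struct page *pages[PAGEVEC_SIZE];
 *	int nr = ...;	/* pages collected from the mapping */
 *	check_move_unevictable_pages(pages, nr);
 *
 * so any page that became evictable after SHM_UNLOCK is moved back onto the
 * normal LRU lists with the lru_lock taken once per zone per batch.
 */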
3729#endif /* CONFIG_SHMEM */
3730
3731static void warn_scan_unevictable_pages(void)
3732{
3733 printk_once(KERN_WARNING
3734 "%s: The scan_unevictable_pages sysctl/node-interface has been "
3735 "disabled for lack of a legitimate use case. If you have "
3736 "one, please send an email to linux-mm@kvack.org.\n",
3737 current->comm);
3738}
3739
3740/*
3741 * scan_unevictable_pages [vm] sysctl handler. On demand re-scan of
3742 * all nodes' unevictable lists for evictable pages
3743 */
3744unsigned long scan_unevictable_pages;
3745
3746int scan_unevictable_handler(struct ctl_table *table, int write,
3747 void __user *buffer,
3748 size_t *length, loff_t *ppos)
3749{
3750 warn_scan_unevictable_pages();
3751 proc_doulongvec_minmax(table, write, buffer, length, ppos);
3752 scan_unevictable_pages = 0;
3753 return 0;
3754}
3755
3756#ifdef CONFIG_NUMA
3757/*
3758 * per node 'scan_unevictable_pages' attribute. On demand re-scan of
3759 * a specified node's per zone unevictable lists for evictable pages.
3760 */
3761
3762static ssize_t read_scan_unevictable_node(struct device *dev,
3763 struct device_attribute *attr,
3764 char *buf)
3765{
3766 warn_scan_unevictable_pages();
3767 return sprintf(buf, "0\n"); /* always zero; should fit... */
3768}
3769
3770static ssize_t write_scan_unevictable_node(struct device *dev,
3771 struct device_attribute *attr,
3772 const char *buf, size_t count)
3773{
3774 warn_scan_unevictable_pages();
3775 return 1;
3776}
3777
3778
3779static DEVICE_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
3780 read_scan_unevictable_node,
3781 write_scan_unevictable_node);
3782
3783int scan_unevictable_register_node(struct node *node)
3784{
3785 return device_create_file(&node->dev, &dev_attr_scan_unevictable_pages);
3786}
3787
3788void scan_unevictable_unregister_node(struct node *node)
3789{
3790 device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages);
3791}
3792#endif