1/*
2 * linux/mm/vmscan.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 *
6 * Swap reorganised 29.12.95, Stephen Tweedie.
7 * kswapd added: 7.1.96 sct
8 * Removed kswapd_ctl limits, and swap out as many pages as needed
9 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
10 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
11 * Multiqueue VM started 5.8.00, Rik van Riel.
12 */
13
14#include <linux/mm.h>
15#include <linux/module.h>
16#include <linux/gfp.h>
17#include <linux/kernel_stat.h>
18#include <linux/swap.h>
19#include <linux/pagemap.h>
20#include <linux/init.h>
21#include <linux/highmem.h>
22#include <linux/vmstat.h>
23#include <linux/file.h>
24#include <linux/writeback.h>
25#include <linux/blkdev.h>
26#include <linux/buffer_head.h> /* for try_to_release_page(),
27 buffer_heads_over_limit */
28#include <linux/mm_inline.h>
29#include <linux/pagevec.h>
30#include <linux/backing-dev.h>
31#include <linux/rmap.h>
32#include <linux/topology.h>
33#include <linux/cpu.h>
34#include <linux/cpuset.h>
35#include <linux/compaction.h>
36#include <linux/notifier.h>
37#include <linux/rwsem.h>
38#include <linux/delay.h>
39#include <linux/kthread.h>
40#include <linux/freezer.h>
41#include <linux/memcontrol.h>
42#include <linux/delayacct.h>
43#include <linux/sysctl.h>
44#include <linux/oom.h>
45#include <linux/prefetch.h>
46
47#include <asm/tlbflush.h>
48#include <asm/div64.h>
49
50#include <linux/swapops.h>
51
52#include "internal.h"
53
54#define CREATE_TRACE_POINTS
55#include <trace/events/vmscan.h>
56
57/*
58 * reclaim_mode determines how the inactive list is shrunk
59 * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
60 * RECLAIM_MODE_ASYNC: Do not block
61 * RECLAIM_MODE_SYNC: Allow blocking e.g. call wait_on_page_writeback
62 * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference
63 * page from the LRU and reclaim all pages within a
64 * naturally aligned range
65 * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of
66 * order-0 pages and then compact the zone
67 */
68typedef unsigned __bitwise__ reclaim_mode_t;
69#define RECLAIM_MODE_SINGLE ((__force reclaim_mode_t)0x01u)
70#define RECLAIM_MODE_ASYNC ((__force reclaim_mode_t)0x02u)
71#define RECLAIM_MODE_SYNC ((__force reclaim_mode_t)0x04u)
72#define RECLAIM_MODE_LUMPYRECLAIM ((__force reclaim_mode_t)0x08u)
73#define RECLAIM_MODE_COMPACTION ((__force reclaim_mode_t)0x10u)
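/*
 * These flags are OR-ed together into sc->reclaim_mode by set_reclaim_mode()
 * below; e.g. on a kernel without compaction built in, synchronous lumpy
 * reclaim for a costly allocation runs with
 * RECLAIM_MODE_LUMPYRECLAIM | RECLAIM_MODE_SYNC.
 */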
74
75struct scan_control {
76 /* Incremented by the number of inactive pages that were scanned */
77 unsigned long nr_scanned;
78
79 /* Number of pages freed so far during a call to shrink_zones() */
80 unsigned long nr_reclaimed;
81
82 /* How many pages shrink_list() should reclaim */
83 unsigned long nr_to_reclaim;
84
85 unsigned long hibernation_mode;
86
87 /* This context's GFP mask */
88 gfp_t gfp_mask;
89
90 int may_writepage;
91
92 /* Can mapped pages be reclaimed? */
93 int may_unmap;
94
95 /* Can pages be swapped as part of reclaim? */
96 int may_swap;
97
98 int order;
99
100 /*
101 * Intend to reclaim enough contiguous memory rather than just
102 * enough memory, i.e. the mode for high-order allocations.
103 */
104 reclaim_mode_t reclaim_mode;
105
106 /* Which cgroup do we reclaim from */
107 struct mem_cgroup *mem_cgroup;
108
109 /*
110 * Nodemask of nodes allowed by the caller. If NULL, all nodes
111 * are scanned.
112 */
113 nodemask_t *nodemask;
114};
115
116#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
117
118#ifdef ARCH_HAS_PREFETCH
119#define prefetch_prev_lru_page(_page, _base, _field) \
120 do { \
121 if ((_page)->lru.prev != _base) { \
122 struct page *prev; \
123 \
124 prev = lru_to_page(&(_page->lru)); \
125 prefetch(&prev->_field); \
126 } \
127 } while (0)
128#else
129#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
130#endif
131
132#ifdef ARCH_HAS_PREFETCHW
133#define prefetchw_prev_lru_page(_page, _base, _field) \
134 do { \
135 if ((_page)->lru.prev != _base) { \
136 struct page *prev; \
137 \
138 prev = lru_to_page(&(_page->lru)); \
139 prefetchw(&prev->_field); \
140 } \
141 } while (0)
142#else
143#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
144#endif
145
146/*
147 * From 0 .. 100. Higher means more swappy.
148 */
149int vm_swappiness = 60;
150long vm_total_pages; /* The total number of pages which the VM controls */
151
152static LIST_HEAD(shrinker_list);
153static DECLARE_RWSEM(shrinker_rwsem);
154
155#ifdef CONFIG_CGROUP_MEM_RES_CTLR
156#define scanning_global_lru(sc) (!(sc)->mem_cgroup)
157#else
158#define scanning_global_lru(sc) (1)
159#endif
160
161static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
162 struct scan_control *sc)
163{
164 if (!scanning_global_lru(sc))
165 return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);
166
167 return &zone->reclaim_stat;
168}
169
170static unsigned long zone_nr_lru_pages(struct zone *zone,
171 struct scan_control *sc, enum lru_list lru)
172{
173 if (!scanning_global_lru(sc))
174 return mem_cgroup_zone_nr_lru_pages(sc->mem_cgroup,
175 zone_to_nid(zone), zone_idx(zone), BIT(lru));
176
177 return zone_page_state(zone, NR_LRU_BASE + lru);
178}
179
180
181/*
182 * Add a shrinker callback to be called from the vm
183 */
184void register_shrinker(struct shrinker *shrinker)
185{
186 shrinker->nr = 0;
187 down_write(&shrinker_rwsem);
188 list_add_tail(&shrinker->list, &shrinker_list);
189 up_write(&shrinker_rwsem);
190}
191EXPORT_SYMBOL(register_shrinker);
192
193/*
194 * Remove one
195 */
196void unregister_shrinker(struct shrinker *shrinker)
197{
198 down_write(&shrinker_rwsem);
199 list_del(&shrinker->list);
200 up_write(&shrinker_rwsem);
201}
202EXPORT_SYMBOL(unregister_shrinker);
203
204static inline int do_shrinker_shrink(struct shrinker *shrinker,
205 struct shrink_control *sc,
206 unsigned long nr_to_scan)
207{
208 sc->nr_to_scan = nr_to_scan;
209 return (*shrinker->shrink)(shrinker, sc);
210}
211
212#define SHRINK_BATCH 128
213/*
214 * Call the shrink functions to age shrinkable caches
215 *
216 * Here we assume it costs one seek to replace a lru page and that it also
217 * takes a seek to recreate a cache object. With this in mind we age equal
218 * percentages of the lru and ageable caches. This should balance the seeks
219 * generated by these structures.
220 *
221 * If the VM encounters mapped pages on the LRU, it increases the pressure on
222 * slab to avoid swapping.
223 *
224 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
225 *
226 * `lru_pages' represents the number of on-LRU pages in all the zones which
227 * are eligible for the caller's allocation attempt. It is used for balancing
228 * slab reclaim versus page reclaim.
229 *
230 * Returns the number of slab objects which we shrunk.
231 */
232unsigned long shrink_slab(struct shrink_control *shrink,
233 unsigned long nr_pages_scanned,
234 unsigned long lru_pages)
235{
236 struct shrinker *shrinker;
237 unsigned long ret = 0;
238
239 if (nr_pages_scanned == 0)
240 nr_pages_scanned = SWAP_CLUSTER_MAX;
241
242 if (!down_read_trylock(&shrinker_rwsem)) {
243 /* Assume we'll be able to shrink next time */
244 ret = 1;
245 goto out;
246 }
247
248 list_for_each_entry(shrinker, &shrinker_list, list) {
249 unsigned long long delta;
250 unsigned long total_scan;
251 unsigned long max_pass;
252 int shrink_ret = 0;
253 long nr;
254 long new_nr;
255 long batch_size = shrinker->batch ? shrinker->batch
256 : SHRINK_BATCH;
257
258 /*
259 * copy the current shrinker scan count into a local variable
260 * and zero it so that other concurrent shrinker invocations
261 * don't also do this scanning work.
262 */
263 do {
264 nr = shrinker->nr;
265 } while (cmpxchg(&shrinker->nr, nr, 0) != nr);
266
267 total_scan = nr;
268 max_pass = do_shrinker_shrink(shrinker, shrink, 0);
269 delta = (4 * nr_pages_scanned) / shrinker->seeks;
270 delta *= max_pass;
271 do_div(delta, lru_pages + 1);
272 total_scan += delta;
273 if (total_scan < 0) {
274 printk(KERN_ERR "shrink_slab: %pF negative objects to "
275 "delete nr=%ld\n",
276 shrinker->shrink, total_scan);
277 total_scan = max_pass;
278 }
279
280 /*
281 * We need to avoid excessive windup on filesystem shrinkers
282 * due to large numbers of GFP_NOFS allocations causing the
283 * shrinkers to return -1 all the time. This results in a large
284 * nr being built up so when a shrink that can do some work
285 * comes along it empties the entire cache due to nr >>>
286 * max_pass. This is bad for sustaining a working set in
287 * memory.
288 *
289 * Hence only allow the shrinker to scan the entire cache when
290 * a large delta change is calculated directly.
291 */
292 if (delta < max_pass / 4)
293 total_scan = min(total_scan, max_pass / 2);
294
295 /*
296 * Avoid risking looping forever due to a too-large nr value:
297 * never try to free more than twice the estimated number of
298 * freeable entries.
299 */
300 if (total_scan > max_pass * 2)
301 total_scan = max_pass * 2;
302
303 trace_mm_shrink_slab_start(shrinker, shrink, nr,
304 nr_pages_scanned, lru_pages,
305 max_pass, delta, total_scan);
306
307 while (total_scan >= batch_size) {
308 int nr_before;
309
310 nr_before = do_shrinker_shrink(shrinker, shrink, 0);
311 shrink_ret = do_shrinker_shrink(shrinker, shrink,
312 batch_size);
313 if (shrink_ret == -1)
314 break;
315 if (shrink_ret < nr_before)
316 ret += nr_before - shrink_ret;
317 count_vm_events(SLABS_SCANNED, batch_size);
318 total_scan -= batch_size;
319
320 cond_resched();
321 }
322
323 /*
324 * move the unused scan count back into the shrinker in a
325 * manner that handles concurrent updates. If we exhausted the
326 * scan, there is no need to do an update.
327 */
328 do {
329 nr = shrinker->nr;
330 new_nr = total_scan + nr;
331 if (total_scan <= 0)
332 break;
333 } while (cmpxchg(&shrinker->nr, nr, new_nr) != nr);
334
335 trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
336 }
337 up_read(&shrinker_rwsem);
338out:
339 cond_resched();
340 return ret;
341}
342
343static void set_reclaim_mode(int priority, struct scan_control *sc,
344 bool sync)
345{
346 reclaim_mode_t syncmode = sync ? RECLAIM_MODE_SYNC : RECLAIM_MODE_ASYNC;
347
348 /*
349 * Initially assume we are entering either lumpy reclaim or
350 * reclaim/compaction. Depending on the order, we will either set the
351 * sync mode or just reclaim order-0 pages later.
352 */
353 if (COMPACTION_BUILD)
354 sc->reclaim_mode = RECLAIM_MODE_COMPACTION;
355 else
356 sc->reclaim_mode = RECLAIM_MODE_LUMPYRECLAIM;
357
358 /*
359 * Avoid using lumpy reclaim or reclaim/compaction if possible by
360 * restricting it to either costly allocations or reclaim when
361 * under memory pressure.
362 */
363 if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
364 sc->reclaim_mode |= syncmode;
365 else if (sc->order && priority < DEF_PRIORITY - 2)
366 sc->reclaim_mode |= syncmode;
367 else
368 sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
369}
370
371static void reset_reclaim_mode(struct scan_control *sc)
372{
373 sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
374}
375
376static inline int is_page_cache_freeable(struct page *page)
377{
378 /*
379 * A freeable page cache page is referenced only by the caller
380 * that isolated the page, the page cache radix tree and
381 * optional buffer heads at page->private.
382 */
383 return page_count(page) - page_has_private(page) == 2;
384}
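/*
 * For example, an isolated page cache page that still carries buffer heads
 * is referenced by the isolating caller, the radix tree and its buffers:
 * page_count() == 3 and page_has_private() == 1, which satisfies the test.
 */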
385
386static int may_write_to_queue(struct backing_dev_info *bdi,
387 struct scan_control *sc)
388{
389 if (current->flags & PF_SWAPWRITE)
390 return 1;
391 if (!bdi_write_congested(bdi))
392 return 1;
393 if (bdi == current->backing_dev_info)
394 return 1;
395
396 /* lumpy reclaim for hugepages often needs a lot of writes */
397 if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
398 return 1;
399 return 0;
400}
401
402/*
403 * We detected a synchronous write error writing a page out. Probably
404 * -ENOSPC. We need to propagate that into the address_space for a subsequent
405 * fsync(), msync() or close().
406 *
407 * The tricky part is that after writepage we cannot touch the mapping: nothing
408 * prevents it from being freed up. But we have a ref on the page and once
409 * that page is locked, the mapping is pinned.
410 *
411 * We're allowed to run sleeping lock_page() here because we know the caller has
412 * __GFP_FS.
413 */
414static void handle_write_error(struct address_space *mapping,
415 struct page *page, int error)
416{
417 lock_page(page);
418 if (page_mapping(page) == mapping)
419 mapping_set_error(mapping, error);
420 unlock_page(page);
421}
422
423/* possible outcome of pageout() */
424typedef enum {
425 /* failed to write page out, page is locked */
426 PAGE_KEEP,
427 /* move page to the active list, page is locked */
428 PAGE_ACTIVATE,
429 /* page has been sent to the disk successfully, page is unlocked */
430 PAGE_SUCCESS,
431 /* page is clean and locked */
432 PAGE_CLEAN,
433} pageout_t;
434
435/*
436 * pageout is called by shrink_page_list() for each dirty page.
437 * Calls ->writepage().
438 */
439static pageout_t pageout(struct page *page, struct address_space *mapping,
440 struct scan_control *sc)
441{
442 /*
443 * If the page is dirty, only perform writeback if that write
444 * will be non-blocking, to prevent this allocation from being
445 * stalled by pagecache activity. But note that there may be
446 * stalls if we need to run get_block(). We could test
447 * PagePrivate for that.
448 *
449 * If this process is currently in __generic_file_aio_write() against
450 * this page's queue, we can perform writeback even if that
451 * will block.
452 *
453 * If the page is swapcache, write it back even if that would
454 * block, for some throttling. This happens by accident, because
455 * swap_backing_dev_info is bust: it doesn't reflect the
456 * congestion state of the swapdevs. Easy to fix, if needed.
457 */
458 if (!is_page_cache_freeable(page))
459 return PAGE_KEEP;
460 if (!mapping) {
461 /*
462 * Some data journaling orphaned pages can have
463 * page->mapping == NULL while being dirty with clean buffers.
464 */
465 if (page_has_private(page)) {
466 if (try_to_free_buffers(page)) {
467 ClearPageDirty(page);
468 printk("%s: orphaned page\n", __func__);
469 return PAGE_CLEAN;
470 }
471 }
472 return PAGE_KEEP;
473 }
474 if (mapping->a_ops->writepage == NULL)
475 return PAGE_ACTIVATE;
476 if (!may_write_to_queue(mapping->backing_dev_info, sc))
477 return PAGE_KEEP;
478
479 if (clear_page_dirty_for_io(page)) {
480 int res;
481 struct writeback_control wbc = {
482 .sync_mode = WB_SYNC_NONE,
483 .nr_to_write = SWAP_CLUSTER_MAX,
484 .range_start = 0,
485 .range_end = LLONG_MAX,
486 .for_reclaim = 1,
487 };
488
489 SetPageReclaim(page);
490 res = mapping->a_ops->writepage(page, &wbc);
491 if (res < 0)
492 handle_write_error(mapping, page, res);
493 if (res == AOP_WRITEPAGE_ACTIVATE) {
494 ClearPageReclaim(page);
495 return PAGE_ACTIVATE;
496 }
497
498 /*
499 * Wait on writeback if requested to. This happens when
500 * direct reclaiming a large contiguous area and the
501 * first attempt to free a range of pages fails.
502 */
503 if (PageWriteback(page) &&
504 (sc->reclaim_mode & RECLAIM_MODE_SYNC))
505 wait_on_page_writeback(page);
506
507 if (!PageWriteback(page)) {
508 /* synchronous write or broken a_ops? */
509 ClearPageReclaim(page);
510 }
511 trace_mm_vmscan_writepage(page,
512 trace_reclaim_flags(page, sc->reclaim_mode));
513 inc_zone_page_state(page, NR_VMSCAN_WRITE);
514 return PAGE_SUCCESS;
515 }
516
517 return PAGE_CLEAN;
518}
519
520/*
521 * Same as remove_mapping, but if the page is removed from the mapping, it
522 * gets returned with a refcount of 0.
523 */
524static int __remove_mapping(struct address_space *mapping, struct page *page)
525{
526 BUG_ON(!PageLocked(page));
527 BUG_ON(mapping != page_mapping(page));
528
529 spin_lock_irq(&mapping->tree_lock);
530 /*
531 * The non-racy check for a busy page.
532 *
533 * Must be careful with the order of the tests. When someone has
534 * a ref to the page, it may be possible that they dirty it then
535 * drop the reference. So if PageDirty is tested before page_count
536 * here, then the following race may occur:
537 *
538 * get_user_pages(&page);
539 * [user mapping goes away]
540 * write_to(page);
541 * !PageDirty(page) [good]
542 * SetPageDirty(page);
543 * put_page(page);
544 * !page_count(page) [good, discard it]
545 *
546 * [oops, our write_to data is lost]
547 *
548 * Reversing the order of the tests ensures such a situation cannot
549 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
550 * load is not satisfied before that of page->_count.
551 *
552 * Note that if SetPageDirty is always performed via set_page_dirty,
553 * and thus under tree_lock, then this ordering is not required.
554 */
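/*
 * Concretely: a freeable page is held only by the isolating caller and the
 * page cache (see is_page_cache_freeable()), so page_freeze_refs() atomically
 * drops a count of exactly 2 to 0. Any additional reference, e.g. a
 * concurrent get_user_pages(), makes the cmpxchg fail and the page is kept.
 */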
555 if (!page_freeze_refs(page, 2))
556 goto cannot_free;
557 /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
558 if (unlikely(PageDirty(page))) {
559 page_unfreeze_refs(page, 2);
560 goto cannot_free;
561 }
562
563 if (PageSwapCache(page)) {
564 swp_entry_t swap = { .val = page_private(page) };
565 __delete_from_swap_cache(page);
566 spin_unlock_irq(&mapping->tree_lock);
567 swapcache_free(swap, page);
568 } else {
569 void (*freepage)(struct page *);
570
571 freepage = mapping->a_ops->freepage;
572
573 __delete_from_page_cache(page);
574 spin_unlock_irq(&mapping->tree_lock);
575 mem_cgroup_uncharge_cache_page(page);
576
577 if (freepage != NULL)
578 freepage(page);
579 }
580
581 return 1;
582
583cannot_free:
584 spin_unlock_irq(&mapping->tree_lock);
585 return 0;
586}
587
588/*
589 * Attempt to detach a locked page from its ->mapping. If it is dirty or if
590 * someone else has a ref on the page, abort and return 0. If it was
591 * successfully detached, return 1. Assumes the caller has a single ref on
592 * this page.
593 */
594int remove_mapping(struct address_space *mapping, struct page *page)
595{
596 if (__remove_mapping(mapping, page)) {
597 /*
598 * Unfreezing the refcount with 1 rather than 2 effectively
599 * drops the pagecache ref for us without requiring another
600 * atomic operation.
601 */
602 page_unfreeze_refs(page, 1);
603 return 1;
604 }
605 return 0;
606}
607
608/**
609 * putback_lru_page - put previously isolated page onto appropriate LRU list
610 * @page: page to be put back to appropriate lru list
611 *
612 * Add previously isolated @page to appropriate LRU list.
613 * Page may still be unevictable for other reasons.
614 *
615 * lru_lock must not be held, interrupts must be enabled.
616 */
617void putback_lru_page(struct page *page)
618{
619 int lru;
620 int active = !!TestClearPageActive(page);
621 int was_unevictable = PageUnevictable(page);
622
623 VM_BUG_ON(PageLRU(page));
624
625redo:
626 ClearPageUnevictable(page);
627
628 if (page_evictable(page, NULL)) {
629 /*
630 * For evictable pages, we can use the cache.
631 * In event of a race, worst case is we end up with an
632 * unevictable page on [in]active list.
633 * We know how to handle that.
634 */
635 lru = active + page_lru_base_type(page);
636 lru_cache_add_lru(page, lru);
637 } else {
638 /*
639 * Put unevictable pages directly on zone's unevictable
640 * list.
641 */
642 lru = LRU_UNEVICTABLE;
643 add_page_to_unevictable_list(page);
644 /*
645 * When racing with an mlock clearing (page is
646 * unlocked), make sure that if the other thread does
647 * not observe our setting of PG_lru and fails
648 * isolation, we see PG_mlocked cleared below and move
649 * the page back to the evictable list.
650 *
651 * The other side is TestClearPageMlocked().
652 */
653 smp_mb();
654 }
655
656 /*
657 * page's status can change while we move it among the LRU lists. If an
658 * evictable page ends up on the unevictable list, it will never be freed.
659 * To avoid that, check again after we have added it to the list.
660 */
661 if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
662 if (!isolate_lru_page(page)) {
663 put_page(page);
664 goto redo;
665 }
666 /* This means someone else dropped this page from LRU
667 * So, it will be freed or put back on the LRU again. There is
668 * nothing to do here.
669 */
670 }
671
672 if (was_unevictable && lru != LRU_UNEVICTABLE)
673 count_vm_event(UNEVICTABLE_PGRESCUED);
674 else if (!was_unevictable && lru == LRU_UNEVICTABLE)
675 count_vm_event(UNEVICTABLE_PGCULLED);
676
677 put_page(page); /* drop ref from isolate */
678}
679
680enum page_references {
681 PAGEREF_RECLAIM,
682 PAGEREF_RECLAIM_CLEAN,
683 PAGEREF_KEEP,
684 PAGEREF_ACTIVATE,
685};
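/*
 * shrink_page_list() maps these outcomes as follows: PAGEREF_ACTIVATE moves
 * the page back to the active list, PAGEREF_KEEP leaves it on the inactive
 * list, PAGEREF_RECLAIM_CLEAN reclaims it only if no writeout is needed, and
 * PAGEREF_RECLAIM reclaims it, writing it back first if it is dirty.
 */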
686
687static enum page_references page_check_references(struct page *page,
688 struct scan_control *sc)
689{
690 int referenced_ptes, referenced_page;
691 unsigned long vm_flags;
692
693 referenced_ptes = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
694 referenced_page = TestClearPageReferenced(page);
695
696 /* Lumpy reclaim - ignore references */
697 if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
698 return PAGEREF_RECLAIM;
699
700 /*
701 * Mlock lost the isolation race with us. Let try_to_unmap()
702 * move the page to the unevictable list.
703 */
704 if (vm_flags & VM_LOCKED)
705 return PAGEREF_RECLAIM;
706
707 if (referenced_ptes) {
708 if (PageAnon(page))
709 return PAGEREF_ACTIVATE;
710 /*
711 * All mapped pages start out with page table
712 * references from the instantiating fault, so we need
713 * to look twice if a mapped file page is used more
714 * than once.
715 *
716 * Mark it and spare it for another trip around the
717 * inactive list. Another page table reference will
718 * lead to its activation.
719 *
720 * Note: the mark is set for activated pages as well
721 * so that recently deactivated but used pages are
722 * quickly recovered.
723 */
724 SetPageReferenced(page);
725
726 if (referenced_page)
727 return PAGEREF_ACTIVATE;
728
729 return PAGEREF_KEEP;
730 }
731
732 /* Reclaim if clean, defer dirty pages to writeback */
733 if (referenced_page && !PageSwapBacked(page))
734 return PAGEREF_RECLAIM_CLEAN;
735
736 return PAGEREF_RECLAIM;
737}
738
739static noinline_for_stack void free_page_list(struct list_head *free_pages)
740{
741 struct pagevec freed_pvec;
742 struct page *page, *tmp;
743
744 pagevec_init(&freed_pvec, 1);
745
746 list_for_each_entry_safe(page, tmp, free_pages, lru) {
747 list_del(&page->lru);
748 if (!pagevec_add(&freed_pvec, page)) {
749 __pagevec_free(&freed_pvec);
750 pagevec_reinit(&freed_pvec);
751 }
752 }
753
754 pagevec_free(&freed_pvec);
755}
756
757/*
758 * shrink_page_list() returns the number of reclaimed pages
759 */
760static unsigned long shrink_page_list(struct list_head *page_list,
761 struct zone *zone,
762 struct scan_control *sc)
763{
764 LIST_HEAD(ret_pages);
765 LIST_HEAD(free_pages);
766 int pgactivate = 0;
767 unsigned long nr_dirty = 0;
768 unsigned long nr_congested = 0;
769 unsigned long nr_reclaimed = 0;
770
771 cond_resched();
772
773 while (!list_empty(page_list)) {
774 enum page_references references;
775 struct address_space *mapping;
776 struct page *page;
777 int may_enter_fs;
778
779 cond_resched();
780
781 page = lru_to_page(page_list);
782 list_del(&page->lru);
783
784 if (!trylock_page(page))
785 goto keep;
786
787 VM_BUG_ON(PageActive(page));
788 VM_BUG_ON(page_zone(page) != zone);
789
790 sc->nr_scanned++;
791
792 if (unlikely(!page_evictable(page, NULL)))
793 goto cull_mlocked;
794
795 if (!sc->may_unmap && page_mapped(page))
796 goto keep_locked;
797
798 /* Double the slab pressure for mapped and swapcache pages */
799 if (page_mapped(page) || PageSwapCache(page))
800 sc->nr_scanned++;
801
802 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
803 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
804
805 if (PageWriteback(page)) {
806 /*
807 * Synchronous reclaim is performed in two passes,
808 * first an asynchronous pass over the list to
809 * start parallel writeback, and a second synchronous
810 * pass to wait for the IO to complete. Wait here
811 * for any page for which writeback has already
812 * started.
813 */
814 if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) &&
815 may_enter_fs)
816 wait_on_page_writeback(page);
817 else {
818 unlock_page(page);
819 goto keep_lumpy;
820 }
821 }
822
823 references = page_check_references(page, sc);
824 switch (references) {
825 case PAGEREF_ACTIVATE:
826 goto activate_locked;
827 case PAGEREF_KEEP:
828 goto keep_locked;
829 case PAGEREF_RECLAIM:
830 case PAGEREF_RECLAIM_CLEAN:
831 ; /* try to reclaim the page below */
832 }
833
834 /*
835 * Anonymous process memory has backing store?
836 * Try to allocate it some swap space here.
837 */
838 if (PageAnon(page) && !PageSwapCache(page)) {
839 if (!(sc->gfp_mask & __GFP_IO))
840 goto keep_locked;
841 if (!add_to_swap(page))
842 goto activate_locked;
843 may_enter_fs = 1;
844 }
845
846 mapping = page_mapping(page);
847
848 /*
849 * The page is mapped into the page tables of one or more
850 * processes. Try to unmap it here.
851 */
852 if (page_mapped(page) && mapping) {
853 switch (try_to_unmap(page, TTU_UNMAP)) {
854 case SWAP_FAIL:
855 goto activate_locked;
856 case SWAP_AGAIN:
857 goto keep_locked;
858 case SWAP_MLOCK:
859 goto cull_mlocked;
860 case SWAP_SUCCESS:
861 ; /* try to free the page below */
862 }
863 }
864
865 if (PageDirty(page)) {
866 nr_dirty++;
867
868 if (references == PAGEREF_RECLAIM_CLEAN)
869 goto keep_locked;
870 if (!may_enter_fs)
871 goto keep_locked;
872 if (!sc->may_writepage)
873 goto keep_locked;
874
875 /* Page is dirty, try to write it out here */
876 switch (pageout(page, mapping, sc)) {
877 case PAGE_KEEP:
878 nr_congested++;
879 goto keep_locked;
880 case PAGE_ACTIVATE:
881 goto activate_locked;
882 case PAGE_SUCCESS:
883 if (PageWriteback(page))
884 goto keep_lumpy;
885 if (PageDirty(page))
886 goto keep;
887
888 /*
889 * A synchronous write - probably a ramdisk. Go
890 * ahead and try to reclaim the page.
891 */
892 if (!trylock_page(page))
893 goto keep;
894 if (PageDirty(page) || PageWriteback(page))
895 goto keep_locked;
896 mapping = page_mapping(page);
897 case PAGE_CLEAN:
898 ; /* try to free the page below */
899 }
900 }
901
902 /*
903 * If the page has buffers, try to free the buffer mappings
904 * associated with this page. If we succeed we try to free
905 * the page as well.
906 *
907 * We do this even if the page is PageDirty().
908 * try_to_release_page() does not perform I/O, but it is
909 * possible for a page to have PageDirty set, but it is actually
910 * clean (all its buffers are clean). This happens if the
911 * buffers were written out directly, with submit_bh(). ext3
912 * will do this, as well as the blockdev mapping.
913 * try_to_release_page() will discover that cleanness and will
914 * drop the buffers and mark the page clean - it can be freed.
915 *
916 * Rarely, pages can have buffers and no ->mapping. These are
917 * the pages which were not successfully invalidated in
918 * truncate_complete_page(). We try to drop those buffers here
919 * and if that worked, and the page is no longer mapped into
920 * process address space (page_count == 1) it can be freed.
921 * Otherwise, leave the page on the LRU so it is swappable.
922 */
923 if (page_has_private(page)) {
924 if (!try_to_release_page(page, sc->gfp_mask))
925 goto activate_locked;
926 if (!mapping && page_count(page) == 1) {
927 unlock_page(page);
928 if (put_page_testzero(page))
929 goto free_it;
930 else {
931 /*
932 * rare race with speculative reference.
933 * the speculative reference will free
934 * this page shortly, so we may
935 * increment nr_reclaimed here (and
936 * leave it off the LRU).
937 */
938 nr_reclaimed++;
939 continue;
940 }
941 }
942 }
943
944 if (!mapping || !__remove_mapping(mapping, page))
945 goto keep_locked;
946
947 /*
948 * At this point, we have no other references and there is
949 * no way to pick any more up (removed from LRU, removed
950 * from pagecache). Can use non-atomic bitops now (and
951 * we obviously don't have to worry about waking up a process
952 * waiting on the page lock, because there are no references.
953 */
954 __clear_page_locked(page);
955free_it:
956 nr_reclaimed++;
957
958 /*
959 * Is there a need to periodically call free_page_list()? It would
960 * appear not, as the counts should be low.
961 */
962 list_add(&page->lru, &free_pages);
963 continue;
964
965cull_mlocked:
966 if (PageSwapCache(page))
967 try_to_free_swap(page);
968 unlock_page(page);
969 putback_lru_page(page);
970 reset_reclaim_mode(sc);
971 continue;
972
973activate_locked:
974 /* Not a candidate for swapping, so reclaim swap space. */
975 if (PageSwapCache(page) && vm_swap_full())
976 try_to_free_swap(page);
977 VM_BUG_ON(PageActive(page));
978 SetPageActive(page);
979 pgactivate++;
980keep_locked:
981 unlock_page(page);
982keep:
983 reset_reclaim_mode(sc);
984keep_lumpy:
985 list_add(&page->lru, &ret_pages);
986 VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
987 }
988
989 /*
990 * Tag a zone as congested if all the dirty pages encountered were
991 * backed by a congested BDI. In this case, reclaimers should just
992 * back off and wait for congestion to clear because further reclaim
993 * will encounter the same problem
994 */
995 if (nr_dirty && nr_dirty == nr_congested && scanning_global_lru(sc))
996 zone_set_flag(zone, ZONE_CONGESTED);
997
998 free_page_list(&free_pages);
999
1000 list_splice(&ret_pages, page_list);
1001 count_vm_events(PGACTIVATE, pgactivate);
1002 return nr_reclaimed;
1003}
1004
1005/*
1006 * Attempt to remove the specified page from its LRU. Only take this page
1007 * if it is of the appropriate PageActive status. Pages which are being
1008 * freed elsewhere are also ignored.
1009 *
1010 * page: page to consider
1011 * mode: one of the LRU isolation modes defined above
1012 *
1013 * returns 0 on success, -ve errno on failure.
1014 */
1015int __isolate_lru_page(struct page *page, int mode, int file)
1016{
1017 int ret = -EINVAL;
1018
1019 /* Only take pages on the LRU. */
1020 if (!PageLRU(page))
1021 return ret;
1022
1023 /*
1024 * When checking the active state, we need to be sure we are
1025 * dealing with comparable boolean values. Take the logical not
1026 * of each.
1027 */
1028 if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
1029 return ret;
1030
1031 if (mode != ISOLATE_BOTH && page_is_file_cache(page) != file)
1032 return ret;
1033
1034 /*
1035 * When this function is being called for lumpy reclaim, we
1036 * initially look into all LRU pages, active, inactive and
1037 * unevictable; only give shrink_page_list evictable pages.
1038 */
1039 if (PageUnevictable(page))
1040 return ret;
1041
1042 ret = -EBUSY;
1043
1044 if (likely(get_page_unless_zero(page))) {
1045 /*
1046 * Be careful not to clear PageLRU until after we're
1047 * sure the page is not being freed elsewhere -- the
1048 * page release code relies on it.
1049 */
1050 ClearPageLRU(page);
1051 ret = 0;
1052 }
1053
1054 return ret;
1055}
1056
1057/*
1058 * zone->lru_lock is heavily contended. Some of the functions that
1059 * shrink the lists perform better by taking out a batch of pages
1060 * and working on them outside the LRU lock.
1061 *
1062 * For pagecache intensive workloads, this function is the hottest
1063 * spot in the kernel (apart from copy_*_user functions).
1064 *
1065 * Appropriate locks must be held before calling this function.
1066 *
1067 * @nr_to_scan: The number of pages to look through on the list.
1068 * @src: The LRU list to pull pages off.
1069 * @dst: The temp list to put pages on to.
1070 * @scanned: The number of pages that were scanned.
1071 * @order: The caller's attempted allocation order
1072 * @mode: One of the LRU isolation modes
1073 * @file: True [1] if isolating file [!anon] pages
1074 *
1075 * returns how many pages were moved onto *@dst.
1076 */
1077static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1078 struct list_head *src, struct list_head *dst,
1079 unsigned long *scanned, int order, int mode, int file)
1080{
1081 unsigned long nr_taken = 0;
1082 unsigned long nr_lumpy_taken = 0;
1083 unsigned long nr_lumpy_dirty = 0;
1084 unsigned long nr_lumpy_failed = 0;
1085 unsigned long scan;
1086
1087 for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
1088 struct page *page;
1089 unsigned long pfn;
1090 unsigned long end_pfn;
1091 unsigned long page_pfn;
1092 int zone_id;
1093
1094 page = lru_to_page(src);
1095 prefetchw_prev_lru_page(page, src, flags);
1096
1097 VM_BUG_ON(!PageLRU(page));
1098
1099 switch (__isolate_lru_page(page, mode, file)) {
1100 case 0:
1101 list_move(&page->lru, dst);
1102 mem_cgroup_del_lru(page);
1103 nr_taken += hpage_nr_pages(page);
1104 break;
1105
1106 case -EBUSY:
1107 /* else it is being freed elsewhere */
1108 list_move(&page->lru, src);
1109 mem_cgroup_rotate_lru_list(page, page_lru(page));
1110 continue;
1111
1112 default:
1113 BUG();
1114 }
1115
1116 if (!order)
1117 continue;
1118
1119 /*
1120 * Attempt to take all pages in the order aligned region
1121 * surrounding the tag page. Only take those pages of
1122 * the same active state as that tag page. We may safely
1123 * round the target page pfn down to the requested order
1124 * as the mem_map is guaranteed valid out to MAX_ORDER;
1125 * if a page is in a different zone we will detect
1126 * it from its zone id and abort this block scan.
1127 */
1128 zone_id = page_zone_id(page);
1129 page_pfn = page_to_pfn(page);
1130 pfn = page_pfn & ~((1 << order) - 1);
1131 end_pfn = pfn + (1 << order);
1132 for (; pfn < end_pfn; pfn++) {
1133 struct page *cursor_page;
1134
1135 /* The target page is in the block, ignore it. */
1136 if (unlikely(pfn == page_pfn))
1137 continue;
1138
1139 /* Avoid holes within the zone. */
1140 if (unlikely(!pfn_valid_within(pfn)))
1141 break;
1142
1143 cursor_page = pfn_to_page(pfn);
1144
1145 /* Check that we have not crossed a zone boundary. */
1146 if (unlikely(page_zone_id(cursor_page) != zone_id))
1147 break;
1148
1149 /*
1150 * If we don't have enough swap space, reclaiming
1151 * anon pages which don't already have a swap slot is
1152 * pointless.
1153 */
1154 if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
1155 !PageSwapCache(cursor_page))
1156 break;
1157
1158 if (__isolate_lru_page(cursor_page, mode, file) == 0) {
1159 list_move(&cursor_page->lru, dst);
1160 mem_cgroup_del_lru(cursor_page);
1161 nr_taken += hpage_nr_pages(cursor_page);
1162 nr_lumpy_taken++;
1163 if (PageDirty(cursor_page))
1164 nr_lumpy_dirty++;
1165 scan++;
1166 } else {
1167 /*
1168 * Check if the page is freed already.
1169 *
1170 * We can't use page_count() as that
1171 * requires compound_head and we don't
1172 * have a pin on the page here. If a
1173 * page is tail, we may or may not
1174 * have isolated the head, so assume
1175 * it's not free, it'd be tricky to
1176 * track the head status without a
1177 * page pin.
1178 */
1179 if (!PageTail(cursor_page) &&
1180 !atomic_read(&cursor_page->_count))
1181 continue;
1182 break;
1183 }
1184 }
1185
1186 /* If we break out of the loop above, lumpy reclaim failed */
1187 if (pfn < end_pfn)
1188 nr_lumpy_failed++;
1189 }
1190
1191 *scanned = scan;
1192
1193 trace_mm_vmscan_lru_isolate(order,
1194 nr_to_scan, scan,
1195 nr_taken,
1196 nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed,
1197 mode);
1198 return nr_taken;
1199}
1200
1201static unsigned long isolate_pages_global(unsigned long nr,
1202 struct list_head *dst,
1203 unsigned long *scanned, int order,
1204 int mode, struct zone *z,
1205 int active, int file)
1206{
1207 int lru = LRU_BASE;
1208 if (active)
1209 lru += LRU_ACTIVE;
1210 if (file)
1211 lru += LRU_FILE;
1212 return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
1213 mode, file);
1214}
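/*
 * The index arithmetic above relies on the enum lru_list layout: LRU_ACTIVE
 * and LRU_FILE are the offsets that turn LRU_BASE (inactive anon) into the
 * active anon, inactive file and active file lists, so active == 1 and
 * file == 1 selects zone->lru[LRU_ACTIVE_FILE].
 */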
1215
1216/*
1217 * clear_active_flags() is a helper for shrink_active_list(), clearing
1218 * any active bits from the pages in the list.
1219 */
1220static unsigned long clear_active_flags(struct list_head *page_list,
1221 unsigned int *count)
1222{
1223 int nr_active = 0;
1224 int lru;
1225 struct page *page;
1226
1227 list_for_each_entry(page, page_list, lru) {
1228 int numpages = hpage_nr_pages(page);
1229 lru = page_lru_base_type(page);
1230 if (PageActive(page)) {
1231 lru += LRU_ACTIVE;
1232 ClearPageActive(page);
1233 nr_active += numpages;
1234 }
1235 if (count)
1236 count[lru] += numpages;
1237 }
1238
1239 return nr_active;
1240}
1241
1242/**
1243 * isolate_lru_page - tries to isolate a page from its LRU list
1244 * @page: page to isolate from its LRU list
1245 *
1246 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
1247 * vmstat statistic corresponding to whatever LRU list the page was on.
1248 *
1249 * Returns 0 if the page was removed from an LRU list.
1250 * Returns -EBUSY if the page was not on an LRU list.
1251 *
1252 * The returned page will have PageLRU() cleared. If it was found on
1253 * the active list, it will have PageActive set. If it was found on
1254 * the unevictable list, it will have the PageUnevictable bit set. That flag
1255 * may need to be cleared by the caller before letting the page go.
1256 *
1257 * The vmstat statistic corresponding to the list on which the page was
1258 * found will be decremented.
1259 *
1260 * Restrictions:
1261 * (1) Must be called with an elevated refcount on the page. This is a
1262 * fundamental difference from isolate_lru_pages (which is called
1263 * without a stable reference).
1264 * (2) the lru_lock must not be held.
1265 * (3) interrupts must be enabled.
1266 */
1267int isolate_lru_page(struct page *page)
1268{
1269 int ret = -EBUSY;
1270
1271 VM_BUG_ON(!page_count(page));
1272
1273 if (PageLRU(page)) {
1274 struct zone *zone = page_zone(page);
1275
1276 spin_lock_irq(&zone->lru_lock);
1277 if (PageLRU(page)) {
1278 int lru = page_lru(page);
1279 ret = 0;
1280 get_page(page);
1281 ClearPageLRU(page);
1282
1283 del_page_from_lru_list(zone, page, lru);
1284 }
1285 spin_unlock_irq(&zone->lru_lock);
1286 }
1287 return ret;
1288}
1289
1290/*
1291 * Are there way too many processes in the direct reclaim path already?
1292 */
1293static int too_many_isolated(struct zone *zone, int file,
1294 struct scan_control *sc)
1295{
1296 unsigned long inactive, isolated;
1297
1298 if (current_is_kswapd())
1299 return 0;
1300
1301 if (!scanning_global_lru(sc))
1302 return 0;
1303
1304 if (file) {
1305 inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1306 isolated = zone_page_state(zone, NR_ISOLATED_FILE);
1307 } else {
1308 inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1309 isolated = zone_page_state(zone, NR_ISOLATED_ANON);
1310 }
1311
1312 return isolated > inactive;
1313}
1314
1315/*
1316 * TODO: Try merging with migration's version of putback_lru_pages
1317 */
1318static noinline_for_stack void
1319putback_lru_pages(struct zone *zone, struct scan_control *sc,
1320 unsigned long nr_anon, unsigned long nr_file,
1321 struct list_head *page_list)
1322{
1323 struct page *page;
1324 struct pagevec pvec;
1325 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1326
1327 pagevec_init(&pvec, 1);
1328
1329 /*
1330 * Put back any unfreeable pages.
1331 */
1332 spin_lock(&zone->lru_lock);
1333 while (!list_empty(page_list)) {
1334 int lru;
1335 page = lru_to_page(page_list);
1336 VM_BUG_ON(PageLRU(page));
1337 list_del(&page->lru);
1338 if (unlikely(!page_evictable(page, NULL))) {
1339 spin_unlock_irq(&zone->lru_lock);
1340 putback_lru_page(page);
1341 spin_lock_irq(&zone->lru_lock);
1342 continue;
1343 }
1344 SetPageLRU(page);
1345 lru = page_lru(page);
1346 add_page_to_lru_list(zone, page, lru);
1347 if (is_active_lru(lru)) {
1348 int file = is_file_lru(lru);
1349 int numpages = hpage_nr_pages(page);
1350 reclaim_stat->recent_rotated[file] += numpages;
1351 }
1352 if (!pagevec_add(&pvec, page)) {
1353 spin_unlock_irq(&zone->lru_lock);
1354 __pagevec_release(&pvec);
1355 spin_lock_irq(&zone->lru_lock);
1356 }
1357 }
1358 __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
1359 __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
1360
1361 spin_unlock_irq(&zone->lru_lock);
1362 pagevec_release(&pvec);
1363}
1364
1365static noinline_for_stack void update_isolated_counts(struct zone *zone,
1366 struct scan_control *sc,
1367 unsigned long *nr_anon,
1368 unsigned long *nr_file,
1369 struct list_head *isolated_list)
1370{
1371 unsigned long nr_active;
1372 unsigned int count[NR_LRU_LISTS] = { 0, };
1373 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1374
1375 nr_active = clear_active_flags(isolated_list, count);
1376 __count_vm_events(PGDEACTIVATE, nr_active);
1377
1378 __mod_zone_page_state(zone, NR_ACTIVE_FILE,
1379 -count[LRU_ACTIVE_FILE]);
1380 __mod_zone_page_state(zone, NR_INACTIVE_FILE,
1381 -count[LRU_INACTIVE_FILE]);
1382 __mod_zone_page_state(zone, NR_ACTIVE_ANON,
1383 -count[LRU_ACTIVE_ANON]);
1384 __mod_zone_page_state(zone, NR_INACTIVE_ANON,
1385 -count[LRU_INACTIVE_ANON]);
1386
1387 *nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
1388 *nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
1389 __mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
1390 __mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);
1391
1392 reclaim_stat->recent_scanned[0] += *nr_anon;
1393 reclaim_stat->recent_scanned[1] += *nr_file;
1394}
1395
1396/*
1397 * Returns true if the caller should wait to clean dirty/writeback pages.
1398 *
1399 * If we are direct reclaiming for contiguous pages and we do not reclaim
1400 * everything in the list, try again and wait for writeback IO to complete.
1401 * This will stall high-order allocations noticeably. Only do that when we
1402 * really need to free the pages under high memory pressure.
1403 */
1404static inline bool should_reclaim_stall(unsigned long nr_taken,
1405 unsigned long nr_freed,
1406 int priority,
1407 struct scan_control *sc)
1408{
1409 int lumpy_stall_priority;
1410
1411 /* kswapd should not stall on sync IO */
1412 if (current_is_kswapd())
1413 return false;
1414
1415 /* Only stall on lumpy reclaim */
1416 if (sc->reclaim_mode & RECLAIM_MODE_SINGLE)
1417 return false;
1418
1419 /* If we have reclaimed everything on the isolated list, no stall */
1420 if (nr_freed == nr_taken)
1421 return false;
1422
1423 /*
1424 * For high-order allocations, there are two stall thresholds.
1425 * High-cost allocations stall immediately, whereas lower-
1426 * order allocations such as stacks require the scanning
1427 * priority to be much higher before stalling.
1428 */
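/*
 * With the usual DEF_PRIORITY of 12 and PAGE_ALLOC_COSTLY_ORDER of 3, an
 * order-4 or larger allocation may stall on any pass, while e.g. an order-1
 * stack allocation only stalls once priority has dropped to 4 or below.
 */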
1429 if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
1430 lumpy_stall_priority = DEF_PRIORITY;
1431 else
1432 lumpy_stall_priority = DEF_PRIORITY / 3;
1433
1434 return priority <= lumpy_stall_priority;
1435}
1436
1437/*
1438 * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
1439 * of reclaimed pages
1440 */
1441static noinline_for_stack unsigned long
1442shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
1443 struct scan_control *sc, int priority, int file)
1444{
1445 LIST_HEAD(page_list);
1446 unsigned long nr_scanned;
1447 unsigned long nr_reclaimed = 0;
1448 unsigned long nr_taken;
1449 unsigned long nr_anon;
1450 unsigned long nr_file;
1451
1452 while (unlikely(too_many_isolated(zone, file, sc))) {
1453 congestion_wait(BLK_RW_ASYNC, HZ/10);
1454
1455 /* We are about to die and free our memory. Return now. */
1456 if (fatal_signal_pending(current))
1457 return SWAP_CLUSTER_MAX;
1458 }
1459
1460 set_reclaim_mode(priority, sc, false);
1461 lru_add_drain();
1462 spin_lock_irq(&zone->lru_lock);
1463
1464 if (scanning_global_lru(sc)) {
1465 nr_taken = isolate_pages_global(nr_to_scan,
1466 &page_list, &nr_scanned, sc->order,
1467 sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ?
1468 ISOLATE_BOTH : ISOLATE_INACTIVE,
1469 zone, 0, file);
1470 zone->pages_scanned += nr_scanned;
1471 if (current_is_kswapd())
1472 __count_zone_vm_events(PGSCAN_KSWAPD, zone,
1473 nr_scanned);
1474 else
1475 __count_zone_vm_events(PGSCAN_DIRECT, zone,
1476 nr_scanned);
1477 } else {
1478 nr_taken = mem_cgroup_isolate_pages(nr_to_scan,
1479 &page_list, &nr_scanned, sc->order,
1480 sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ?
1481 ISOLATE_BOTH : ISOLATE_INACTIVE,
1482 zone, sc->mem_cgroup,
1483 0, file);
1484 /*
1485 * mem_cgroup_isolate_pages() keeps track of
1486 * scanned pages on its own.
1487 */
1488 }
1489
1490 if (nr_taken == 0) {
1491 spin_unlock_irq(&zone->lru_lock);
1492 return 0;
1493 }
1494
1495 update_isolated_counts(zone, sc, &nr_anon, &nr_file, &page_list);
1496
1497 spin_unlock_irq(&zone->lru_lock);
1498
1499 nr_reclaimed = shrink_page_list(&page_list, zone, sc);
1500
1501 /* Check if we should synchronously wait for writeback */
1502 if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
1503 set_reclaim_mode(priority, sc, true);
1504 nr_reclaimed += shrink_page_list(&page_list, zone, sc);
1505 }
1506
1507 local_irq_disable();
1508 if (current_is_kswapd())
1509 __count_vm_events(KSWAPD_STEAL, nr_reclaimed);
1510 __count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
1511
1512 putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list);
1513
1514 trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
1515 zone_idx(zone),
1516 nr_scanned, nr_reclaimed,
1517 priority,
1518 trace_shrink_flags(file, sc->reclaim_mode));
1519 return nr_reclaimed;
1520}
1521
1522/*
1523 * This moves pages from the active list to the inactive list.
1524 *
1525 * We move them the other way if the page is referenced by one or more
1526 * processes, from rmap.
1527 *
1528 * If the pages are mostly unmapped, the processing is fast and it is
1529 * appropriate to hold zone->lru_lock across the whole operation. But if
1530 * the pages are mapped, the processing is slow (page_referenced()) so we
1531 * should drop zone->lru_lock around each page. It's impossible to balance
1532 * this, so instead we remove the pages from the LRU while processing them.
1533 * It is safe to rely on PG_active against the non-LRU pages in here because
1534 * nobody will play with that bit on a non-LRU page.
1535 *
1536 * The downside is that we have to touch page->_count against each page.
1537 * But we had to alter page->flags anyway.
1538 */
1539
1540static void move_active_pages_to_lru(struct zone *zone,
1541 struct list_head *list,
1542 enum lru_list lru)
1543{
1544 unsigned long pgmoved = 0;
1545 struct pagevec pvec;
1546 struct page *page;
1547
1548 pagevec_init(&pvec, 1);
1549
1550 while (!list_empty(list)) {
1551 page = lru_to_page(list);
1552
1553 VM_BUG_ON(PageLRU(page));
1554 SetPageLRU(page);
1555
1556 list_move(&page->lru, &zone->lru[lru].list);
1557 mem_cgroup_add_lru_list(page, lru);
1558 pgmoved += hpage_nr_pages(page);
1559
1560 if (!pagevec_add(&pvec, page) || list_empty(list)) {
1561 spin_unlock_irq(&zone->lru_lock);
1562 if (buffer_heads_over_limit)
1563 pagevec_strip(&pvec);
1564 __pagevec_release(&pvec);
1565 spin_lock_irq(&zone->lru_lock);
1566 }
1567 }
1568 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
1569 if (!is_active_lru(lru))
1570 __count_vm_events(PGDEACTIVATE, pgmoved);
1571}
1572
1573static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1574 struct scan_control *sc, int priority, int file)
1575{
1576 unsigned long nr_taken;
1577 unsigned long pgscanned;
1578 unsigned long vm_flags;
1579 LIST_HEAD(l_hold); /* The pages which were snipped off */
1580 LIST_HEAD(l_active);
1581 LIST_HEAD(l_inactive);
1582 struct page *page;
1583 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1584 unsigned long nr_rotated = 0;
1585
1586 lru_add_drain();
1587 spin_lock_irq(&zone->lru_lock);
1588 if (scanning_global_lru(sc)) {
1589 nr_taken = isolate_pages_global(nr_pages, &l_hold,
1590 &pgscanned, sc->order,
1591 ISOLATE_ACTIVE, zone,
1592 1, file);
1593 zone->pages_scanned += pgscanned;
1594 } else {
1595 nr_taken = mem_cgroup_isolate_pages(nr_pages, &l_hold,
1596 &pgscanned, sc->order,
1597 ISOLATE_ACTIVE, zone,
1598 sc->mem_cgroup, 1, file);
1599 /*
1600 * mem_cgroup_isolate_pages() keeps track of
1601 * scanned pages on its own.
1602 */
1603 }
1604
1605 reclaim_stat->recent_scanned[file] += nr_taken;
1606
1607 __count_zone_vm_events(PGREFILL, zone, pgscanned);
1608 if (file)
1609 __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
1610 else
1611 __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
1612 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
1613 spin_unlock_irq(&zone->lru_lock);
1614
1615 while (!list_empty(&l_hold)) {
1616 cond_resched();
1617 page = lru_to_page(&l_hold);
1618 list_del(&page->lru);
1619
1620 if (unlikely(!page_evictable(page, NULL))) {
1621 putback_lru_page(page);
1622 continue;
1623 }
1624
1625 if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
1626 nr_rotated += hpage_nr_pages(page);
1627 /*
1628 * Identify referenced, file-backed active pages and
1629 * give them one more trip around the active list, so
1630 * that executable code gets a better chance to stay in
1631 * memory under moderate memory pressure. Anon pages
1632 * are not likely to be evicted by use-once streaming
1633 * IO, plus JVM can create lots of anon VM_EXEC pages,
1634 * so we ignore them here.
1635 */
1636 if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
1637 list_add(&page->lru, &l_active);
1638 continue;
1639 }
1640 }
1641
1642 ClearPageActive(page); /* we are de-activating */
1643 list_add(&page->lru, &l_inactive);
1644 }
1645
1646 /*
1647 * Move pages back to the lru list.
1648 */
1649 spin_lock_irq(&zone->lru_lock);
1650 /*
1651 * Count referenced pages from currently used mappings as rotated,
1652 * even though only some of them are actually re-activated. This
1653 * helps balance scan pressure between file and anonymous pages in
1654 * get_scan_count().
1655 */
1656 reclaim_stat->recent_rotated[file] += nr_rotated;
1657
1658 move_active_pages_to_lru(zone, &l_active,
1659 LRU_ACTIVE + file * LRU_FILE);
1660 move_active_pages_to_lru(zone, &l_inactive,
1661 LRU_BASE + file * LRU_FILE);
1662 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
1663 spin_unlock_irq(&zone->lru_lock);
1664}
1665
1666#ifdef CONFIG_SWAP
1667static int inactive_anon_is_low_global(struct zone *zone)
1668{
1669 unsigned long active, inactive;
1670
1671 active = zone_page_state(zone, NR_ACTIVE_ANON);
1672 inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1673
1674 if (inactive * zone->inactive_ratio < active)
1675 return 1;
1676
1677 return 0;
1678}
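/*
 * zone->inactive_ratio is set up by the page allocator and grows roughly
 * with the square root of the zone size, so it is a small single-digit
 * factor for gigabyte-sized zones: anon is "low" once active pages outnumber
 * inactive ones by more than that factor.
 */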
1679
1680/**
1681 * inactive_anon_is_low - check if anonymous pages need to be deactivated
1682 * @zone: zone to check
1683 * @sc: scan control of this context
1684 *
1685 * Returns true if the zone does not have enough inactive anon pages,
1686 * meaning some active anon pages need to be deactivated.
1687 */
1688static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
1689{
1690 int low;
1691
1692 /*
1693 * If we don't have swap space, anonymous page deactivation
1694 * is pointless.
1695 */
1696 if (!total_swap_pages)
1697 return 0;
1698
1699 if (scanning_global_lru(sc))
1700 low = inactive_anon_is_low_global(zone);
1701 else
1702 low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup);
1703 return low;
1704}
1705#else
1706static inline int inactive_anon_is_low(struct zone *zone,
1707 struct scan_control *sc)
1708{
1709 return 0;
1710}
1711#endif
1712
1713static int inactive_file_is_low_global(struct zone *zone)
1714{
1715 unsigned long active, inactive;
1716
1717 active = zone_page_state(zone, NR_ACTIVE_FILE);
1718 inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1719
1720 return (active > inactive);
1721}
1722
1723/**
1724 * inactive_file_is_low - check if file pages need to be deactivated
1725 * @zone: zone to check
1726 * @sc: scan control of this context
1727 *
1728 * When the system is doing streaming IO, memory pressure here
1729 * ensures that active file pages get deactivated, until more
1730 * than half of the file pages are on the inactive list.
1731 *
1732 * Once we get to that situation, protect the system's working
1733 * set from being evicted by disabling active file page aging.
1734 *
1735 * This uses a different ratio than the anonymous pages, because
1736 * the page cache uses a use-once replacement algorithm.
1737 */
1738static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
1739{
1740 int low;
1741
1742 if (scanning_global_lru(sc))
1743 low = inactive_file_is_low_global(zone);
1744 else
1745 low = mem_cgroup_inactive_file_is_low(sc->mem_cgroup);
1746 return low;
1747}
1748
1749static int inactive_list_is_low(struct zone *zone, struct scan_control *sc,
1750 int file)
1751{
1752 if (file)
1753 return inactive_file_is_low(zone, sc);
1754 else
1755 return inactive_anon_is_low(zone, sc);
1756}
1757
1758static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1759 struct zone *zone, struct scan_control *sc, int priority)
1760{
1761 int file = is_file_lru(lru);
1762
1763 if (is_active_lru(lru)) {
1764 if (inactive_list_is_low(zone, sc, file))
1765 shrink_active_list(nr_to_scan, zone, sc, priority, file);
1766 return 0;
1767 }
1768
1769 return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
1770}
1771
1772static int vmscan_swappiness(struct scan_control *sc)
1773{
1774 if (scanning_global_lru(sc))
1775 return vm_swappiness;
1776 return mem_cgroup_swappiness(sc->mem_cgroup);
1777}
1778
1779/*
1780 * Determine how aggressively the anon and file LRU lists should be
1781 * scanned. The relative value of each set of LRU lists is determined
1782 * by looking at the fraction of the pages scanned we did rotate back
1783 * onto the active list instead of evict.
1784 *
1785 * nr[0] = anon pages to scan; nr[1] = file pages to scan
1786 */
1787static void get_scan_count(struct zone *zone, struct scan_control *sc,
1788 unsigned long *nr, int priority)
1789{
1790 unsigned long anon, file, free;
1791 unsigned long anon_prio, file_prio;
1792 unsigned long ap, fp;
1793 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1794 u64 fraction[2], denominator;
1795 enum lru_list l;
1796 int noswap = 0;
1797 bool force_scan = false;
1798 unsigned long nr_force_scan[2];
1799
1800 /* kswapd does zone balancing and needs to scan this zone */
1801 if (scanning_global_lru(sc) && current_is_kswapd())
1802 force_scan = true;
1803 /* memcg may have small limit and need to avoid priority drop */
1804 if (!scanning_global_lru(sc))
1805 force_scan = true;
1806
1807 /* If we have no swap space, do not bother scanning anon pages. */
1808 if (!sc->may_swap || (nr_swap_pages <= 0)) {
1809 noswap = 1;
1810 fraction[0] = 0;
1811 fraction[1] = 1;
1812 denominator = 1;
1813 nr_force_scan[0] = 0;
1814 nr_force_scan[1] = SWAP_CLUSTER_MAX;
1815 goto out;
1816 }
1817
1818 anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
1819 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
1820 file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
1821 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
1822
1823 if (scanning_global_lru(sc)) {
1824 free = zone_page_state(zone, NR_FREE_PAGES);
1825 /* If we have very few page cache pages,
1826 force-scan anon pages. */
1827 if (unlikely(file + free <= high_wmark_pages(zone))) {
1828 fraction[0] = 1;
1829 fraction[1] = 0;
1830 denominator = 1;
1831 nr_force_scan[0] = SWAP_CLUSTER_MAX;
1832 nr_force_scan[1] = 0;
1833 goto out;
1834 }
1835 }
1836
1837 /*
1838 * With swappiness at 100, anonymous and file have the same priority.
1839 * This scanning priority is essentially the inverse of IO cost.
1840 */
1841 anon_prio = vmscan_swappiness(sc);
1842 file_prio = 200 - vmscan_swappiness(sc);
1843
1844 /*
1845 * OK, so we have swap space and a fair amount of page cache
1846 * pages. We use the recently rotated / recently scanned
1847 * ratios to determine how valuable each cache is.
1848 *
1849 * Because workloads change over time (and to avoid overflow)
1850 * we keep these statistics as a floating average, which ends
1851 * up weighing recent references more than old ones.
1852 *
1853 * anon in [0], file in [1]
1854 */
1855 spin_lock_irq(&zone->lru_lock);
1856 if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
1857 reclaim_stat->recent_scanned[0] /= 2;
1858 reclaim_stat->recent_rotated[0] /= 2;
1859 }
1860
1861 if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
1862 reclaim_stat->recent_scanned[1] /= 2;
1863 reclaim_stat->recent_rotated[1] /= 2;
1864 }
1865
1866 /*
1867 * The amount of pressure on anon vs file pages is inversely
1868 * proportional to the fraction of recently scanned pages on
1869 * each list that were recently referenced and in active use.
1870 */
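/*
 * Illustrative numbers (not from the code): with the default swappiness of
 * 60, anon_prio is 60 and file_prio is 140. If 1000 anon pages were recently
 * scanned and 100 rotated back, ap is about 61 * 1001 / 101 ~= 604; a file
 * list with the same history gets fp ~= 141 * 1001 / 101 ~= 1397, so file
 * pages receive the larger share of the scan.
 */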
1871 ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
1872 ap /= reclaim_stat->recent_rotated[0] + 1;
1873
1874 fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
1875 fp /= reclaim_stat->recent_rotated[1] + 1;
1876 spin_unlock_irq(&zone->lru_lock);
1877
1878 fraction[0] = ap;
1879 fraction[1] = fp;
1880 denominator = ap + fp + 1;
1881 if (force_scan) {
1882 unsigned long scan = SWAP_CLUSTER_MAX;
1883 nr_force_scan[0] = div64_u64(scan * ap, denominator);
1884 nr_force_scan[1] = div64_u64(scan * fp, denominator);
1885 }
1886out:
1887 for_each_evictable_lru(l) {
1888 int file = is_file_lru(l);
1889 unsigned long scan;
1890
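		/*
		 * Scale the list size by priority: e.g. at DEF_PRIORITY (12)
		 * a 1,048,576-page list contributes 256 pages before the
		 * anon/file fraction is applied.
		 */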
1891 scan = zone_nr_lru_pages(zone, sc, l);
1892 if (priority || noswap) {
1893 scan >>= priority;
1894 scan = div64_u64(scan * fraction[file], denominator);
1895 }
1896
1897 /*
1898		 * If the zone or the memcg is small, nr[l] can be 0. That means
1899		 * nothing is scanned at this priority and the priority drops.
1900		 * Global direct reclaim can simply visit the next zone, so this
1901		 * is rarely a problem there. Global kswapd, however, does zone
1902		 * balancing and needs to scan at least a small amount, and with
1903		 * memcg a priority drop can cause large latencies. In those
1904		 * cases scan a small amount anyway; see force_scan above.
1905 */
1906 if (!scan && force_scan)
1907 scan = nr_force_scan[file];
1908 nr[l] = scan;
1909 }
1910}
1911
1912/*
1913 * Reclaim/compaction depends on a number of pages being freed. To avoid
1914 * disruption to the system, a small number of order-0 pages continue to be
1915 * rotated and reclaimed in the normal fashion. However, by the time we get
1916 * back to the allocator and call try_to_compact_zone(), we ensure that
1917 * there are enough free pages for it to be likely successful
1918 */
1919static inline bool should_continue_reclaim(struct zone *zone,
1920 unsigned long nr_reclaimed,
1921 unsigned long nr_scanned,
1922 struct scan_control *sc)
1923{
1924 unsigned long pages_for_compaction;
1925 unsigned long inactive_lru_pages;
1926
1927 /* If not in reclaim/compaction mode, stop */
1928 if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
1929 return false;
1930
1931 /* Consider stopping depending on scan and reclaim activity */
1932 if (sc->gfp_mask & __GFP_REPEAT) {
1933 /*
1934 * For __GFP_REPEAT allocations, stop reclaiming if the
1935 * full LRU list has been scanned and we are still failing
1936 * to reclaim pages. This full LRU scan is potentially
1937 * expensive but a __GFP_REPEAT caller really wants to succeed
1938 */
1939 if (!nr_reclaimed && !nr_scanned)
1940 return false;
1941 } else {
1942 /*
1943 * For non-__GFP_REPEAT allocations which can presumably
1944 * fail without consequence, stop if we failed to reclaim
1945 * any pages from the last SWAP_CLUSTER_MAX number of
1946 * pages that were scanned. This will return to the
1947		 * pages that were scanned. This will return to the
1948		 * caller faster at the risk that reclaim/compaction and
1948		 * the resulting allocation attempt fail.
1949 */
1950 if (!nr_reclaimed)
1951 return false;
1952 }
1953
1954 /*
1955 * If we have not reclaimed enough pages for compaction and the
1956 * inactive lists are large enough, continue reclaiming
1957 */
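	/* e.g. an order-3 request asks for 2UL << 3 = 16 base pages here */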
1958 pages_for_compaction = (2UL << sc->order);
1959 inactive_lru_pages = zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON) +
1960 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
1961 if (sc->nr_reclaimed < pages_for_compaction &&
1962 inactive_lru_pages > pages_for_compaction)
1963 return true;
1964
1965 /* If compaction would go ahead or the allocation would succeed, stop */
1966 switch (compaction_suitable(zone, sc->order)) {
1967 case COMPACT_PARTIAL:
1968 case COMPACT_CONTINUE:
1969 return false;
1970 default:
1971 return true;
1972 }
1973}
1974
1975/*
1976 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
1977 */
1978static void shrink_zone(int priority, struct zone *zone,
1979 struct scan_control *sc)
1980{
1981 unsigned long nr[NR_LRU_LISTS];
1982 unsigned long nr_to_scan;
1983 enum lru_list l;
1984 unsigned long nr_reclaimed, nr_scanned;
1985 unsigned long nr_to_reclaim = sc->nr_to_reclaim;
1986
1987restart:
1988 nr_reclaimed = 0;
1989 nr_scanned = sc->nr_scanned;
1990 get_scan_count(zone, sc, nr, priority);
1991
1992 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
1993 nr[LRU_INACTIVE_FILE]) {
1994 for_each_evictable_lru(l) {
1995 if (nr[l]) {
1996 nr_to_scan = min_t(unsigned long,
1997 nr[l], SWAP_CLUSTER_MAX);
1998 nr[l] -= nr_to_scan;
1999
2000 nr_reclaimed += shrink_list(l, nr_to_scan,
2001 zone, sc, priority);
2002 }
2003 }
2004 /*
2005 * On large memory systems, scan >> priority can become
2006 * really large. This is fine for the starting priority;
2007 * we want to put equal scanning pressure on each zone.
2008 * However, if the VM has a harder time of freeing pages,
2009 * with multiple processes reclaiming pages, the total
2010 * freeing target can get unreasonably large.
2011 */
2012 if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
2013 break;
2014 }
2015 sc->nr_reclaimed += nr_reclaimed;
2016
2017 /*
2018 * Even if we did not try to evict anon pages at all, we want to
2019 * rebalance the anon lru active/inactive ratio.
2020 */
2021 if (inactive_anon_is_low(zone, sc))
2022 shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
2023
2024 /* reclaim/compaction might need reclaim to continue */
2025 if (should_continue_reclaim(zone, nr_reclaimed,
2026 sc->nr_scanned - nr_scanned, sc))
2027 goto restart;
2028
2029 throttle_vm_writeout(sc->gfp_mask);
2030}
2031
2032/*
2033 * This is the direct reclaim path, for page-allocating processes. We only
2034 * try to reclaim pages from zones which will satisfy the caller's allocation
2035 * request.
2036 *
2037 * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
2038 * Because:
2039 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
2040 * allocation or
2041 * b) The target zone may be at high_wmark_pages(zone) but the lower zones
2042 * must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
2043 * zone defense algorithm.
2044 *
2045 * If a zone is deemed to be full of pinned pages then just give it a light
2046 * scan and then give up on it.
2047 */
2048static void shrink_zones(int priority, struct zonelist *zonelist,
2049 struct scan_control *sc)
2050{
2051 struct zoneref *z;
2052 struct zone *zone;
2053 unsigned long nr_soft_reclaimed;
2054 unsigned long nr_soft_scanned;
2055
2056 for_each_zone_zonelist_nodemask(zone, z, zonelist,
2057 gfp_zone(sc->gfp_mask), sc->nodemask) {
2058 if (!populated_zone(zone))
2059 continue;
2060 /*
2061		 * Take care that memory controller (memcg) reclaim has only a
2062		 * small influence on the global LRU.
2063 */
2064 if (scanning_global_lru(sc)) {
2065 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2066 continue;
2067 if (zone->all_unreclaimable && priority != DEF_PRIORITY)
2068 continue; /* Let kswapd poll it */
2069 /*
2070 * This steals pages from memory cgroups over softlimit
2071 * and returns the number of reclaimed pages and
2072 * scanned pages. This works for global memory pressure
2073 * and balancing, not for a memcg's limit.
2074 */
2075 nr_soft_scanned = 0;
2076 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
2077 sc->order, sc->gfp_mask,
2078 &nr_soft_scanned);
2079 sc->nr_reclaimed += nr_soft_reclaimed;
2080 sc->nr_scanned += nr_soft_scanned;
2081			/* need some check here to avoid calling shrink_zone() again unnecessarily */
2082 }
2083
2084 shrink_zone(priority, zone, sc);
2085 }
2086}
2087
2088static bool zone_reclaimable(struct zone *zone)
2089{
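	/*
	 * A zone counts as reclaimable until it has been scanned roughly
	 * six times over without any pages being freed from it.
	 */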
2090 return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
2091}
2092
2093/* All zones in zonelist are unreclaimable? */
2094static bool all_unreclaimable(struct zonelist *zonelist,
2095 struct scan_control *sc)
2096{
2097 struct zoneref *z;
2098 struct zone *zone;
2099
2100 for_each_zone_zonelist_nodemask(zone, z, zonelist,
2101 gfp_zone(sc->gfp_mask), sc->nodemask) {
2102 if (!populated_zone(zone))
2103 continue;
2104 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2105 continue;
2106 if (!zone->all_unreclaimable)
2107 return false;
2108 }
2109
2110 return true;
2111}
2112
2113/*
2114 * This is the main entry point to direct page reclaim.
2115 *
2116 * If a full scan of the inactive list fails to free enough memory then we
2117 * are "out of memory" and something needs to be killed.
2118 *
2119 * If the caller is !__GFP_FS then the probability of a failure is reasonably
2120 * high - the zone may be full of dirty or under-writeback pages, which this
2121 * caller can't do much about. We kick the writeback threads and take explicit
2122 * naps in the hope that some of these pages can be written. But if the
2123 * allocating task holds filesystem locks which prevent writeout this might not
2124 * work, and the allocation attempt will fail.
2125 *
2126 * returns: 0, if no pages reclaimed
2127 * else, the number of pages reclaimed
2128 */
2129static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2130 struct scan_control *sc,
2131 struct shrink_control *shrink)
2132{
2133 int priority;
2134 unsigned long total_scanned = 0;
2135 struct reclaim_state *reclaim_state = current->reclaim_state;
2136 struct zoneref *z;
2137 struct zone *zone;
2138 unsigned long writeback_threshold;
2139
2140 get_mems_allowed();
2141 delayacct_freepages_start();
2142
2143 if (scanning_global_lru(sc))
2144 count_vm_event(ALLOCSTALL);
2145
2146 for (priority = DEF_PRIORITY; priority >= 0; priority--) {
2147 sc->nr_scanned = 0;
2148 if (!priority)
2149 disable_swap_token(sc->mem_cgroup);
2150 shrink_zones(priority, zonelist, sc);
2151 /*
2152 * Don't shrink slabs when reclaiming memory from
2153 * over limit cgroups
2154 */
2155 if (scanning_global_lru(sc)) {
2156 unsigned long lru_pages = 0;
2157 for_each_zone_zonelist(zone, z, zonelist,
2158 gfp_zone(sc->gfp_mask)) {
2159 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2160 continue;
2161
2162 lru_pages += zone_reclaimable_pages(zone);
2163 }
2164
2165 shrink_slab(shrink, sc->nr_scanned, lru_pages);
2166 if (reclaim_state) {
2167 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
2168 reclaim_state->reclaimed_slab = 0;
2169 }
2170 }
2171 total_scanned += sc->nr_scanned;
2172 if (sc->nr_reclaimed >= sc->nr_to_reclaim)
2173 goto out;
2174
2175 /*
2176 * Try to write back as many pages as we just scanned. This
2177 * tends to cause slow streaming writers to write data to the
2178 * disk smoothly, at the dirtying rate, which is nice. But
2179 * that's undesirable in laptop mode, where we *want* lumpy
2180 * writeout. So in laptop mode, write out the whole world.
2181 */
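		/*
		 * The threshold is 1.5x the reclaim target, e.g. 48 pages
		 * when nr_to_reclaim is the usual SWAP_CLUSTER_MAX (32).
		 */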
2182 writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
2183 if (total_scanned > writeback_threshold) {
2184 wakeup_flusher_threads(laptop_mode ? 0 : total_scanned);
2185 sc->may_writepage = 1;
2186 }
2187
2188 /* Take a nap, wait for some writeback to complete */
2189 if (!sc->hibernation_mode && sc->nr_scanned &&
2190 priority < DEF_PRIORITY - 2) {
2191 struct zone *preferred_zone;
2192
2193 first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
2194 &cpuset_current_mems_allowed,
2195 &preferred_zone);
2196 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
2197 }
2198 }
2199
2200out:
2201 delayacct_freepages_end();
2202 put_mems_allowed();
2203
2204 if (sc->nr_reclaimed)
2205 return sc->nr_reclaimed;
2206
2207 /*
2208	 * While hibernation is in progress kswapd is frozen, so it cannot mark
2209	 * zones all_unreclaimable. Bypass the all_unreclaimable check in that
2210	 * case.
2211 */
2212 if (oom_killer_disabled)
2213 return 0;
2214
2215 /* top priority shrink_zones still had more to do? don't OOM, then */
2216 if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
2217 return 1;
2218
2219 return 0;
2220}
2221
2222unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
2223 gfp_t gfp_mask, nodemask_t *nodemask)
2224{
2225 unsigned long nr_reclaimed;
2226 struct scan_control sc = {
2227 .gfp_mask = gfp_mask,
2228 .may_writepage = !laptop_mode,
2229 .nr_to_reclaim = SWAP_CLUSTER_MAX,
2230 .may_unmap = 1,
2231 .may_swap = 1,
2232 .order = order,
2233 .mem_cgroup = NULL,
2234 .nodemask = nodemask,
2235 };
2236 struct shrink_control shrink = {
2237 .gfp_mask = sc.gfp_mask,
2238 };
2239
2240 trace_mm_vmscan_direct_reclaim_begin(order,
2241 sc.may_writepage,
2242 gfp_mask);
2243
2244 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
2245
2246 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
2247
2248 return nr_reclaimed;
2249}
2250
2251#ifdef CONFIG_CGROUP_MEM_RES_CTLR
2252
2253unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
2254 gfp_t gfp_mask, bool noswap,
2255 struct zone *zone,
2256 unsigned long *nr_scanned)
2257{
2258 struct scan_control sc = {
2259 .nr_scanned = 0,
2260 .nr_to_reclaim = SWAP_CLUSTER_MAX,
2261 .may_writepage = !laptop_mode,
2262 .may_unmap = 1,
2263 .may_swap = !noswap,
2264 .order = 0,
2265 .mem_cgroup = mem,
2266 };
2267
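	/*
	 * Keep the caller's reclaim-behaviour bits (__GFP_IO, __GFP_FS, etc.)
	 * but take the zone and placement modifiers from GFP_HIGHUSER_MOVABLE,
	 * since this reclaim is not restricted to the caller's allocation zone.
	 */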
2268 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2269 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
2270
2271 trace_mm_vmscan_memcg_softlimit_reclaim_begin(0,
2272 sc.may_writepage,
2273 sc.gfp_mask);
2274
2275 /*
2276 * NOTE: Although we can get the priority field, using it
2277 * here is not a good idea, since it limits the pages we can scan.
2278 * if we don't reclaim here, the shrink_zone from balance_pgdat
2279 * will pick up pages from other mem cgroup's as well. We hack
2280 * the priority and make it zero.
2281 */
2282 shrink_zone(0, zone, &sc);
2283
2284 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
2285
2286 *nr_scanned = sc.nr_scanned;
2287 return sc.nr_reclaimed;
2288}
2289
2290unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
2291 gfp_t gfp_mask,
2292 bool noswap)
2293{
2294 struct zonelist *zonelist;
2295 unsigned long nr_reclaimed;
2296 int nid;
2297 struct scan_control sc = {
2298 .may_writepage = !laptop_mode,
2299 .may_unmap = 1,
2300 .may_swap = !noswap,
2301 .nr_to_reclaim = SWAP_CLUSTER_MAX,
2302 .order = 0,
2303 .mem_cgroup = mem_cont,
2304		.nodemask = NULL, /* we don't care about placement */
2305 .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2306 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
2307 };
2308 struct shrink_control shrink = {
2309 .gfp_mask = sc.gfp_mask,
2310 };
2311
2312 /*
2313	 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
2314	 * care which node its pages come from, so the node where we start
2315	 * the scan does not need to be the current node.
2316 */
2317 nid = mem_cgroup_select_victim_node(mem_cont);
2318
2319 zonelist = NODE_DATA(nid)->node_zonelists;
2320
2321 trace_mm_vmscan_memcg_reclaim_begin(0,
2322 sc.may_writepage,
2323 sc.gfp_mask);
2324
2325 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
2326
2327 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
2328
2329 return nr_reclaimed;
2330}
2331#endif
2332
2333/*
2334 * pgdat_balanced is used when checking if a node is balanced for high-order
2335 * allocations. Only zones that meet watermarks and are in a zone allowed
2336 * by the callers classzone_idx are added to balanced_pages. The total of
2337 * balanced pages must be at least 25% of the zones allowed by classzone_idx
2338 * for the node to be considered balanced. Forcing all zones to be balanced
2339 * for high orders can cause excessive reclaim when there are imbalanced zones.
2340 * The choice of 25% is due to
2341 * o a 16M DMA zone that is balanced will not balance a zone on any
2342 * reasonable sized machine
2343 * o On all other machines, the top zone must be at least a reasonable
2344 * percentage of the middle zones. For example, on 32-bit x86, highmem
2345 * would need to be at least 256M to balance a whole node.
2346 * Similarly, on x86-64 the Normal zone would need to be at least 1G
2347 * to balance a node on its own. These seemed like reasonable ratios.
2348 */
2349static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages,
2350 int classzone_idx)
2351{
2352 unsigned long present_pages = 0;
2353 int i;
2354
2355 for (i = 0; i <= classzone_idx; i++)
2356 present_pages += pgdat->node_zones[i].present_pages;
2357
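	/*
	 * e.g. with 1,000,000 pages in zones up to classzone_idx, at least
	 * 250,000 of them must sit in balanced zones.
	 */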
2358 /* A special case here: if zone has no page, we think it's balanced */
2359 return balanced_pages >= (present_pages >> 2);
2360}
2361
2362/* is kswapd sleeping prematurely? */
2363static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
2364 int classzone_idx)
2365{
2366 int i;
2367 unsigned long balanced = 0;
2368 bool all_zones_ok = true;
2369
2370 /* If a direct reclaimer woke kswapd within HZ/10, it's premature */
2371 if (remaining)
2372 return true;
2373
2374 /* Check the watermark levels */
2375 for (i = 0; i <= classzone_idx; i++) {
2376 struct zone *zone = pgdat->node_zones + i;
2377
2378 if (!populated_zone(zone))
2379 continue;
2380
2381 /*
2382 * balance_pgdat() skips over all_unreclaimable after
2383 * DEF_PRIORITY. Effectively, it considers them balanced so
2384 * they must be considered balanced here as well if kswapd
2385 * is to sleep
2386 */
2387 if (zone->all_unreclaimable) {
2388 balanced += zone->present_pages;
2389 continue;
2390 }
2391
2392 if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
2393 i, 0))
2394 all_zones_ok = false;
2395 else
2396 balanced += zone->present_pages;
2397 }
2398
2399 /*
2400 * For high-order requests, the balanced zones must contain at least
2401	 * 25% of the node's pages for kswapd to sleep. For order-0, all zones
2402	 * must be balanced.
2403 */
2404 if (order)
2405 return !pgdat_balanced(pgdat, balanced, classzone_idx);
2406 else
2407 return !all_zones_ok;
2408}
2409
2410/*
2411 * For kswapd, balance_pgdat() will work across all this node's zones until
2412 * they are all at high_wmark_pages(zone).
2413 *
2414 * Returns the final order kswapd was reclaiming at
2415 *
2416 * There is special handling here for zones which are full of pinned pages.
2417 * This can happen if the pages are all mlocked, or if they are all used by
2418 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb.
2419 * What we do is to detect the case where all pages in the zone have been
2420 * scanned twice and there has been zero successful reclaim. Mark the zone as
2421 * dead and from now on, only perform a short scan. Basically we're polling
2422 * the zone for when the problem goes away.
2423 *
2424 * kswapd scans the zones in the highmem->normal->dma direction. It skips
2425 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
2426 * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
2427 * lower zones regardless of the number of free pages in the lower zones. This
2428 * interoperates with the page allocator fallback scheme to ensure that aging
2429 * of pages is balanced across the zones.
2430 */
2431static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
2432 int *classzone_idx)
2433{
2434 int all_zones_ok;
2435 unsigned long balanced;
2436 int priority;
2437 int i;
2438 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
2439 unsigned long total_scanned;
2440 struct reclaim_state *reclaim_state = current->reclaim_state;
2441 unsigned long nr_soft_reclaimed;
2442 unsigned long nr_soft_scanned;
2443 struct scan_control sc = {
2444 .gfp_mask = GFP_KERNEL,
2445 .may_unmap = 1,
2446 .may_swap = 1,
2447 /*
2448		 * kswapd should not bail out of reclaim early, because
2449 * we want to put equal scanning pressure on each zone.
2450 */
2451 .nr_to_reclaim = ULONG_MAX,
2452 .order = order,
2453 .mem_cgroup = NULL,
2454 };
2455 struct shrink_control shrink = {
2456 .gfp_mask = sc.gfp_mask,
2457 };
2458loop_again:
2459 total_scanned = 0;
2460 sc.nr_reclaimed = 0;
2461 sc.may_writepage = !laptop_mode;
2462 count_vm_event(PAGEOUTRUN);
2463
2464 for (priority = DEF_PRIORITY; priority >= 0; priority--) {
2465 unsigned long lru_pages = 0;
2466 int has_under_min_watermark_zone = 0;
2467
2468 /* The swap token gets in the way of swapout... */
2469 if (!priority)
2470 disable_swap_token(NULL);
2471
2472 all_zones_ok = 1;
2473 balanced = 0;
2474
2475 /*
2476 * Scan in the highmem->dma direction for the highest
2477 * zone which needs scanning
2478 */
2479 for (i = pgdat->nr_zones - 1; i >= 0; i--) {
2480 struct zone *zone = pgdat->node_zones + i;
2481
2482 if (!populated_zone(zone))
2483 continue;
2484
2485 if (zone->all_unreclaimable && priority != DEF_PRIORITY)
2486 continue;
2487
2488 /*
2489 * Do some background aging of the anon list, to give
2490 * pages a chance to be referenced before reclaiming.
2491 */
2492 if (inactive_anon_is_low(zone, &sc))
2493 shrink_active_list(SWAP_CLUSTER_MAX, zone,
2494 &sc, priority, 0);
2495
2496 if (!zone_watermark_ok_safe(zone, order,
2497 high_wmark_pages(zone), 0, 0)) {
2498 end_zone = i;
2499 break;
2500 } else {
2501 /* If balanced, clear the congested flag */
2502 zone_clear_flag(zone, ZONE_CONGESTED);
2503 }
2504 }
2505 if (i < 0)
2506 goto out;
2507
2508 for (i = 0; i <= end_zone; i++) {
2509 struct zone *zone = pgdat->node_zones + i;
2510
2511 lru_pages += zone_reclaimable_pages(zone);
2512 }
2513
2514 /*
2515 * Now scan the zone in the dma->highmem direction, stopping
2516 * at the last zone which needs scanning.
2517 *
2518 * We do this because the page allocator works in the opposite
2519 * direction. This prevents the page allocator from allocating
2520 * pages behind kswapd's direction of progress, which would
2521 * cause too much scanning of the lower zones.
2522 */
2523 for (i = 0; i <= end_zone; i++) {
2524 struct zone *zone = pgdat->node_zones + i;
2525 int nr_slab;
2526 unsigned long balance_gap;
2527
2528 if (!populated_zone(zone))
2529 continue;
2530
2531 if (zone->all_unreclaimable && priority != DEF_PRIORITY)
2532 continue;
2533
2534 sc.nr_scanned = 0;
2535
2536 nr_soft_scanned = 0;
2537 /*
2538 * Call soft limit reclaim before calling shrink_zone.
2539 */
2540 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
2541 order, sc.gfp_mask,
2542 &nr_soft_scanned);
2543 sc.nr_reclaimed += nr_soft_reclaimed;
2544 total_scanned += nr_soft_scanned;
2545
2546 /*
2547 * We put equal pressure on every zone, unless
2548 * one zone has way too many pages free
2549 * already. The "too many pages" is defined
2550 * as the high wmark plus a "gap" where the
2551 * gap is either the low watermark or 1%
2552 * of the zone, whichever is smaller.
2553 */
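			/*
			 * e.g. for a 262144-page (1GB with 4K pages) zone the
			 * 1% term is (262144 + 99) / 100 = 2622 pages, so the
			 * gap is min(low_wmark_pages(zone), 2622).
			 */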
2554 balance_gap = min(low_wmark_pages(zone),
2555 (zone->present_pages +
2556 KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
2557 KSWAPD_ZONE_BALANCE_GAP_RATIO);
2558 if (!zone_watermark_ok_safe(zone, order,
2559 high_wmark_pages(zone) + balance_gap,
2560 end_zone, 0)) {
2561 shrink_zone(priority, zone, &sc);
2562
2563 reclaim_state->reclaimed_slab = 0;
2564 nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
2565 sc.nr_reclaimed += reclaim_state->reclaimed_slab;
2566 total_scanned += sc.nr_scanned;
2567
2568 if (nr_slab == 0 && !zone_reclaimable(zone))
2569 zone->all_unreclaimable = 1;
2570 }
2571
2572 /*
2573 * If we've done a decent amount of scanning and
2574 * the reclaim ratio is low, start doing writepage
2575 * even in laptop mode
2576 */
2577 if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
2578 total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
2579 sc.may_writepage = 1;
2580
2581 if (zone->all_unreclaimable) {
2582 if (end_zone && end_zone == i)
2583 end_zone--;
2584 continue;
2585 }
2586
2587 if (!zone_watermark_ok_safe(zone, order,
2588 high_wmark_pages(zone), end_zone, 0)) {
2589 all_zones_ok = 0;
2590 /*
2591 * We are still under min water mark. This
2592 * means that we have a GFP_ATOMIC allocation
2593 * failure risk. Hurry up!
2594 */
2595 if (!zone_watermark_ok_safe(zone, order,
2596 min_wmark_pages(zone), end_zone, 0))
2597 has_under_min_watermark_zone = 1;
2598 } else {
2599 /*
2600 * If a zone reaches its high watermark,
2601 * consider it to be no longer congested. It's
2602 * possible there are dirty pages backed by
2603 * congested BDIs but as pressure is relieved,
2604				 * speculatively avoid congestion waits.
2605 */
2606 zone_clear_flag(zone, ZONE_CONGESTED);
2607 if (i <= *classzone_idx)
2608 balanced += zone->present_pages;
2609 }
2610
2611 }
2612 if (all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))
2613 break; /* kswapd: all done */
2614 /*
2615 * OK, kswapd is getting into trouble. Take a nap, then take
2616 * another pass across the zones.
2617 */
2618 if (total_scanned && (priority < DEF_PRIORITY - 2)) {
2619 if (has_under_min_watermark_zone)
2620 count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
2621 else
2622 congestion_wait(BLK_RW_ASYNC, HZ/10);
2623 }
2624
2625 /*
2626 * We do this so kswapd doesn't build up large priorities for
2627 * example when it is freeing in parallel with allocators. It
2628 * matches the direct reclaim path behaviour in terms of impact
2629 * on zone->*_priority.
2630 */
2631 if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
2632 break;
2633 }
2634out:
2635
2636 /*
2637 * order-0: All zones must meet high watermark for a balanced node
2638 * high-order: Balanced zones must make up at least 25% of the node
2639 * for the node to be balanced
2640 */
2641 if (!(all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))) {
2642 cond_resched();
2643
2644 try_to_freeze();
2645
2646 /*
2647 * Fragmentation may mean that the system cannot be
2648 * rebalanced for high-order allocations in all zones.
2649 * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX,
2650 * it means the zones have been fully scanned and are still
2651 * not balanced. For high-order allocations, there is
2652 * little point trying all over again as kswapd may
2653		 * loop forever.
2654 *
2655 * Instead, recheck all watermarks at order-0 as they
2656 * are the most important. If watermarks are ok, kswapd will go
2657 * back to sleep. High-order users can still perform direct
2658 * reclaim if they wish.
2659 */
2660 if (sc.nr_reclaimed < SWAP_CLUSTER_MAX)
2661 order = sc.order = 0;
2662
2663 goto loop_again;
2664 }
2665
2666 /*
2667 * If kswapd was reclaiming at a higher order, it has the option of
2668 * sleeping without all zones being balanced. Before it does, it must
2669 * ensure that the watermarks for order-0 on *all* zones are met and
2670 * that the congestion flags are cleared. The congestion flag must
2671 * be cleared as kswapd is the only mechanism that clears the flag
2672 * and it is potentially going to sleep here.
2673 */
2674 if (order) {
2675 for (i = 0; i <= end_zone; i++) {
2676 struct zone *zone = pgdat->node_zones + i;
2677
2678 if (!populated_zone(zone))
2679 continue;
2680
2681 if (zone->all_unreclaimable && priority != DEF_PRIORITY)
2682 continue;
2683
2684 /* Confirm the zone is balanced for order-0 */
2685 if (!zone_watermark_ok(zone, 0,
2686 high_wmark_pages(zone), 0, 0)) {
2687 order = sc.order = 0;
2688 goto loop_again;
2689 }
2690
2691 /* If balanced, clear the congested flag */
2692 zone_clear_flag(zone, ZONE_CONGESTED);
2693 }
2694 }
2695
2696 /*
2697 * Return the order we were reclaiming at so sleeping_prematurely()
2698	 * can base its decision on it. However,
2699 * if another caller entered the allocator slow path while kswapd
2700 * was awake, order will remain at the higher level
2701 */
2702 *classzone_idx = end_zone;
2703 return order;
2704}
2705
2706static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
2707{
2708 long remaining = 0;
2709 DEFINE_WAIT(wait);
2710
2711 if (freezing(current) || kthread_should_stop())
2712 return;
2713
2714 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2715
2716 /* Try to sleep for a short interval */
2717 if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) {
2718 remaining = schedule_timeout(HZ/10);
2719 finish_wait(&pgdat->kswapd_wait, &wait);
2720 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2721 }
2722
2723 /*
2724 * After a short sleep, check if it was a premature sleep. If not, then
2725 * go fully to sleep until explicitly woken up.
2726 */
2727 if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) {
2728 trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
2729
2730 /*
2731 * vmstat counters are not perfectly accurate and the estimated
2732 * value for counters such as NR_FREE_PAGES can deviate from the
2733 * true value by nr_online_cpus * threshold. To avoid the zone
2734 * watermarks being breached while under pressure, we reduce the
2735 * per-cpu vmstat threshold while kswapd is awake and restore
2736 * them before going back to sleep.
2737 */
2738 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
2739 schedule();
2740 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
2741 } else {
2742 if (remaining)
2743 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
2744 else
2745 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
2746 }
2747 finish_wait(&pgdat->kswapd_wait, &wait);
2748}
2749
2750/*
2751 * The background pageout daemon, started as a kernel thread
2752 * from the init process.
2753 *
2754 * This basically trickles out pages so that we have _some_
2755 * free memory available even if there is no other activity
2756 * that frees anything up. This is needed for things like routing
2757 * etc, where we otherwise might have all activity going on in
2758 * asynchronous contexts that cannot page things out.
2759 *
2760 * If there are applications that are active memory-allocators
2761 * (most normal use), this basically shouldn't matter.
2762 */
2763static int kswapd(void *p)
2764{
2765 unsigned long order, new_order;
2766 int classzone_idx, new_classzone_idx;
2767 pg_data_t *pgdat = (pg_data_t*)p;
2768 struct task_struct *tsk = current;
2769
2770 struct reclaim_state reclaim_state = {
2771 .reclaimed_slab = 0,
2772 };
2773 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2774
2775 lockdep_set_current_reclaim_state(GFP_KERNEL);
2776
2777 if (!cpumask_empty(cpumask))
2778 set_cpus_allowed_ptr(tsk, cpumask);
2779 current->reclaim_state = &reclaim_state;
2780
2781 /*
2782 * Tell the memory management that we're a "memory allocator",
2783 * and that if we need more memory we should get access to it
2784 * regardless (see "__alloc_pages()"). "kswapd" should
2785 * never get caught in the normal page freeing logic.
2786 *
2787 * (Kswapd normally doesn't need memory anyway, but sometimes
2788 * you need a small amount of memory in order to be able to
2789 * page out something else, and this flag essentially protects
2790 * us from recursively trying to free more memory as we're
2791 * trying to free the first piece of memory in the first place).
2792 */
2793 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
2794 set_freezable();
2795
2796 order = new_order = 0;
2797 classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
2798 for ( ; ; ) {
2799 int ret;
2800
2801 /*
2802 * If the last balance_pgdat was unsuccessful it's unlikely a
2803 * new request of a similar or harder type will succeed soon
2804		 * so consider going to sleep based on the order we just reclaimed at.
2805 */
2806 if (classzone_idx >= new_classzone_idx && order == new_order) {
2807 new_order = pgdat->kswapd_max_order;
2808 new_classzone_idx = pgdat->classzone_idx;
2809 pgdat->kswapd_max_order = 0;
2810 pgdat->classzone_idx = pgdat->nr_zones - 1;
2811 }
2812
2813 if (order < new_order || classzone_idx > new_classzone_idx) {
2814 /*
2815 * Don't sleep if someone wants a larger 'order'
2816			 * allocation or has tighter zone constraints
2817 */
2818 order = new_order;
2819 classzone_idx = new_classzone_idx;
2820 } else {
2821 kswapd_try_to_sleep(pgdat, order, classzone_idx);
2822 order = pgdat->kswapd_max_order;
2823 classzone_idx = pgdat->classzone_idx;
2824 pgdat->kswapd_max_order = 0;
2825 pgdat->classzone_idx = pgdat->nr_zones - 1;
2826 }
2827
2828 ret = try_to_freeze();
2829 if (kthread_should_stop())
2830 break;
2831
2832 /*
2833 * We can speed up thawing tasks if we don't call balance_pgdat
2834 * after returning from the refrigerator
2835 */
2836 if (!ret) {
2837 trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
2838 order = balance_pgdat(pgdat, order, &classzone_idx);
2839 }
2840 }
2841 return 0;
2842}
2843
2844/*
2845 * A zone is low on free memory, so wake its kswapd task to service it.
2846 */
2847void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
2848{
2849 pg_data_t *pgdat;
2850
2851 if (!populated_zone(zone))
2852 return;
2853
2854 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2855 return;
2856 pgdat = zone->zone_pgdat;
2857 if (pgdat->kswapd_max_order < order) {
2858 pgdat->kswapd_max_order = order;
2859 pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx);
2860 }
2861 if (!waitqueue_active(&pgdat->kswapd_wait))
2862 return;
2863 if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0))
2864 return;
2865
2866 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
2867 wake_up_interruptible(&pgdat->kswapd_wait);
2868}
2869
2870/*
2871 * The reclaimable count is only approximate. Pages that are less readily
2872 * reclaimable include:
2873 * - mlocked pages, which will be moved to the unevictable list when encountered
2874 * - mapped pages, which may require several passes before they can be reclaimed
2875 * - dirty pages, which are not "instantly" reclaimable
2876 */
2877unsigned long global_reclaimable_pages(void)
2878{
2879	unsigned long nr;
2880
2881 nr = global_page_state(NR_ACTIVE_FILE) +
2882 global_page_state(NR_INACTIVE_FILE);
2883
2884 if (nr_swap_pages > 0)
2885 nr += global_page_state(NR_ACTIVE_ANON) +
2886 global_page_state(NR_INACTIVE_ANON);
2887
2888 return nr;
2889}
2890
2891unsigned long zone_reclaimable_pages(struct zone *zone)
2892{
2893	unsigned long nr;
2894
2895 nr = zone_page_state(zone, NR_ACTIVE_FILE) +
2896 zone_page_state(zone, NR_INACTIVE_FILE);
2897
2898 if (nr_swap_pages > 0)
2899 nr += zone_page_state(zone, NR_ACTIVE_ANON) +
2900 zone_page_state(zone, NR_INACTIVE_ANON);
2901
2902 return nr;
2903}
2904
2905#ifdef CONFIG_HIBERNATION
2906/*
2907 * Try to free `nr_to_reclaim' pages of memory, system-wide, and return the number of
2908 * freed pages.
2909 *
2910 * Rather than trying to age LRUs the aim is to preserve the overall
2911 * LRU order by reclaiming preferentially
2912 * inactive > active > active referenced > active mapped
2913 */
2914unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
2915{
2916 struct reclaim_state reclaim_state;
2917 struct scan_control sc = {
2918 .gfp_mask = GFP_HIGHUSER_MOVABLE,
2919 .may_swap = 1,
2920 .may_unmap = 1,
2921 .may_writepage = 1,
2922 .nr_to_reclaim = nr_to_reclaim,
2923 .hibernation_mode = 1,
2924 .order = 0,
2925 };
2926 struct shrink_control shrink = {
2927 .gfp_mask = sc.gfp_mask,
2928 };
2929 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
2930 struct task_struct *p = current;
2931 unsigned long nr_reclaimed;
2932
2933 p->flags |= PF_MEMALLOC;
2934 lockdep_set_current_reclaim_state(sc.gfp_mask);
2935 reclaim_state.reclaimed_slab = 0;
2936 p->reclaim_state = &reclaim_state;
2937
2938 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
2939
2940 p->reclaim_state = NULL;
2941 lockdep_clear_current_reclaim_state();
2942 p->flags &= ~PF_MEMALLOC;
2943
2944 return nr_reclaimed;
2945}
2946#endif /* CONFIG_HIBERNATION */
2947
2948/* It's optimal to keep kswapds on the same CPUs as their memory, but
2949 not required for correctness. So if the last cpu in a node goes
2950 away, we get changed to run anywhere: as the first one comes back,
2951 restore their cpu bindings. */
2952static int __devinit cpu_callback(struct notifier_block *nfb,
2953 unsigned long action, void *hcpu)
2954{
2955 int nid;
2956
2957 if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
2958 for_each_node_state(nid, N_HIGH_MEMORY) {
2959 pg_data_t *pgdat = NODE_DATA(nid);
2960 const struct cpumask *mask;
2961
2962 mask = cpumask_of_node(pgdat->node_id);
2963
2964 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
2965 /* One of our CPUs online: restore mask */
2966 set_cpus_allowed_ptr(pgdat->kswapd, mask);
2967 }
2968 }
2969 return NOTIFY_OK;
2970}
2971
2972/*
2973 * This kswapd start function will be called by init and node-hot-add.
2974 * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
2975 */
2976int kswapd_run(int nid)
2977{
2978 pg_data_t *pgdat = NODE_DATA(nid);
2979 int ret = 0;
2980
2981 if (pgdat->kswapd)
2982 return 0;
2983
2984 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
2985 if (IS_ERR(pgdat->kswapd)) {
2986 /* failure at boot is fatal */
2987 BUG_ON(system_state == SYSTEM_BOOTING);
2988		printk(KERN_ERR "Failed to start kswapd on node %d\n", nid);
2989 ret = -1;
2990 }
2991 return ret;
2992}
2993
2994/*
2995 * Called by memory hotplug when all memory in a node is offlined.
2996 */
2997void kswapd_stop(int nid)
2998{
2999 struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
3000
3001 if (kswapd)
3002 kthread_stop(kswapd);
3003}
3004
3005static int __init kswapd_init(void)
3006{
3007 int nid;
3008
3009 swap_setup();
3010 for_each_node_state(nid, N_HIGH_MEMORY)
3011 kswapd_run(nid);
3012 hotcpu_notifier(cpu_callback, 0);
3013 return 0;
3014}
3015
3016module_init(kswapd_init)
3017
3018#ifdef CONFIG_NUMA
3019/*
3020 * Zone reclaim mode
3021 *
3022 * If non-zero call zone_reclaim when the number of free pages falls below
3023 * the watermarks.
3024 */
3025int zone_reclaim_mode __read_mostly;
3026
3027#define RECLAIM_OFF 0
3028#define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */
3029#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
3030#define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */
3031
3032/*
3033 * Priority for ZONE_RECLAIM. This determines the fraction of the pages
3034 * in a zone that is considered by each zone_reclaim run; priority 4
3035 * scans 1/16th of the zone.
3036 */
3037#define ZONE_RECLAIM_PRIORITY 4
3038
3039/*
3040 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
3041 * occur.
3042 */
3043int sysctl_min_unmapped_ratio = 1;
3044
3045/*
3046 * If the number of slab pages in a zone grows beyond this percentage then
3047 * slab reclaim needs to occur.
3048 */
3049int sysctl_min_slab_ratio = 5;
3050
3051static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
3052{
3053 unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
3054 unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
3055 zone_page_state(zone, NR_ACTIVE_FILE);
3056
3057 /*
3058 * It's possible for there to be more file mapped pages than
3059 * accounted for by the pages on the file LRU lists because
3060 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
3061 */
3062 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
3063}
3064
3065/* Work out how many page cache pages we can reclaim in this reclaim_mode */
3066static long zone_pagecache_reclaimable(struct zone *zone)
3067{
3068 long nr_pagecache_reclaimable;
3069 long delta = 0;
3070
3071 /*
3072 * If RECLAIM_SWAP is set, then all file pages are considered
3073 * potentially reclaimable. Otherwise, we have to worry about
3074 * pages like swapcache and zone_unmapped_file_pages() provides
3075 * a better estimate
3076 */
3077 if (zone_reclaim_mode & RECLAIM_SWAP)
3078 nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
3079 else
3080 nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
3081
3082 /* If we can't clean pages, remove dirty pages from consideration */
3083 if (!(zone_reclaim_mode & RECLAIM_WRITE))
3084 delta += zone_page_state(zone, NR_FILE_DIRTY);
3085
3086 /* Watch for any possible underflows due to delta */
3087 if (unlikely(delta > nr_pagecache_reclaimable))
3088 delta = nr_pagecache_reclaimable;
3089
3090 return nr_pagecache_reclaimable - delta;
3091}
3092
3093/*
3094 * Try to free up some pages from this zone through reclaim.
3095 */
3096static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
3097{
3098 /* Minimum pages needed in order to stay on node */
3099 const unsigned long nr_pages = 1 << order;
3100 struct task_struct *p = current;
3101 struct reclaim_state reclaim_state;
3102 int priority;
3103 struct scan_control sc = {
3104 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
3105 .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
3106 .may_swap = 1,
3107 .nr_to_reclaim = max_t(unsigned long, nr_pages,
3108 SWAP_CLUSTER_MAX),
3109 .gfp_mask = gfp_mask,
3110 .order = order,
3111 };
3112 struct shrink_control shrink = {
3113 .gfp_mask = sc.gfp_mask,
3114 };
3115 unsigned long nr_slab_pages0, nr_slab_pages1;
3116
3117 cond_resched();
3118 /*
3119 * We need to be able to allocate from the reserves for RECLAIM_SWAP
3120 * and we also need to be able to write out pages for RECLAIM_WRITE
3121 * and RECLAIM_SWAP.
3122 */
3123 p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
3124 lockdep_set_current_reclaim_state(gfp_mask);
3125 reclaim_state.reclaimed_slab = 0;
3126 p->reclaim_state = &reclaim_state;
3127
3128 if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
3129 /*
3130 * Free memory by calling shrink zone with increasing
3131 * priorities until we have enough memory freed.
3132 */
3133 priority = ZONE_RECLAIM_PRIORITY;
3134 do {
3135 shrink_zone(priority, zone, &sc);
3136 priority--;
3137 } while (priority >= 0 && sc.nr_reclaimed < nr_pages);
3138 }
3139
3140 nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
3141 if (nr_slab_pages0 > zone->min_slab_pages) {
3142 /*
3143 * shrink_slab() does not currently allow us to determine how
3144 * many pages were freed in this zone. So we take the current
3145 * number of slab pages and shake the slab until it is reduced
3146 * by the same nr_pages that we used for reclaiming unmapped
3147 * pages.
3148 *
3149 * Note that shrink_slab will free memory on all zones and may
3150 * take a long time.
3151 */
3152 for (;;) {
3153 unsigned long lru_pages = zone_reclaimable_pages(zone);
3154
3155 /* No reclaimable slab or very low memory pressure */
3156 if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages))
3157 break;
3158
3159 /* Freed enough memory */
3160 nr_slab_pages1 = zone_page_state(zone,
3161 NR_SLAB_RECLAIMABLE);
3162 if (nr_slab_pages1 + nr_pages <= nr_slab_pages0)
3163 break;
3164 }
3165
3166 /*
3167 * Update nr_reclaimed by the number of slab pages we
3168 * reclaimed from this zone.
3169 */
3170 nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
3171 if (nr_slab_pages1 < nr_slab_pages0)
3172 sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1;
3173 }
3174
3175 p->reclaim_state = NULL;
3176 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
3177 lockdep_clear_current_reclaim_state();
3178 return sc.nr_reclaimed >= nr_pages;
3179}
3180
3181int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
3182{
3183 int node_id;
3184 int ret;
3185
3186 /*
3187 * Zone reclaim reclaims unmapped file backed pages and
3188 * slab pages if we are over the defined limits.
3189 *
3190 * A small portion of unmapped file backed pages is needed for
3191	 * file I/O, otherwise pages read by file I/O will be immediately
3192 * thrown out if the zone is overallocated. So we do not reclaim
3193 * if less than a specified percentage of the zone is used by
3194 * unmapped file backed pages.
3195 */
3196 if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
3197 zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
3198 return ZONE_RECLAIM_FULL;
3199
3200 if (zone->all_unreclaimable)
3201 return ZONE_RECLAIM_FULL;
3202
3203 /*
3204 * Do not scan if the allocation should not be delayed.
3205 */
3206 if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
3207 return ZONE_RECLAIM_NOSCAN;
3208
3209 /*
3210 * Only run zone reclaim on the local zone or on zones that do not
3211 * have associated processors. This will favor the local processor
3212 * over remote processors and spread off node memory allocations
3213	 * as widely as possible.
3214 */
3215 node_id = zone_to_nid(zone);
3216 if (node_state(node_id, N_CPU) && node_id != numa_node_id())
3217 return ZONE_RECLAIM_NOSCAN;
3218
3219 if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
3220 return ZONE_RECLAIM_NOSCAN;
3221
3222 ret = __zone_reclaim(zone, gfp_mask, order);
3223 zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
3224
3225 if (!ret)
3226 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
3227
3228 return ret;
3229}
3230#endif
3231
3232/*
3233 * page_evictable - test whether a page is evictable
3234 * @page: the page to test
3235 * @vma: the VMA in which the page is or will be mapped, may be NULL
3236 *
3237 * Test whether page is evictable--i.e., should be placed on active/inactive
3238 * lists vs unevictable list. The vma argument is !NULL when called from the
3239 * fault path to determine how to instantiate a new page.
3240 *
3241 * Reasons page might not be evictable:
3242 * (1) page's mapping marked unevictable
3243 * (2) page is part of an mlocked VMA
3244 *
3245 */
3246int page_evictable(struct page *page, struct vm_area_struct *vma)
3247{
3248
3249 if (mapping_unevictable(page_mapping(page)))
3250 return 0;
3251
3252 if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
3253 return 0;
3254
3255 return 1;
3256}
3257
3258/**
3259 * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
3260 * @page: page to check evictability and move to appropriate lru list
3261 * @zone: zone page is in
3262 *
3263 * Checks a page for evictability and moves the page to the appropriate
3264 * zone lru list.
3265 *
3266 * Restrictions: zone->lru_lock must be held, page must be on LRU and must
3267 * have PageUnevictable set.
3268 */
3269static void check_move_unevictable_page(struct page *page, struct zone *zone)
3270{
3271 VM_BUG_ON(PageActive(page));
3272
3273retry:
3274 ClearPageUnevictable(page);
3275 if (page_evictable(page, NULL)) {
3276 enum lru_list l = page_lru_base_type(page);
3277
3278 __dec_zone_state(zone, NR_UNEVICTABLE);
3279 list_move(&page->lru, &zone->lru[l].list);
3280 mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
3281 __inc_zone_state(zone, NR_INACTIVE_ANON + l);
3282 __count_vm_event(UNEVICTABLE_PGRESCUED);
3283 } else {
3284 /*
3285 * rotate unevictable list
3286 */
3287 SetPageUnevictable(page);
3288 list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
3289 mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
3290 if (page_evictable(page, NULL))
3291 goto retry;
3292 }
3293}
3294
3295/**
3296 * scan_mapping_unevictable_pages - scan an address space for evictable pages
3297 * @mapping: struct address_space to scan for evictable pages
3298 *
3299 * Scan all pages in mapping. Check unevictable pages for
3300 * evictability and move them to the appropriate zone lru list.
3301 */
3302void scan_mapping_unevictable_pages(struct address_space *mapping)
3303{
3304 pgoff_t next = 0;
3305 pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
3306 PAGE_CACHE_SHIFT;
3307 struct zone *zone;
3308 struct pagevec pvec;
3309
3310 if (mapping->nrpages == 0)
3311 return;
3312
3313 pagevec_init(&pvec, 0);
3314 while (next < end &&
3315 pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
3316 int i;
3317 int pg_scanned = 0;
3318
3319 zone = NULL;
3320
3321 for (i = 0; i < pagevec_count(&pvec); i++) {
3322 struct page *page = pvec.pages[i];
3323 pgoff_t page_index = page->index;
3324 struct zone *pagezone = page_zone(page);
3325
3326 pg_scanned++;
3327 if (page_index > next)
3328 next = page_index;
3329 next++;
3330
3331 if (pagezone != zone) {
3332 if (zone)
3333 spin_unlock_irq(&zone->lru_lock);
3334 zone = pagezone;
3335 spin_lock_irq(&zone->lru_lock);
3336 }
3337
3338 if (PageLRU(page) && PageUnevictable(page))
3339 check_move_unevictable_page(page, zone);
3340 }
3341 if (zone)
3342 spin_unlock_irq(&zone->lru_lock);
3343 pagevec_release(&pvec);
3344
3345 count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
3346 }
3347
3348}
3349
3350/**
3351 * scan_zone_unevictable_pages - check unevictable list for evictable pages
3352 * @zone: zone whose unevictable list is to be scanned
3353 *
3354 * Scan @zone's unevictable LRU lists to check for pages that have become
3355 * evictable. Move those that have to @zone's inactive list where they
3356 * become candidates for reclaim, unless shrink_inactive_list() decides
3357 * to reactivate them. Pages that are still unevictable are rotated
3358 * back onto @zone's unevictable list.
3359 */
3360#define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */
3361static void scan_zone_unevictable_pages(struct zone *zone)
3362{
3363 struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list;
3364 unsigned long scan;
3365 unsigned long nr_to_scan = zone_page_state(zone, NR_UNEVICTABLE);
3366
3367 while (nr_to_scan > 0) {
3368 unsigned long batch_size = min(nr_to_scan,
3369 SCAN_UNEVICTABLE_BATCH_SIZE);
3370
3371 spin_lock_irq(&zone->lru_lock);
3372 for (scan = 0; scan < batch_size; scan++) {
3373 struct page *page = lru_to_page(l_unevictable);
3374
3375 if (!trylock_page(page))
3376 continue;
3377
3378 prefetchw_prev_lru_page(page, l_unevictable, flags);
3379
3380 if (likely(PageLRU(page) && PageUnevictable(page)))
3381 check_move_unevictable_page(page, zone);
3382
3383 unlock_page(page);
3384 }
3385 spin_unlock_irq(&zone->lru_lock);
3386
3387 nr_to_scan -= batch_size;
3388 }
3389}
3390
3391
3392/**
3393 * scan_all_zones_unevictable_pages - scan all unevictable lists for evictable pages
3394 *
3395 * A really big hammer: scan all zones' unevictable LRU lists to check for
3396 * pages that have become evictable. Move those back to the zones'
3397 * inactive list where they become candidates for reclaim.
3398 * This occurs when, e.g., we have unswappable pages on the unevictable lists,
3399 * and we add swap to the system. As such, it runs in the context of a task
3400 * that has possibly/probably made some previously unevictable pages
3401 * evictable.
3402 */
3403static void scan_all_zones_unevictable_pages(void)
3404{
3405 struct zone *zone;
3406
3407 for_each_zone(zone) {
3408 scan_zone_unevictable_pages(zone);
3409 }
3410}
3411
3412/*
3413 * scan_unevictable_pages [vm] sysctl handler. On demand re-scan of
3414 * all nodes' unevictable lists for evictable pages
3415 */
3416unsigned long scan_unevictable_pages;
3417
3418int scan_unevictable_handler(struct ctl_table *table, int write,
3419 void __user *buffer,
3420 size_t *length, loff_t *ppos)
3421{
3422 proc_doulongvec_minmax(table, write, buffer, length, ppos);
3423
3424 if (write && *(unsigned long *)table->data)
3425 scan_all_zones_unevictable_pages();
3426
3427 scan_unevictable_pages = 0;
3428 return 0;
3429}
3430
3431#ifdef CONFIG_NUMA
3432/*
3433 * per node 'scan_unevictable_pages' attribute. On demand re-scan of
3434 * a specified node's per zone unevictable lists for evictable pages.
3435 */
3436
3437static ssize_t read_scan_unevictable_node(struct sys_device *dev,
3438 struct sysdev_attribute *attr,
3439 char *buf)
3440{
3441 return sprintf(buf, "0\n"); /* always zero; should fit... */
3442}
3443
3444static ssize_t write_scan_unevictable_node(struct sys_device *dev,
3445 struct sysdev_attribute *attr,
3446 const char *buf, size_t count)
3447{
3448 struct zone *node_zones = NODE_DATA(dev->id)->node_zones;
3449 struct zone *zone;
3450 unsigned long res;
3451	int err = strict_strtoul(buf, 10, &res);
3452
3453	if (err || !res)
3454		return count; /* a parse error or zero is a no-op */
3455
3456 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
3457 if (!populated_zone(zone))
3458 continue;
3459 scan_zone_unevictable_pages(zone);
3460 }
3461	return count;
3462}
3463
3464
3465static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
3466 read_scan_unevictable_node,
3467 write_scan_unevictable_node);
3468
3469int scan_unevictable_register_node(struct node *node)
3470{
3471 return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages);
3472}
3473
3474void scan_unevictable_unregister_node(struct node *node)
3475{
3476 sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
3477}
3478#endif
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16#include <linux/mm.h>
17#include <linux/module.h>
18#include <linux/gfp.h>
19#include <linux/kernel_stat.h>
20#include <linux/swap.h>
21#include <linux/pagemap.h>
22#include <linux/init.h>
23#include <linux/highmem.h>
24#include <linux/vmpressure.h>
25#include <linux/vmstat.h>
26#include <linux/file.h>
27#include <linux/writeback.h>
28#include <linux/blkdev.h>
29#include <linux/buffer_head.h> /* for try_to_release_page(),
30 buffer_heads_over_limit */
31#include <linux/mm_inline.h>
32#include <linux/backing-dev.h>
33#include <linux/rmap.h>
34#include <linux/topology.h>
35#include <linux/cpu.h>
36#include <linux/cpuset.h>
37#include <linux/compaction.h>
38#include <linux/notifier.h>
39#include <linux/rwsem.h>
40#include <linux/delay.h>
41#include <linux/kthread.h>
42#include <linux/freezer.h>
43#include <linux/memcontrol.h>
44#include <linux/delayacct.h>
45#include <linux/sysctl.h>
46#include <linux/oom.h>
47#include <linux/prefetch.h>
48#include <linux/printk.h>
49#include <linux/dax.h>
50
51#include <asm/tlbflush.h>
52#include <asm/div64.h>
53
54#include <linux/swapops.h>
55#include <linux/balloon_compaction.h>
56
57#include "internal.h"
58
59#define CREATE_TRACE_POINTS
60#include <trace/events/vmscan.h>
61
62struct scan_control {
63 /* How many pages shrink_list() should reclaim */
64 unsigned long nr_to_reclaim;
65
66 /* This context's GFP mask */
67 gfp_t gfp_mask;
68
69 /* Allocation order */
70 int order;
71
72 /*
73 * Nodemask of nodes allowed by the caller. If NULL, all nodes
74 * are scanned.
75 */
76 nodemask_t *nodemask;
77
78 /*
79 * The memory cgroup that hit its limit and as a result is the
80 * primary target of this reclaim invocation.
81 */
82 struct mem_cgroup *target_mem_cgroup;
83
84 /* Scan (total_size >> priority) pages at once */
85 int priority;
86
87 /* The highest zone to isolate pages for reclaim from */
88 enum zone_type reclaim_idx;
89
90 unsigned int may_writepage:1;
91
92 /* Can mapped pages be reclaimed? */
93 unsigned int may_unmap:1;
94
95 /* Can pages be swapped as part of reclaim? */
96 unsigned int may_swap:1;
97
98 /* Can cgroups be reclaimed below their normal consumption range? */
99 unsigned int may_thrash:1;
100
101 unsigned int hibernation_mode:1;
102
103 /* One of the zones is ready for compaction */
104 unsigned int compaction_ready:1;
105
106 /* Incremented by the number of inactive pages that were scanned */
107 unsigned long nr_scanned;
108
109 /* Number of pages freed so far during a call to shrink_zones() */
110 unsigned long nr_reclaimed;
111};
112
113#ifdef ARCH_HAS_PREFETCH
114#define prefetch_prev_lru_page(_page, _base, _field) \
115 do { \
116 if ((_page)->lru.prev != _base) { \
117 struct page *prev; \
118 \
119 prev = lru_to_page(&(_page->lru)); \
120 prefetch(&prev->_field); \
121 } \
122 } while (0)
123#else
124#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
125#endif
126
127#ifdef ARCH_HAS_PREFETCHW
128#define prefetchw_prev_lru_page(_page, _base, _field) \
129 do { \
130 if ((_page)->lru.prev != _base) { \
131 struct page *prev; \
132 \
133 prev = lru_to_page(&(_page->lru)); \
134 prefetchw(&prev->_field); \
135 } \
136 } while (0)
137#else
138#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
139#endif
140
141/*
142 * From 0 .. 100. Higher means more swappy.
143 */
144int vm_swappiness = 60;
145/*
146 * The total number of pages which are beyond the high watermark within all
147 * zones.
148 */
149unsigned long vm_total_pages;
150
151static LIST_HEAD(shrinker_list);
152static DECLARE_RWSEM(shrinker_rwsem);
153
154#ifdef CONFIG_MEMCG
155static bool global_reclaim(struct scan_control *sc)
156{
157 return !sc->target_mem_cgroup;
158}
159
160/**
161 * sane_reclaim - is the usual dirty throttling mechanism operational?
162 * @sc: scan_control in question
163 *
164 * The normal page dirty throttling mechanism in balance_dirty_pages() is
165 * completely broken with the legacy memcg and direct stalling in
166 * shrink_page_list() is used for throttling instead, which lacks all the
167 * niceties such as fairness, adaptive pausing, bandwidth proportional
168 * allocation and configurability.
169 *
170 * This function tests whether the vmscan currently in progress can assume
171 * that the normal dirty throttling mechanism is operational.
172 */
173static bool sane_reclaim(struct scan_control *sc)
174{
175 struct mem_cgroup *memcg = sc->target_mem_cgroup;
176
177 if (!memcg)
178 return true;
179#ifdef CONFIG_CGROUP_WRITEBACK
180 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
181 return true;
182#endif
183 return false;
184}
185#else
186static bool global_reclaim(struct scan_control *sc)
187{
188 return true;
189}
190
191static bool sane_reclaim(struct scan_control *sc)
192{
193 return true;
194}
195#endif
196
197/*
198 * This misses isolated pages which are not accounted for to save counters.
199 * As the data only determines if reclaim or compaction continues, it is
200 * not expected that isolated pages will be a dominating factor.
201 */
202unsigned long zone_reclaimable_pages(struct zone *zone)
203{
204 unsigned long nr;
205
206 nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
207 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
208 if (get_nr_swap_pages() > 0)
209 nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
210 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
211
212 return nr;
213}
214
215unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat)
216{
217 unsigned long nr;
218
219 nr = node_page_state_snapshot(pgdat, NR_ACTIVE_FILE) +
220 node_page_state_snapshot(pgdat, NR_INACTIVE_FILE) +
221 node_page_state_snapshot(pgdat, NR_ISOLATED_FILE);
222
223 if (get_nr_swap_pages() > 0)
224 nr += node_page_state_snapshot(pgdat, NR_ACTIVE_ANON) +
225 node_page_state_snapshot(pgdat, NR_INACTIVE_ANON) +
226 node_page_state_snapshot(pgdat, NR_ISOLATED_ANON);
227
228 return nr;
229}
230
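/*
 * A node is considered reclaimable as long as fewer than six times its
 * reclaimable pages have been scanned: e.g. with 1,000,000 reclaimable
 * pages, reclaim keeps trying the node until NR_PAGES_SCANNED reaches
 * 6,000,000.
 */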
231bool pgdat_reclaimable(struct pglist_data *pgdat)
232{
233 return node_page_state_snapshot(pgdat, NR_PAGES_SCANNED) <
234 pgdat_reclaimable_pages(pgdat) * 6;
235}
236
237/**
238 * lruvec_lru_size - Returns the number of pages on the given LRU list.
239 * @lruvec: lru vector
240 * @lru: lru to use
241 * @zone_idx: zones to consider (use MAX_NR_ZONES for the whole LRU list)
242 */
243unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
244{
245 unsigned long lru_size;
246 int zid;
247
248 if (!mem_cgroup_disabled())
249 lru_size = mem_cgroup_get_lru_size(lruvec, lru);
250 else
251 lru_size = node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
252
253 for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) {
254 struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
255 unsigned long size;
256
257 if (!managed_zone(zone))
258 continue;
259
260 if (!mem_cgroup_disabled())
261 size = mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
262 else
263 size = zone_page_state(&lruvec_pgdat(lruvec)->node_zones[zid],
264 NR_ZONE_LRU_BASE + lru);
265 lru_size -= min(size, lru_size);
266 }
267
268 return lru_size;
270}
271
272/*
273 * Add a shrinker callback to be called from the vm.
274 */
275int register_shrinker(struct shrinker *shrinker)
276{
277 size_t size = sizeof(*shrinker->nr_deferred);
278
279 if (shrinker->flags & SHRINKER_NUMA_AWARE)
280 size *= nr_node_ids;
281
282 shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
283 if (!shrinker->nr_deferred)
284 return -ENOMEM;
285
286 down_write(&shrinker_rwsem);
287 list_add_tail(&shrinker->list, &shrinker_list);
288 up_write(&shrinker_rwsem);
289 return 0;
290}
291EXPORT_SYMBOL(register_shrinker);
292
293/*
294 * Remove one shrinker from the list and free its deferred-scan counters.
295 */
296void unregister_shrinker(struct shrinker *shrinker)
297{
298 down_write(&shrinker_rwsem);
299 list_del(&shrinker->list);
300 up_write(&shrinker_rwsem);
301 kfree(shrinker->nr_deferred);
302}
303EXPORT_SYMBOL(unregister_shrinker);
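/*
 * Illustrative sketch of how a cache hooks into the shrinker machinery
 * (not part of this file; the foo_* helpers and foo_lock are hypothetical):
 *
 *	static unsigned long foo_count(struct shrinker *s,
 *				       struct shrink_control *sc)
 *	{
 *		return foo_nr_cached_objects();
 *	}
 *
 *	static unsigned long foo_scan(struct shrinker *s,
 *				      struct shrink_control *sc)
 *	{
 *		unsigned long freed;
 *
 *		if (!mutex_trylock(&foo_lock))
 *			return SHRINK_STOP;	/* cannot make progress now */
 *		freed = foo_evict_objects(sc->nr_to_scan);
 *		mutex_unlock(&foo_lock);
 *		return freed;
 *	}
 *
 *	static struct shrinker foo_shrinker = {
 *		.count_objects	= foo_count,
 *		.scan_objects	= foo_scan,
 *		.seeks		= DEFAULT_SEEKS,
 *	};
 *
 *	register_shrinker(&foo_shrinker);
 *
 * do_shrink_slab() below decides how many objects to ask foo_scan() for,
 * based on the ->count_objects return value and the LRU scan pressure.
 */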
304
305#define SHRINK_BATCH 128
306
307static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
308 struct shrinker *shrinker,
309 unsigned long nr_scanned,
310 unsigned long nr_eligible)
311{
312 unsigned long freed = 0;
313 unsigned long long delta;
314 long total_scan;
315 long freeable;
316 long nr;
317 long new_nr;
318 int nid = shrinkctl->nid;
319 long batch_size = shrinker->batch ? shrinker->batch
320 : SHRINK_BATCH;
321 long scanned = 0, next_deferred;
322
323 freeable = shrinker->count_objects(shrinker, shrinkctl);
324 if (freeable == 0)
325 return 0;
326
327 /*
328 * copy the current shrinker scan count into a local variable
329 * and zero it so that other concurrent shrinker invocations
330 * don't also do this scanning work.
331 */
332 nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
333
334 total_scan = nr;
335 delta = (4 * nr_scanned) / shrinker->seeks;
336 delta *= freeable;
337 do_div(delta, nr_eligible + 1);
338 total_scan += delta;
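	/*
	 * Example, assuming the default ->seeks of 2: with nr_scanned = 1000,
	 * nr_eligible = 10000 and freeable = 5000, delta is
	 * (4 * 1000 / 2) * 5000 / 10001 ~= 1000, i.e. the shrinker is asked
	 * to consider about 20% of its cache after 10% of the eligible LRU
	 * pages were scanned (the extra factor of two comes from 4 / ->seeks).
	 */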
339 if (total_scan < 0) {
340 pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
341 shrinker->scan_objects, total_scan);
342 total_scan = freeable;
343 next_deferred = nr;
344 } else
345 next_deferred = total_scan;
346
347 /*
348 * We need to avoid excessive windup on filesystem shrinkers
349 * due to large numbers of GFP_NOFS allocations causing the
350 * shrinkers to return -1 all the time. This results in a large
351 * nr being built up so when a shrink that can do some work
352 * comes along it empties the entire cache due to nr >>>
353 * freeable. This is bad for sustaining a working set in
354 * memory.
355 *
356 * Hence only allow the shrinker to scan the entire cache when
357 * a large delta change is calculated directly.
358 */
359 if (delta < freeable / 4)
360 total_scan = min(total_scan, freeable / 2);
361
362 /*
363 * Avoid risking looping forever due to too large nr value:
364	 * never try to free more than twice the estimated number of
365 * freeable entries.
366 */
367 if (total_scan > freeable * 2)
368 total_scan = freeable * 2;
369
370 trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
371 nr_scanned, nr_eligible,
372 freeable, delta, total_scan);
373
374 /*
375 * Normally, we should not scan less than batch_size objects in one
376 * pass to avoid too frequent shrinker calls, but if the slab has less
377 * than batch_size objects in total and we are really tight on memory,
378 * we will try to reclaim all available objects, otherwise we can end
379 * up failing allocations although there are plenty of reclaimable
380 * objects spread over several slabs with usage less than the
381 * batch_size.
382 *
383 * We detect the "tight on memory" situations by looking at the total
384 * number of objects we want to scan (total_scan). If it is greater
385 * than the total number of objects on slab (freeable), we must be
386 * scanning at high prio and therefore should try to reclaim as much as
387 * possible.
388 */
389 while (total_scan >= batch_size ||
390 total_scan >= freeable) {
391 unsigned long ret;
392 unsigned long nr_to_scan = min(batch_size, total_scan);
393
394 shrinkctl->nr_to_scan = nr_to_scan;
395 ret = shrinker->scan_objects(shrinker, shrinkctl);
396 if (ret == SHRINK_STOP)
397 break;
398 freed += ret;
399
400 count_vm_events(SLABS_SCANNED, nr_to_scan);
401 total_scan -= nr_to_scan;
402 scanned += nr_to_scan;
403
404 cond_resched();
405 }
406
407 if (next_deferred >= scanned)
408 next_deferred -= scanned;
409 else
410 next_deferred = 0;
411 /*
412 * move the unused scan count back into the shrinker in a
413 * manner that handles concurrent updates. If we exhausted the
414 * scan, there is no need to do an update.
415 */
416 if (next_deferred > 0)
417 new_nr = atomic_long_add_return(next_deferred,
418 &shrinker->nr_deferred[nid]);
419 else
420 new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
421
422 trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
423 return freed;
424}
425
426/**
427 * shrink_slab - shrink slab caches
428 * @gfp_mask: allocation context
429 * @nid: node whose slab caches to target
430 * @memcg: memory cgroup whose slab caches to target
431 * @nr_scanned: pressure numerator
432 * @nr_eligible: pressure denominator
433 *
434 * Call the shrink functions to age shrinkable caches.
435 *
436 * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set,
437 * unaware shrinkers will receive a node id of 0 instead.
438 *
439 * @memcg specifies the memory cgroup to target. If it is not NULL,
440 * only shrinkers with SHRINKER_MEMCG_AWARE set will be called to scan
441 * objects from the memory cgroup specified. Otherwise, only unaware
442 * shrinkers are called.
443 *
444 * @nr_scanned and @nr_eligible form a ratio that indicates how much of
445 * the available objects should be scanned. Page reclaim for example
446 * passes the number of pages scanned and the number of pages on the
447 * LRU lists that it considered on @nid, plus a bias in @nr_scanned
448 * when it encountered mapped pages. The ratio is further biased by
449 * the ->seeks setting of the shrink function, which indicates the
450 * cost to recreate an object relative to that of an LRU page.
451 *
452 * Returns the number of reclaimed slab objects.
453 */
454static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
455 struct mem_cgroup *memcg,
456 unsigned long nr_scanned,
457 unsigned long nr_eligible)
458{
459 struct shrinker *shrinker;
460 unsigned long freed = 0;
461
462 if (memcg && (!memcg_kmem_enabled() || !mem_cgroup_online(memcg)))
463 return 0;
464
465 if (nr_scanned == 0)
466 nr_scanned = SWAP_CLUSTER_MAX;
467
468 if (!down_read_trylock(&shrinker_rwsem)) {
469 /*
470 * If we would return 0, our callers would understand that we
471 * have nothing else to shrink and give up trying. By returning
472 * 1 we keep it going and assume we'll be able to shrink next
473 * time.
474 */
475 freed = 1;
476 goto out;
477 }
478
479 list_for_each_entry(shrinker, &shrinker_list, list) {
480 struct shrink_control sc = {
481 .gfp_mask = gfp_mask,
482 .nid = nid,
483 .memcg = memcg,
484 };
485
486 /*
487 * If kernel memory accounting is disabled, we ignore
488 * SHRINKER_MEMCG_AWARE flag and call all shrinkers
489 * passing NULL for memcg.
490 */
491 if (memcg_kmem_enabled() &&
492 !!memcg != !!(shrinker->flags & SHRINKER_MEMCG_AWARE))
493 continue;
494
495 if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
496 sc.nid = 0;
497
498 freed += do_shrink_slab(&sc, shrinker, nr_scanned, nr_eligible);
499 }
500
501 up_read(&shrinker_rwsem);
502out:
503 cond_resched();
504 return freed;
505}
506
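/*
 * Calling shrink_slab() with nr_scanned == nr_eligible (1000, 1000) applies
 * the same pressure as if the entire LRU had just been scanned, so each
 * shrinker gives up a large share of its cache. The loop repeats until the
 * caches have essentially stopped shrinking (no more than 10 objects freed
 * in a pass).
 */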
507void drop_slab_node(int nid)
508{
509 unsigned long freed;
510
511 do {
512 struct mem_cgroup *memcg = NULL;
513
514 freed = 0;
515 do {
516 freed += shrink_slab(GFP_KERNEL, nid, memcg,
517 1000, 1000);
518 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
519 } while (freed > 10);
520}
521
522void drop_slab(void)
523{
524 int nid;
525
526 for_each_online_node(nid)
527 drop_slab_node(nid);
528}
529
530static inline int is_page_cache_freeable(struct page *page)
531{
532 /*
533 * A freeable page cache page is referenced only by the caller
534 * that isolated the page, the page cache radix tree and
535 * optional buffer heads at page->private.
536 */
537 return page_count(page) - page_has_private(page) == 2;
538}
539
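/*
 * May the current context write back a dirty page belonging to @inode?
 * PF_SWAPWRITE tasks (e.g. kswapd) always may; other callers skip a
 * congested backing device unless it is the one they are already
 * writing to.
 */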
540static int may_write_to_inode(struct inode *inode, struct scan_control *sc)
541{
542 if (current->flags & PF_SWAPWRITE)
543 return 1;
544 if (!inode_write_congested(inode))
545 return 1;
546 if (inode_to_bdi(inode) == current->backing_dev_info)
547 return 1;
548 return 0;
549}
550
551/*
552 * We detected a synchronous write error writing a page out. Probably
553 * -ENOSPC. We need to propagate that into the address_space for a subsequent
554 * fsync(), msync() or close().
555 *
556 * The tricky part is that after writepage we cannot touch the mapping: nothing
557 * prevents it from being freed up. But we have a ref on the page and once
558 * that page is locked, the mapping is pinned.
559 *
560 * We're allowed to run sleeping lock_page() here because we know the caller has
561 * __GFP_FS.
562 */
563static void handle_write_error(struct address_space *mapping,
564 struct page *page, int error)
565{
566 lock_page(page);
567 if (page_mapping(page) == mapping)
568 mapping_set_error(mapping, error);
569 unlock_page(page);
570}
571
572/* possible outcome of pageout() */
573typedef enum {
574 /* failed to write page out, page is locked */
575 PAGE_KEEP,
576 /* move page to the active list, page is locked */
577 PAGE_ACTIVATE,
578 /* page has been sent to the disk successfully, page is unlocked */
579 PAGE_SUCCESS,
580 /* page is clean and locked */
581 PAGE_CLEAN,
582} pageout_t;
583
584/*
585 * pageout is called by shrink_page_list() for each dirty page.
586 * Calls ->writepage().
587 */
588static pageout_t pageout(struct page *page, struct address_space *mapping,
589 struct scan_control *sc)
590{
591 /*
592 * If the page is dirty, only perform writeback if that write
593	 * will be non-blocking, to prevent this allocation from being
594	 * stalled by pagecache activity. But note that there may be
595 * stalls if we need to run get_block(). We could test
596 * PagePrivate for that.
597 *
598 * If this process is currently in __generic_file_write_iter() against
599 * this page's queue, we can perform writeback even if that
600 * will block.
601 *
602 * If the page is swapcache, write it back even if that would
603 * block, for some throttling. This happens by accident, because
604 * swap_backing_dev_info is bust: it doesn't reflect the
605 * congestion state of the swapdevs. Easy to fix, if needed.
606 */
607 if (!is_page_cache_freeable(page))
608 return PAGE_KEEP;
609 if (!mapping) {
610 /*
611 * Some data journaling orphaned pages can have
612 * page->mapping == NULL while being dirty with clean buffers.
613 */
614 if (page_has_private(page)) {
615 if (try_to_free_buffers(page)) {
616 ClearPageDirty(page);
617 pr_info("%s: orphaned page\n", __func__);
618 return PAGE_CLEAN;
619 }
620 }
621 return PAGE_KEEP;
622 }
623 if (mapping->a_ops->writepage == NULL)
624 return PAGE_ACTIVATE;
625 if (!may_write_to_inode(mapping->host, sc))
626 return PAGE_KEEP;
627
628 if (clear_page_dirty_for_io(page)) {
629 int res;
630 struct writeback_control wbc = {
631 .sync_mode = WB_SYNC_NONE,
632 .nr_to_write = SWAP_CLUSTER_MAX,
633 .range_start = 0,
634 .range_end = LLONG_MAX,
635 .for_reclaim = 1,
636 };
637
638 SetPageReclaim(page);
639 res = mapping->a_ops->writepage(page, &wbc);
640 if (res < 0)
641 handle_write_error(mapping, page, res);
642 if (res == AOP_WRITEPAGE_ACTIVATE) {
643 ClearPageReclaim(page);
644 return PAGE_ACTIVATE;
645 }
646
647 if (!PageWriteback(page)) {
648 /* synchronous write or broken a_ops? */
649 ClearPageReclaim(page);
650 }
651 trace_mm_vmscan_writepage(page);
652 inc_node_page_state(page, NR_VMSCAN_WRITE);
653 return PAGE_SUCCESS;
654 }
655
656 return PAGE_CLEAN;
657}
658
659/*
660 * Same as remove_mapping, but if the page is removed from the mapping, it
661 * gets returned with a refcount of 0.
662 */
663static int __remove_mapping(struct address_space *mapping, struct page *page,
664 bool reclaimed)
665{
666 unsigned long flags;
667
668 BUG_ON(!PageLocked(page));
669 BUG_ON(mapping != page_mapping(page));
670
671 spin_lock_irqsave(&mapping->tree_lock, flags);
672 /*
673	 * The non-racy check for a busy page.
674 *
675 * Must be careful with the order of the tests. When someone has
676 * a ref to the page, it may be possible that they dirty it then
677 * drop the reference. So if PageDirty is tested before page_count
678 * here, then the following race may occur:
679 *
680 * get_user_pages(&page);
681 * [user mapping goes away]
682 * write_to(page);
683 * !PageDirty(page) [good]
684 * SetPageDirty(page);
685 * put_page(page);
686 * !page_count(page) [good, discard it]
687 *
688 * [oops, our write_to data is lost]
689 *
690 * Reversing the order of the tests ensures such a situation cannot
691 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
692 * load is not satisfied before that of page->_refcount.
693 *
694 * Note that if SetPageDirty is always performed via set_page_dirty,
695 * and thus under tree_lock, then this ordering is not required.
696 */
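	/*
	 * Freezing at 2 expects exactly two references: the caller's, taken
	 * when the page was isolated from the LRU, and the page cache's (or
	 * swap cache's). Any additional reference means someone else is
	 * still using the page and it cannot be freed.
	 */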
697 if (!page_ref_freeze(page, 2))
698 goto cannot_free;
699	/* note: atomic_cmpxchg in page_ref_freeze provides the smp_rmb */
700 if (unlikely(PageDirty(page))) {
701 page_ref_unfreeze(page, 2);
702 goto cannot_free;
703 }
704
705 if (PageSwapCache(page)) {
706 swp_entry_t swap = { .val = page_private(page) };
707 mem_cgroup_swapout(page, swap);
708 __delete_from_swap_cache(page);
709 spin_unlock_irqrestore(&mapping->tree_lock, flags);
710 swapcache_free(swap);
711 } else {
712 void (*freepage)(struct page *);
713 void *shadow = NULL;
714
715 freepage = mapping->a_ops->freepage;
716 /*
717 * Remember a shadow entry for reclaimed file cache in
718 * order to detect refaults, thus thrashing, later on.
719 *
720 * But don't store shadows in an address space that is
721		 * already exiting. This is not just an optimization,
722 * inode reclaim needs to empty out the radix tree or
723 * the nodes are lost. Don't plant shadows behind its
724 * back.
725 *
726 * We also don't store shadows for DAX mappings because the
727 * only page cache pages found in these are zero pages
728 * covering holes, and because we don't want to mix DAX
729 * exceptional entries and shadow exceptional entries in the
730 * same page_tree.
731 */
732 if (reclaimed && page_is_file_cache(page) &&
733 !mapping_exiting(mapping) && !dax_mapping(mapping))
734 shadow = workingset_eviction(mapping, page);
735 __delete_from_page_cache(page, shadow);
736 spin_unlock_irqrestore(&mapping->tree_lock, flags);
737
738 if (freepage != NULL)
739 freepage(page);
740 }
741
742 return 1;
743
744cannot_free:
745 spin_unlock_irqrestore(&mapping->tree_lock, flags);
746 return 0;
747}
748
749/*
750 * Attempt to detach a locked page from its ->mapping. If it is dirty or if
751 * someone else has a ref on the page, abort and return 0. If it was
752 * successfully detached, return 1. Assumes the caller has a single ref on
753 * this page.
754 */
755int remove_mapping(struct address_space *mapping, struct page *page)
756{
757 if (__remove_mapping(mapping, page, false)) {
758 /*
759 * Unfreezing the refcount with 1 rather than 2 effectively
760 * drops the pagecache ref for us without requiring another
761 * atomic operation.
762 */
763 page_ref_unfreeze(page, 1);
764 return 1;
765 }
766 return 0;
767}
768
769/**
770 * putback_lru_page - put previously isolated page onto appropriate LRU list
771 * @page: page to be put back to appropriate lru list
772 *
773 * Add previously isolated @page to appropriate LRU list.
774 * Page may still be unevictable for other reasons.
775 *
776 * lru_lock must not be held, interrupts must be enabled.
777 */
778void putback_lru_page(struct page *page)
779{
780 bool is_unevictable;
781 int was_unevictable = PageUnevictable(page);
782
783 VM_BUG_ON_PAGE(PageLRU(page), page);
784
785redo:
786 ClearPageUnevictable(page);
787
788 if (page_evictable(page)) {
789 /*
790 * For evictable pages, we can use the cache.
791 * In event of a race, worst case is we end up with an
792 * unevictable page on [in]active list.
793 * We know how to handle that.
794 */
795 is_unevictable = false;
796 lru_cache_add(page);
797 } else {
798 /*
799 * Put unevictable pages directly on zone's unevictable
800 * list.
801 */
802 is_unevictable = true;
803 add_page_to_unevictable_list(page);
804 /*
805 * When racing with an mlock or AS_UNEVICTABLE clearing
806 * (page is unlocked) make sure that if the other thread
807 * does not observe our setting of PG_lru and fails
808 * isolation/check_move_unevictable_pages,
809 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
810 * the page back to the evictable list.
811 *
812 * The other side is TestClearPageMlocked() or shmem_lock().
813 */
814 smp_mb();
815 }
816
817 /*
818	 * The page's status can change while we move it among the LRU lists.
819	 * If an evictable page ends up on the unevictable list, it will never
820	 * be freed. To avoid that, check again after adding it to the list.
821 */
822 if (is_unevictable && page_evictable(page)) {
823 if (!isolate_lru_page(page)) {
824 put_page(page);
825 goto redo;
826 }
827		/* Someone else dropped this page from the LRU, so it will
828		 * be freed or put back to the LRU again. There is nothing
829		 * to do here.
830		 */
831 }
832
833 if (was_unevictable && !is_unevictable)
834 count_vm_event(UNEVICTABLE_PGRESCUED);
835 else if (!was_unevictable && is_unevictable)
836 count_vm_event(UNEVICTABLE_PGCULLED);
837
838 put_page(page); /* drop ref from isolate */
839}
840
841enum page_references {
842 PAGEREF_RECLAIM,
843 PAGEREF_RECLAIM_CLEAN,
844 PAGEREF_KEEP,
845 PAGEREF_ACTIVATE,
846};
847
848static enum page_references page_check_references(struct page *page,
849 struct scan_control *sc)
850{
851 int referenced_ptes, referenced_page;
852 unsigned long vm_flags;
853
854 referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
855 &vm_flags);
856 referenced_page = TestClearPageReferenced(page);
857
858 /*
859 * Mlock lost the isolation race with us. Let try_to_unmap()
860 * move the page to the unevictable list.
861 */
862 if (vm_flags & VM_LOCKED)
863 return PAGEREF_RECLAIM;
864
865 if (referenced_ptes) {
866 if (PageSwapBacked(page))
867 return PAGEREF_ACTIVATE;
868 /*
869 * All mapped pages start out with page table
870 * references from the instantiating fault, so we need
871 * to look twice if a mapped file page is used more
872 * than once.
873 *
874 * Mark it and spare it for another trip around the
875 * inactive list. Another page table reference will
876 * lead to its activation.
877 *
878 * Note: the mark is set for activated pages as well
879 * so that recently deactivated but used pages are
880 * quickly recovered.
881 */
882 SetPageReferenced(page);
883
884 if (referenced_page || referenced_ptes > 1)
885 return PAGEREF_ACTIVATE;
886
887 /*
888 * Activate file-backed executable pages after first usage.
889 */
890 if (vm_flags & VM_EXEC)
891 return PAGEREF_ACTIVATE;
892
893 return PAGEREF_KEEP;
894 }
895
896 /* Reclaim if clean, defer dirty pages to writeback */
897 if (referenced_page && !PageSwapBacked(page))
898 return PAGEREF_RECLAIM_CLEAN;
899
900 return PAGEREF_RECLAIM;
901}
902
903/* Check if a page is dirty or under writeback */
904static void page_check_dirty_writeback(struct page *page,
905 bool *dirty, bool *writeback)
906{
907 struct address_space *mapping;
908
909 /*
910 * Anonymous pages are not handled by flushers and must be written
911 * from reclaim context. Do not stall reclaim based on them
912 */
913 if (!page_is_file_cache(page)) {
914 *dirty = false;
915 *writeback = false;
916 return;
917 }
918
919 /* By default assume that the page flags are accurate */
920 *dirty = PageDirty(page);
921 *writeback = PageWriteback(page);
922
923 /* Verify dirty/writeback state if the filesystem supports it */
924 if (!page_has_private(page))
925 return;
926
927 mapping = page_mapping(page);
928 if (mapping && mapping->a_ops->is_dirty_writeback)
929 mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
930}
931
932/*
933 * shrink_page_list() returns the number of reclaimed pages
934 */
935static unsigned long shrink_page_list(struct list_head *page_list,
936 struct pglist_data *pgdat,
937 struct scan_control *sc,
938 enum ttu_flags ttu_flags,
939 unsigned long *ret_nr_dirty,
940 unsigned long *ret_nr_unqueued_dirty,
941 unsigned long *ret_nr_congested,
942 unsigned long *ret_nr_writeback,
943 unsigned long *ret_nr_immediate,
944 bool force_reclaim)
945{
946 LIST_HEAD(ret_pages);
947 LIST_HEAD(free_pages);
948 int pgactivate = 0;
949 unsigned long nr_unqueued_dirty = 0;
950 unsigned long nr_dirty = 0;
951 unsigned long nr_congested = 0;
952 unsigned long nr_reclaimed = 0;
953 unsigned long nr_writeback = 0;
954 unsigned long nr_immediate = 0;
955
956 cond_resched();
957
958 while (!list_empty(page_list)) {
959 struct address_space *mapping;
960 struct page *page;
961 int may_enter_fs;
962 enum page_references references = PAGEREF_RECLAIM_CLEAN;
963 bool dirty, writeback;
964 bool lazyfree = false;
965 int ret = SWAP_SUCCESS;
966
967 cond_resched();
968
969 page = lru_to_page(page_list);
970 list_del(&page->lru);
971
972 if (!trylock_page(page))
973 goto keep;
974
975 VM_BUG_ON_PAGE(PageActive(page), page);
976
977 sc->nr_scanned++;
978
979 if (unlikely(!page_evictable(page)))
980 goto cull_mlocked;
981
982 if (!sc->may_unmap && page_mapped(page))
983 goto keep_locked;
984
985 /* Double the slab pressure for mapped and swapcache pages */
986 if (page_mapped(page) || PageSwapCache(page))
987 sc->nr_scanned++;
988
989 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
990 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
991
992 /*
993 * The number of dirty pages determines if a zone is marked
994 * reclaim_congested which affects wait_iff_congested. kswapd
995 * will stall and start writing pages if the tail of the LRU
996 * is all dirty unqueued pages.
997 */
998 page_check_dirty_writeback(page, &dirty, &writeback);
999 if (dirty || writeback)
1000 nr_dirty++;
1001
1002 if (dirty && !writeback)
1003 nr_unqueued_dirty++;
1004
1005 /*
1006 * Treat this page as congested if the underlying BDI is or if
1007 * pages are cycling through the LRU so quickly that the
1008 * pages marked for immediate reclaim are making it to the
1009 * end of the LRU a second time.
1010 */
1011 mapping = page_mapping(page);
1012 if (((dirty || writeback) && mapping &&
1013 inode_write_congested(mapping->host)) ||
1014 (writeback && PageReclaim(page)))
1015 nr_congested++;
1016
1017 /*
1018 * If a page at the tail of the LRU is under writeback, there
1019 * are three cases to consider.
1020 *
1021 * 1) If reclaim is encountering an excessive number of pages
1022 * under writeback and this page is both under writeback and
1023 * PageReclaim then it indicates that pages are being queued
1024 * for IO but are being recycled through the LRU before the
1025 * IO can complete. Waiting on the page itself risks an
1026 * indefinite stall if it is impossible to writeback the
1027 * page due to IO error or disconnected storage so instead
1028 * note that the LRU is being scanned too quickly and the
1029 * caller can stall after page list has been processed.
1030 *
1031 * 2) Global or new memcg reclaim encounters a page that is
1032 * not marked for immediate reclaim, or the caller does not
1033 * have __GFP_FS (or __GFP_IO if it's simply going to swap,
1034 * not to fs). In this case mark the page for immediate
1035 * reclaim and continue scanning.
1036 *
1037 * Require may_enter_fs because we would wait on fs, which
1038 * may not have submitted IO yet. And the loop driver might
1039 * enter reclaim, and deadlock if it waits on a page for
1040 * which it is needed to do the write (loop masks off
1041 * __GFP_IO|__GFP_FS for this reason); but more thought
1042 * would probably show more reasons.
1043 *
1044 * 3) Legacy memcg encounters a page that is already marked
1045 * PageReclaim. memcg does not have any dirty pages
1046 * throttling so we could easily OOM just because too many
1047 * pages are in writeback and there is nothing else to
1048 * reclaim. Wait for the writeback to complete.
1049 */
1050 if (PageWriteback(page)) {
1051 /* Case 1 above */
1052 if (current_is_kswapd() &&
1053 PageReclaim(page) &&
1054 test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
1055 nr_immediate++;
1056 goto keep_locked;
1057
1058 /* Case 2 above */
1059 } else if (sane_reclaim(sc) ||
1060 !PageReclaim(page) || !may_enter_fs) {
1061 /*
1062 * This is slightly racy - end_page_writeback()
1063 * might have just cleared PageReclaim, then
1064				 * setting PageReclaim here ends up interpreted
1065 * as PageReadahead - but that does not matter
1066 * enough to care. What we do want is for this
1067 * page to have PageReclaim set next time memcg
1068 * reclaim reaches the tests above, so it will
1069 * then wait_on_page_writeback() to avoid OOM;
1070 * and it's also appropriate in global reclaim.
1071 */
1072 SetPageReclaim(page);
1073 nr_writeback++;
1074 goto keep_locked;
1075
1076 /* Case 3 above */
1077 } else {
1078 unlock_page(page);
1079 wait_on_page_writeback(page);
1080 /* then go back and try same page again */
1081 list_add_tail(&page->lru, page_list);
1082 continue;
1083 }
1084 }
1085
1086 if (!force_reclaim)
1087 references = page_check_references(page, sc);
1088
1089 switch (references) {
1090 case PAGEREF_ACTIVATE:
1091 goto activate_locked;
1092 case PAGEREF_KEEP:
1093 goto keep_locked;
1094 case PAGEREF_RECLAIM:
1095 case PAGEREF_RECLAIM_CLEAN:
1096 ; /* try to reclaim the page below */
1097 }
1098
1099 /*
1100 * Anonymous process memory has backing store?
1101 * Try to allocate it some swap space here.
1102 */
1103 if (PageAnon(page) && !PageSwapCache(page)) {
1104 if (!(sc->gfp_mask & __GFP_IO))
1105 goto keep_locked;
1106 if (!add_to_swap(page, page_list))
1107 goto activate_locked;
1108 lazyfree = true;
1109 may_enter_fs = 1;
1110
1111 /* Adding to swap updated mapping */
1112 mapping = page_mapping(page);
1113 } else if (unlikely(PageTransHuge(page))) {
1114 /* Split file THP */
1115 if (split_huge_page_to_list(page, page_list))
1116 goto keep_locked;
1117 }
1118
1119 VM_BUG_ON_PAGE(PageTransHuge(page), page);
1120
1121 /*
1122 * The page is mapped into the page tables of one or more
1123 * processes. Try to unmap it here.
1124 */
1125 if (page_mapped(page) && mapping) {
1126 switch (ret = try_to_unmap(page, lazyfree ?
1127 (ttu_flags | TTU_BATCH_FLUSH | TTU_LZFREE) :
1128 (ttu_flags | TTU_BATCH_FLUSH))) {
1129 case SWAP_FAIL:
1130 goto activate_locked;
1131 case SWAP_AGAIN:
1132 goto keep_locked;
1133 case SWAP_MLOCK:
1134 goto cull_mlocked;
1135 case SWAP_LZFREE:
1136 goto lazyfree;
1137 case SWAP_SUCCESS:
1138 ; /* try to free the page below */
1139 }
1140 }
1141
1142 if (PageDirty(page)) {
1143 /*
1144 * Only kswapd can writeback filesystem pages to
1145 * avoid risk of stack overflow but only writeback
1146 * if many dirty pages have been encountered.
1147 */
1148 if (page_is_file_cache(page) &&
1149 (!current_is_kswapd() ||
1150 !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
1151 /*
1152 * Immediately reclaim when written back.
1153				 * Similar in principle to deactivate_page()
1154 * except we already have the page isolated
1155 * and know it's dirty
1156 */
1157 inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
1158 SetPageReclaim(page);
1159
1160 goto keep_locked;
1161 }
1162
1163 if (references == PAGEREF_RECLAIM_CLEAN)
1164 goto keep_locked;
1165 if (!may_enter_fs)
1166 goto keep_locked;
1167 if (!sc->may_writepage)
1168 goto keep_locked;
1169
1170 /*
1171 * Page is dirty. Flush the TLB if a writable entry
1172 * potentially exists to avoid CPU writes after IO
1173 * starts and then write it out here.
1174 */
1175 try_to_unmap_flush_dirty();
1176 switch (pageout(page, mapping, sc)) {
1177 case PAGE_KEEP:
1178 goto keep_locked;
1179 case PAGE_ACTIVATE:
1180 goto activate_locked;
1181 case PAGE_SUCCESS:
1182 if (PageWriteback(page))
1183 goto keep;
1184 if (PageDirty(page))
1185 goto keep;
1186
1187 /*
1188 * A synchronous write - probably a ramdisk. Go
1189 * ahead and try to reclaim the page.
1190 */
1191 if (!trylock_page(page))
1192 goto keep;
1193 if (PageDirty(page) || PageWriteback(page))
1194 goto keep_locked;
1195 mapping = page_mapping(page);
1196 case PAGE_CLEAN:
1197 ; /* try to free the page below */
1198 }
1199 }
1200
1201 /*
1202 * If the page has buffers, try to free the buffer mappings
1203 * associated with this page. If we succeed we try to free
1204 * the page as well.
1205 *
1206 * We do this even if the page is PageDirty().
1207 * try_to_release_page() does not perform I/O, but it is
1208 * possible for a page to have PageDirty set, but it is actually
1209 * clean (all its buffers are clean). This happens if the
1210 * buffers were written out directly, with submit_bh(). ext3
1211 * will do this, as well as the blockdev mapping.
1212 * try_to_release_page() will discover that cleanness and will
1213 * drop the buffers and mark the page clean - it can be freed.
1214 *
1215 * Rarely, pages can have buffers and no ->mapping. These are
1216 * the pages which were not successfully invalidated in
1217 * truncate_complete_page(). We try to drop those buffers here
1218 * and if that worked, and the page is no longer mapped into
1219 * process address space (page_count == 1) it can be freed.
1220 * Otherwise, leave the page on the LRU so it is swappable.
1221 */
1222 if (page_has_private(page)) {
1223 if (!try_to_release_page(page, sc->gfp_mask))
1224 goto activate_locked;
1225 if (!mapping && page_count(page) == 1) {
1226 unlock_page(page);
1227 if (put_page_testzero(page))
1228 goto free_it;
1229 else {
1230 /*
1231 * rare race with speculative reference.
1232 * the speculative reference will free
1233 * this page shortly, so we may
1234 * increment nr_reclaimed here (and
1235 * leave it off the LRU).
1236 */
1237 nr_reclaimed++;
1238 continue;
1239 }
1240 }
1241 }
1242
1243lazyfree:
1244 if (!mapping || !__remove_mapping(mapping, page, true))
1245 goto keep_locked;
1246
1247 /*
1248 * At this point, we have no other references and there is
1249 * no way to pick any more up (removed from LRU, removed
1250 * from pagecache). Can use non-atomic bitops now (and
1251 * we obviously don't have to worry about waking up a process
1252		 * waiting on the page lock, because there are no references.)
1253 */
1254 __ClearPageLocked(page);
1255free_it:
1256 if (ret == SWAP_LZFREE)
1257 count_vm_event(PGLAZYFREED);
1258
1259 nr_reclaimed++;
1260
1261 /*
1262 * Is there need to periodically free_page_list? It would
1263 * appear not as the counts should be low
1264 */
1265 list_add(&page->lru, &free_pages);
1266 continue;
1267
1268cull_mlocked:
1269 if (PageSwapCache(page))
1270 try_to_free_swap(page);
1271 unlock_page(page);
1272 list_add(&page->lru, &ret_pages);
1273 continue;
1274
1275activate_locked:
1276 /* Not a candidate for swapping, so reclaim swap space. */
1277 if (PageSwapCache(page) && mem_cgroup_swap_full(page))
1278 try_to_free_swap(page);
1279 VM_BUG_ON_PAGE(PageActive(page), page);
1280 SetPageActive(page);
1281 pgactivate++;
1282keep_locked:
1283 unlock_page(page);
1284keep:
1285 list_add(&page->lru, &ret_pages);
1286 VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
1287 }
1288
1289 mem_cgroup_uncharge_list(&free_pages);
1290 try_to_unmap_flush();
1291 free_hot_cold_page_list(&free_pages, true);
1292
1293 list_splice(&ret_pages, page_list);
1294 count_vm_events(PGACTIVATE, pgactivate);
1295
1296 *ret_nr_dirty += nr_dirty;
1297 *ret_nr_congested += nr_congested;
1298 *ret_nr_unqueued_dirty += nr_unqueued_dirty;
1299 *ret_nr_writeback += nr_writeback;
1300 *ret_nr_immediate += nr_immediate;
1301 return nr_reclaimed;
1302}
1303
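/*
 * Reclaim the clean file pages on @page_list, leaving dirty, anonymous and
 * __PageMovable pages untouched. Returns the number of pages freed and
 * drops NR_ISOLATED_FILE on @zone's node by the same amount.
 */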
1304unsigned long reclaim_clean_pages_from_list(struct zone *zone,
1305 struct list_head *page_list)
1306{
1307 struct scan_control sc = {
1308 .gfp_mask = GFP_KERNEL,
1309 .priority = DEF_PRIORITY,
1310 .may_unmap = 1,
1311 };
1312 unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5;
1313 struct page *page, *next;
1314 LIST_HEAD(clean_pages);
1315
1316 list_for_each_entry_safe(page, next, page_list, lru) {
1317 if (page_is_file_cache(page) && !PageDirty(page) &&
1318 !__PageMovable(page)) {
1319 ClearPageActive(page);
1320 list_move(&page->lru, &clean_pages);
1321 }
1322 }
1323
1324 ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
1325 TTU_UNMAP|TTU_IGNORE_ACCESS,
1326 &dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
1327 list_splice(&clean_pages, page_list);
1328 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret);
1329 return ret;
1330}
1331
1332/*
1333 * Attempt to remove the specified page from its LRU. Only take this page
1334 * if it is of the appropriate PageActive status. Pages which are being
1335 * freed elsewhere are also ignored.
1336 *
1337 * page: page to consider
1338 * mode: one of the LRU isolation modes defined above
1339 *
1340 * returns 0 on success, -ve errno on failure.
1341 */
1342int __isolate_lru_page(struct page *page, isolate_mode_t mode)
1343{
1344 int ret = -EINVAL;
1345
1346 /* Only take pages on the LRU. */
1347 if (!PageLRU(page))
1348 return ret;
1349
1350 /* Compaction should not handle unevictable pages but CMA can do so */
1351 if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
1352 return ret;
1353
1354 ret = -EBUSY;
1355
1356 /*
1357 * To minimise LRU disruption, the caller can indicate that it only
1358 * wants to isolate pages it will be able to operate on without
1359 * blocking - clean pages for the most part.
1360 *
1361 * ISOLATE_CLEAN means that only clean pages should be isolated. This
1362	 * is used by reclaim when it cannot write to backing storage.
1363 *
1364	 * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants pages
1365	 * that it is possible to migrate without blocking.
1366 */
1367 if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
1368 /* All the caller can do on PageWriteback is block */
1369 if (PageWriteback(page))
1370 return ret;
1371
1372 if (PageDirty(page)) {
1373 struct address_space *mapping;
1374
1375 /* ISOLATE_CLEAN means only clean pages */
1376 if (mode & ISOLATE_CLEAN)
1377 return ret;
1378
1379 /*
1380 * Only pages without mappings or that have a
1381 * ->migratepage callback are possible to migrate
1382 * without blocking
1383 */
1384 mapping = page_mapping(page);
1385 if (mapping && !mapping->a_ops->migratepage)
1386 return ret;
1387 }
1388 }
1389
1390 if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
1391 return ret;
1392
1393 if (likely(get_page_unless_zero(page))) {
1394 /*
1395 * Be careful not to clear PageLRU until after we're
1396 * sure the page is not being freed elsewhere -- the
1397 * page release code relies on it.
1398 */
1399 ClearPageLRU(page);
1400 ret = 0;
1401 }
1402
1403 return ret;
1404}
1405
1406
1407/*
1408 * Update LRU sizes after isolating pages. The LRU size updates must
1409 * be complete before mem_cgroup_update_lru_size due to a sanity check.
1410 */
1411static __always_inline void update_lru_sizes(struct lruvec *lruvec,
1412 enum lru_list lru, unsigned long *nr_zone_taken)
1413{
1414 int zid;
1415
1416 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1417 if (!nr_zone_taken[zid])
1418 continue;
1419
1420 __update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
1421#ifdef CONFIG_MEMCG
1422 mem_cgroup_update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
1423#endif
1424 }
1426}
1427
1428/*
1429 * zone_lru_lock is heavily contended. Some of the functions that
1430 * shrink the lists perform better by taking out a batch of pages
1431 * and working on them outside the LRU lock.
1432 *
1433 * For pagecache intensive workloads, this function is the hottest
1434 * spot in the kernel (apart from copy_*_user functions).
1435 *
1436 * Appropriate locks must be held before calling this function.
1437 *
1438 * @nr_to_scan: The number of pages to look through on the list.
1439 * @lruvec: The LRU vector to pull pages from.
1440 * @dst: The temp list to put pages on to.
1441 * @nr_scanned: The number of pages that were scanned.
1442 * @sc: The scan_control struct for this reclaim session
1443 * @mode: One of the LRU isolation modes
1444 * @lru: LRU list id for isolating
1445 *
1446 * returns how many pages were moved onto *@dst.
1447 */
1448static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1449 struct lruvec *lruvec, struct list_head *dst,
1450 unsigned long *nr_scanned, struct scan_control *sc,
1451 isolate_mode_t mode, enum lru_list lru)
1452{
1453 struct list_head *src = &lruvec->lists[lru];
1454 unsigned long nr_taken = 0;
1455 unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
1456 unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
1457 unsigned long scan, nr_pages;
1458 LIST_HEAD(pages_skipped);
1459
1460 for (scan = 0; scan < nr_to_scan && nr_taken < nr_to_scan &&
1461 !list_empty(src);) {
1462 struct page *page;
1463
1464 page = lru_to_page(src);
1465 prefetchw_prev_lru_page(page, src, flags);
1466
1467 VM_BUG_ON_PAGE(!PageLRU(page), page);
1468
1469 if (page_zonenum(page) > sc->reclaim_idx) {
1470 list_move(&page->lru, &pages_skipped);
1471 nr_skipped[page_zonenum(page)]++;
1472 continue;
1473 }
1474
1475 /*
1476		 * Account for scanned and skipped separately to avoid the pgdat
1477 * being prematurely marked unreclaimable by pgdat_reclaimable.
1478 */
1479 scan++;
1480
1481 switch (__isolate_lru_page(page, mode)) {
1482 case 0:
1483 nr_pages = hpage_nr_pages(page);
1484 nr_taken += nr_pages;
1485 nr_zone_taken[page_zonenum(page)] += nr_pages;
1486 list_move(&page->lru, dst);
1487 break;
1488
1489 case -EBUSY:
1490 /* else it is being freed elsewhere */
1491 list_move(&page->lru, src);
1492 continue;
1493
1494 default:
1495 BUG();
1496 }
1497 }
1498
1499 /*
1500 * Splice any skipped pages to the start of the LRU list. Note that
1501 * this disrupts the LRU order when reclaiming for lower zones but
1502 * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX
1503 * scanning would soon rescan the same pages to skip and put the
1504 * system at risk of premature OOM.
1505 */
1506 if (!list_empty(&pages_skipped)) {
1507 int zid;
1508 unsigned long total_skipped = 0;
1509
1510 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1511 if (!nr_skipped[zid])
1512 continue;
1513
1514 __count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
1515 total_skipped += nr_skipped[zid];
1516 }
1517
1518 /*
1519 * Account skipped pages as a partial scan as the pgdat may be
1520 * close to unreclaimable. If the LRU list is empty, account
1521 * skipped pages as a full scan.
1522 */
1523 scan += list_empty(src) ? total_skipped : total_skipped >> 2;
1524
1525 list_splice(&pages_skipped, src);
1526 }
1527 *nr_scanned = scan;
1528 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, scan,
1529 nr_taken, mode, is_file_lru(lru));
1530 update_lru_sizes(lruvec, lru, nr_zone_taken);
1531 return nr_taken;
1532}
1533
1534/**
1535 * isolate_lru_page - tries to isolate a page from its LRU list
1536 * @page: page to isolate from its LRU list
1537 *
1538 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
1539 * vmstat statistic corresponding to whatever LRU list the page was on.
1540 *
1541 * Returns 0 if the page was removed from an LRU list.
1542 * Returns -EBUSY if the page was not on an LRU list.
1543 *
1544 * The returned page will have PageLRU() cleared. If it was found on
1545 * the active list, it will have PageActive set. If it was found on
1546 * the unevictable list, it will have the PageUnevictable bit set. That flag
1547 * may need to be cleared by the caller before letting the page go.
1548 *
1549 * The vmstat statistic corresponding to the list on which the page was
1550 * found will be decremented.
1551 *
1552 * Restrictions:
1553 * (1) Must be called with an elevated refcount on the page. This is a
1554 *     fundamental difference from isolate_lru_pages (which is called
1555 * without a stable reference).
1556 * (2) the lru_lock must not be held.
1557 * (3) interrupts must be enabled.
1558 */
1559int isolate_lru_page(struct page *page)
1560{
1561 int ret = -EBUSY;
1562
1563 VM_BUG_ON_PAGE(!page_count(page), page);
1564 WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
1565
1566 if (PageLRU(page)) {
1567 struct zone *zone = page_zone(page);
1568 struct lruvec *lruvec;
1569
1570 spin_lock_irq(zone_lru_lock(zone));
1571 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
1572 if (PageLRU(page)) {
1573 int lru = page_lru(page);
1574 get_page(page);
1575 ClearPageLRU(page);
1576 del_page_from_lru_list(page, lruvec, lru);
1577 ret = 0;
1578 }
1579 spin_unlock_irq(zone_lru_lock(zone));
1580 }
1581 return ret;
1582}
1583
1584/*
1585 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
1586 * then get rescheduled. When there are a massive number of tasks doing page
1587 * allocation, such sleeping direct reclaimers may keep piling up on each CPU;
1588 * the LRU list will shrink and be scanned faster than necessary, leading to
1589 * unnecessary swapping, thrashing and OOM.
1590 */
1591static int too_many_isolated(struct pglist_data *pgdat, int file,
1592 struct scan_control *sc)
1593{
1594 unsigned long inactive, isolated;
1595
1596 if (current_is_kswapd())
1597 return 0;
1598
1599 if (!sane_reclaim(sc))
1600 return 0;
1601
1602 if (file) {
1603 inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
1604 isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
1605 } else {
1606 inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
1607 isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
1608 }
1609
1610 /*
1611 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
1612 * won't get blocked by normal direct-reclaimers, forming a circular
1613 * deadlock.
1614 */
1615 if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
1616 inactive >>= 3;
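	/*
	 * i.e. ordinary direct reclaimers (__GFP_IO and __GFP_FS both set)
	 * throttle once isolated pages exceed an eighth of the inactive
	 * list, while GFP_NOFS/GFP_NOIO callers only throttle once they
	 * exceed the whole inactive list.
	 */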
1617
1618 return isolated > inactive;
1619}
1620
1621static noinline_for_stack void
1622putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
1623{
1624 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1625 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1626 LIST_HEAD(pages_to_free);
1627
1628 /*
1629 * Put back any unfreeable pages.
1630 */
1631 while (!list_empty(page_list)) {
1632 struct page *page = lru_to_page(page_list);
1633 int lru;
1634
1635 VM_BUG_ON_PAGE(PageLRU(page), page);
1636 list_del(&page->lru);
1637 if (unlikely(!page_evictable(page))) {
1638 spin_unlock_irq(&pgdat->lru_lock);
1639 putback_lru_page(page);
1640 spin_lock_irq(&pgdat->lru_lock);
1641 continue;
1642 }
1643
1644 lruvec = mem_cgroup_page_lruvec(page, pgdat);
1645
1646 SetPageLRU(page);
1647 lru = page_lru(page);
1648 add_page_to_lru_list(page, lruvec, lru);
1649
1650 if (is_active_lru(lru)) {
1651 int file = is_file_lru(lru);
1652 int numpages = hpage_nr_pages(page);
1653 reclaim_stat->recent_rotated[file] += numpages;
1654 }
1655 if (put_page_testzero(page)) {
1656 __ClearPageLRU(page);
1657 __ClearPageActive(page);
1658 del_page_from_lru_list(page, lruvec, lru);
1659
1660 if (unlikely(PageCompound(page))) {
1661 spin_unlock_irq(&pgdat->lru_lock);
1662 mem_cgroup_uncharge(page);
1663 (*get_compound_page_dtor(page))(page);
1664 spin_lock_irq(&pgdat->lru_lock);
1665 } else
1666 list_add(&page->lru, &pages_to_free);
1667 }
1668 }
1669
1670 /*
1671 * To save our caller's stack, now use input list for pages to free.
1672 */
1673 list_splice(&pages_to_free, page_list);
1674}
1675
1676/*
1677 * If a kernel thread (such as nfsd for loop-back mounts) services
1678 * a backing device by writing to the page cache it sets PF_LESS_THROTTLE.
1679 * In that case we should only throttle if the backing device it is
1680 * writing to is congested. In other cases it is safe to throttle.
1681 */
1682static int current_may_throttle(void)
1683{
1684 return !(current->flags & PF_LESS_THROTTLE) ||
1685 current->backing_dev_info == NULL ||
1686 bdi_write_congested(current->backing_dev_info);
1687}
1688
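/*
 * For global reclaim, check whether any zone eligible for this reclaim
 * (at or below sc->reclaim_idx) still has at least SWAP_CLUSTER_MAX pages
 * on the inactive LRU in question; if not, shrinking that list is not
 * worth the trouble. Cgroup reclaim always proceeds.
 */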
1689static bool inactive_reclaimable_pages(struct lruvec *lruvec,
1690 struct scan_control *sc, enum lru_list lru)
1691{
1692 int zid;
1693 struct zone *zone;
1694 int file = is_file_lru(lru);
1695 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1696
1697 if (!global_reclaim(sc))
1698 return true;
1699
1700 for (zid = sc->reclaim_idx; zid >= 0; zid--) {
1701 zone = &pgdat->node_zones[zid];
1702 if (!managed_zone(zone))
1703 continue;
1704
1705 if (zone_page_state_snapshot(zone, NR_ZONE_LRU_BASE +
1706 LRU_FILE * file) >= SWAP_CLUSTER_MAX)
1707 return true;
1708 }
1709
1710 return false;
1711}
1712
1713/*
1714 * shrink_inactive_list() is a helper for shrink_node(). It returns the number
1715 * of reclaimed pages
1716 */
1717static noinline_for_stack unsigned long
1718shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
1719 struct scan_control *sc, enum lru_list lru)
1720{
1721 LIST_HEAD(page_list);
1722 unsigned long nr_scanned;
1723 unsigned long nr_reclaimed = 0;
1724 unsigned long nr_taken;
1725 unsigned long nr_dirty = 0;
1726 unsigned long nr_congested = 0;
1727 unsigned long nr_unqueued_dirty = 0;
1728 unsigned long nr_writeback = 0;
1729 unsigned long nr_immediate = 0;
1730 isolate_mode_t isolate_mode = 0;
1731 int file = is_file_lru(lru);
1732 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1733 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1734
1735 if (!inactive_reclaimable_pages(lruvec, sc, lru))
1736 return 0;
1737
1738 while (unlikely(too_many_isolated(pgdat, file, sc))) {
1739 congestion_wait(BLK_RW_ASYNC, HZ/10);
1740
1741 /* We are about to die and free our memory. Return now. */
1742 if (fatal_signal_pending(current))
1743 return SWAP_CLUSTER_MAX;
1744 }
1745
1746 lru_add_drain();
1747
1748 if (!sc->may_unmap)
1749 isolate_mode |= ISOLATE_UNMAPPED;
1750 if (!sc->may_writepage)
1751 isolate_mode |= ISOLATE_CLEAN;
1752
1753 spin_lock_irq(&pgdat->lru_lock);
1754
1755 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
1756 &nr_scanned, sc, isolate_mode, lru);
1757
1758 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
1759 reclaim_stat->recent_scanned[file] += nr_taken;
1760
1761 if (global_reclaim(sc)) {
1762 __mod_node_page_state(pgdat, NR_PAGES_SCANNED, nr_scanned);
1763 if (current_is_kswapd())
1764 __count_vm_events(PGSCAN_KSWAPD, nr_scanned);
1765 else
1766 __count_vm_events(PGSCAN_DIRECT, nr_scanned);
1767 }
1768 spin_unlock_irq(&pgdat->lru_lock);
1769
1770 if (nr_taken == 0)
1771 return 0;
1772
1773 nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, TTU_UNMAP,
1774 &nr_dirty, &nr_unqueued_dirty, &nr_congested,
1775 &nr_writeback, &nr_immediate,
1776 false);
1777
1778 spin_lock_irq(&pgdat->lru_lock);
1779
1780 if (global_reclaim(sc)) {
1781 if (current_is_kswapd())
1782 __count_vm_events(PGSTEAL_KSWAPD, nr_reclaimed);
1783 else
1784 __count_vm_events(PGSTEAL_DIRECT, nr_reclaimed);
1785 }
1786
1787 putback_inactive_pages(lruvec, &page_list);
1788
1789 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
1790
1791 spin_unlock_irq(&pgdat->lru_lock);
1792
1793 mem_cgroup_uncharge_list(&page_list);
1794 free_hot_cold_page_list(&page_list, true);
1795
1796 /*
1797 * If reclaim is isolating dirty pages under writeback, it implies
1798 * that the long-lived page allocation rate is exceeding the page
1799 * laundering rate. Either the global limits are not being effective
1800 * at throttling processes due to the page distribution throughout
1801 * zones or there is heavy usage of a slow backing device. The
1802 * only option is to throttle from reclaim context which is not ideal
1803 * as there is no guarantee the dirtying process is throttled in the
1804 * same way balance_dirty_pages() manages.
1805 *
1806	 * Once a pgdat is flagged PGDAT_WRITEBACK, kswapd will count the number
1807	 * of pages under writeback that are flagged for immediate reclaim and
1808	 * stall if any are encountered in the nr_immediate check below.
1809 */
1810 if (nr_writeback && nr_writeback == nr_taken)
1811 set_bit(PGDAT_WRITEBACK, &pgdat->flags);
1812
1813 /*
1814 * Legacy memcg will stall in page writeback so avoid forcibly
1815 * stalling here.
1816 */
1817 if (sane_reclaim(sc)) {
1818 /*
1819		 * Tag a node as congested if all the dirty pages scanned were
1820 * backed by a congested BDI and wait_iff_congested will stall.
1821 */
1822 if (nr_dirty && nr_dirty == nr_congested)
1823 set_bit(PGDAT_CONGESTED, &pgdat->flags);
1824
1825 /*
1826 * If dirty pages are scanned that are not queued for IO, it
1827 * implies that flushers are not keeping up. In this case, flag
1828 * the pgdat PGDAT_DIRTY and kswapd will start writing pages from
1829 * reclaim context.
1830 */
1831 if (nr_unqueued_dirty == nr_taken)
1832 set_bit(PGDAT_DIRTY, &pgdat->flags);
1833
1834 /*
1835		 * If kswapd scans pages marked for immediate
1836 * reclaim and under writeback (nr_immediate), it implies
1837 * that pages are cycling through the LRU faster than
1838 * they are written so also forcibly stall.
1839 */
1840 if (nr_immediate && current_may_throttle())
1841 congestion_wait(BLK_RW_ASYNC, HZ/10);
1842 }
1843
1844 /*
1845 * Stall direct reclaim for IO completions if underlying BDIs or zone
1846 * is congested. Allow kswapd to continue until it starts encountering
1847 * unqueued dirty pages or cycling through the LRU too quickly.
1848 */
1849 if (!sc->hibernation_mode && !current_is_kswapd() &&
1850 current_may_throttle())
1851 wait_iff_congested(pgdat, BLK_RW_ASYNC, HZ/10);
1852
1853 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
1854 nr_scanned, nr_reclaimed,
1855 sc->priority, file);
1856 return nr_reclaimed;
1857}
1858
1859/*
1860 * This moves pages from the active list to the inactive list.
1861 *
1862 * We move them the other way if the page is referenced by one or more
1863 * processes, from rmap.
1864 *
1865 * If the pages are mostly unmapped, the processing is fast and it is
1866 * appropriate to hold zone_lru_lock across the whole operation. But if
1867 * the pages are mapped, the processing is slow (page_referenced()) so we
1868 * should drop zone_lru_lock around each page. It's impossible to balance
1869 * this, so instead we remove the pages from the LRU while processing them.
1870 * It is safe to rely on PG_active against the non-LRU pages in here because
1871 * nobody will play with that bit on a non-LRU page.
1872 *
1873 * The downside is that we have to touch page->_refcount against each page.
1874 * But we had to alter page->flags anyway.
1875 */
1876
1877static void move_active_pages_to_lru(struct lruvec *lruvec,
1878 struct list_head *list,
1879 struct list_head *pages_to_free,
1880 enum lru_list lru)
1881{
1882 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1883 unsigned long pgmoved = 0;
1884 struct page *page;
1885 int nr_pages;
1886
1887 while (!list_empty(list)) {
1888 page = lru_to_page(list);
1889 lruvec = mem_cgroup_page_lruvec(page, pgdat);
1890
1891 VM_BUG_ON_PAGE(PageLRU(page), page);
1892 SetPageLRU(page);
1893
1894 nr_pages = hpage_nr_pages(page);
1895 update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
1896 list_move(&page->lru, &lruvec->lists[lru]);
1897 pgmoved += nr_pages;
1898
1899 if (put_page_testzero(page)) {
1900 __ClearPageLRU(page);
1901 __ClearPageActive(page);
1902 del_page_from_lru_list(page, lruvec, lru);
1903
1904 if (unlikely(PageCompound(page))) {
1905 spin_unlock_irq(&pgdat->lru_lock);
1906 mem_cgroup_uncharge(page);
1907 (*get_compound_page_dtor(page))(page);
1908 spin_lock_irq(&pgdat->lru_lock);
1909 } else
1910 list_add(&page->lru, pages_to_free);
1911 }
1912 }
1913
1914 if (!is_active_lru(lru))
1915 __count_vm_events(PGDEACTIVATE, pgmoved);
1916}
1917
1918static void shrink_active_list(unsigned long nr_to_scan,
1919 struct lruvec *lruvec,
1920 struct scan_control *sc,
1921 enum lru_list lru)
1922{
1923 unsigned long nr_taken;
1924 unsigned long nr_scanned;
1925 unsigned long vm_flags;
1926 LIST_HEAD(l_hold); /* The pages which were snipped off */
1927 LIST_HEAD(l_active);
1928 LIST_HEAD(l_inactive);
1929 struct page *page;
1930 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1931 unsigned long nr_rotated = 0;
1932 isolate_mode_t isolate_mode = 0;
1933 int file = is_file_lru(lru);
1934 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1935
1936 lru_add_drain();
1937
1938 if (!sc->may_unmap)
1939 isolate_mode |= ISOLATE_UNMAPPED;
1940 if (!sc->may_writepage)
1941 isolate_mode |= ISOLATE_CLEAN;
1942
1943 spin_lock_irq(&pgdat->lru_lock);
1944
1945 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
1946 &nr_scanned, sc, isolate_mode, lru);
1947
1948 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
1949 reclaim_stat->recent_scanned[file] += nr_taken;
1950
1951 if (global_reclaim(sc))
1952 __mod_node_page_state(pgdat, NR_PAGES_SCANNED, nr_scanned);
1953 __count_vm_events(PGREFILL, nr_scanned);
1954
1955 spin_unlock_irq(&pgdat->lru_lock);
1956
1957 while (!list_empty(&l_hold)) {
1958 cond_resched();
1959 page = lru_to_page(&l_hold);
1960 list_del(&page->lru);
1961
1962 if (unlikely(!page_evictable(page))) {
1963 putback_lru_page(page);
1964 continue;
1965 }
1966
1967 if (unlikely(buffer_heads_over_limit)) {
1968 if (page_has_private(page) && trylock_page(page)) {
1969 if (page_has_private(page))
1970 try_to_release_page(page, 0);
1971 unlock_page(page);
1972 }
1973 }
1974
1975 if (page_referenced(page, 0, sc->target_mem_cgroup,
1976 &vm_flags)) {
1977 nr_rotated += hpage_nr_pages(page);
1978 /*
1979 * Identify referenced, file-backed active pages and
1980			 * give them one more trip around the active list, so
1981			 * that executable code gets a better chance to stay in
1982 * memory under moderate memory pressure. Anon pages
1983 * are not likely to be evicted by use-once streaming
1984 * IO, plus JVM can create lots of anon VM_EXEC pages,
1985 * so we ignore them here.
1986 */
1987 if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
1988 list_add(&page->lru, &l_active);
1989 continue;
1990 }
1991 }
1992
1993 ClearPageActive(page); /* we are de-activating */
1994 list_add(&page->lru, &l_inactive);
1995 }
1996
1997 /*
1998 * Move pages back to the lru list.
1999 */
2000 spin_lock_irq(&pgdat->lru_lock);
2001 /*
2002 * Count referenced pages from currently used mappings as rotated,
2003 * even though only some of them are actually re-activated. This
2004 * helps balance scan pressure between file and anonymous pages in
2005 * get_scan_count.
2006 */
2007 reclaim_stat->recent_rotated[file] += nr_rotated;
2008
2009 move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
2010 move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
2011 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
2012 spin_unlock_irq(&pgdat->lru_lock);
2013
2014 mem_cgroup_uncharge_list(&l_hold);
2015 free_hot_cold_page_list(&l_hold, true);
2016}
2017
2018/*
2019 * The inactive anon list should be small enough that the VM never has
2020 * to do too much work.
2021 *
2022 * The inactive file list should be small enough to leave most memory
2023 * to the established workingset on the scan-resistant active list,
2024 * but large enough to avoid thrashing the aggregate readahead window.
2025 *
2026 * Both inactive lists should also be large enough that each inactive
2027 * page has a chance to be referenced again before it is reclaimed.
2028 *
2029 * The inactive_ratio is the target ratio of ACTIVE to INACTIVE pages
2030 * on this LRU, computed on the fly below. An inactive_ratio
2031 * of 3 means 3:1, i.e. 25% of the pages are kept on the inactive list.
2032 *
2033 * total target max
2034 * memory ratio inactive
2035 * -------------------------------------
2036 * 10MB 1 5MB
2037 * 100MB 1 50MB
2038 * 1GB 3 250MB
2039 * 10GB 10 0.9GB
2040 * 100GB 31 3GB
2041 * 1TB 101 10GB
2042 * 10TB 320 32GB
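*
* As a worked illustration (sizes chosen for the example, not from the
* table): with 8GB of pages on this LRU, gb = 8 and int_sqrt(10 * 8) = 8,
* so the inactive list is only considered low once it holds less than
* about 1/9th of the pages, i.e. roughly 0.9GB.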
2043 */
2044static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
2045 struct scan_control *sc)
2046{
2047 unsigned long inactive_ratio;
2048 unsigned long inactive, active;
2049 enum lru_list inactive_lru = file * LRU_FILE;
2050 enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE;
2051 unsigned long gb;
2052
2053 /*
2054 * If we don't have swap space, anonymous page deactivation
2055 * is pointless.
2056 */
2057 if (!file && !total_swap_pages)
2058 return false;
2059
2060 inactive = lruvec_lru_size(lruvec, inactive_lru, sc->reclaim_idx);
2061 active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx);
2062
2063 gb = (inactive + active) >> (30 - PAGE_SHIFT);
2064 if (gb)
2065 inactive_ratio = int_sqrt(10 * gb);
2066 else
2067 inactive_ratio = 1;
2068
2069 return inactive * inactive_ratio < active;
2070}
2071
2072static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
2073 struct lruvec *lruvec, struct scan_control *sc)
2074{
2075 if (is_active_lru(lru)) {
2076 if (inactive_list_is_low(lruvec, is_file_lru(lru), sc))
2077 shrink_active_list(nr_to_scan, lruvec, sc, lru);
2078 return 0;
2079 }
2080
2081 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
2082}
2083
2084enum scan_balance {
2085 SCAN_EQUAL,
2086 SCAN_FRACT,
2087 SCAN_ANON,
2088 SCAN_FILE,
2089};
2090
2091/*
2092 * Determine how aggressively the anon and file LRU lists should be
2093 * scanned. The relative value of each set of LRU lists is determined
2094 * by looking at the fraction of the scanned pages that were rotated back
2095 * onto the active list instead of being evicted.
2096 *
2097 * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
2098 * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
2099 */
2100static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
2101 struct scan_control *sc, unsigned long *nr,
2102 unsigned long *lru_pages)
2103{
2104 int swappiness = mem_cgroup_swappiness(memcg);
2105 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
2106 u64 fraction[2];
2107 u64 denominator = 0; /* gcc */
2108 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2109 unsigned long anon_prio, file_prio;
2110 enum scan_balance scan_balance;
2111 unsigned long anon, file;
2112 bool force_scan = false;
2113 unsigned long ap, fp;
2114 enum lru_list lru;
2115 bool some_scanned;
2116 int pass;
2117
2118 /*
2119 * If the zone or memcg is small, nr[lru] can be 0. This
2120 * results in no scanning on this priority and a potential
2121 * priority drop. Global direct reclaim can go to the next
2122 * zone and tends to have no problems. Global kswapd is for
2123 * zone balancing and it needs to scan a minimum amount. When
2124 * reclaiming for a memcg, a priority drop can cause high
2125 * latencies, so it's better to scan a minimum amount there as
2126 * well.
2127 */
2128 if (current_is_kswapd()) {
2129 if (!pgdat_reclaimable(pgdat))
2130 force_scan = true;
2131 if (!mem_cgroup_online(memcg))
2132 force_scan = true;
2133 }
2134 if (!global_reclaim(sc))
2135 force_scan = true;
2136
2137 /* If we have no swap space, do not bother scanning anon pages. */
2138 if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
2139 scan_balance = SCAN_FILE;
2140 goto out;
2141 }
2142
2143 /*
2144 * Global reclaim will swap to prevent OOM even with no
2145 * swappiness, but memcg users want to use this knob to
2146 * disable swapping for individual groups completely when
2147 * using the memory controller's swap limit feature would be
2148 * too expensive.
2149 */
2150 if (!global_reclaim(sc) && !swappiness) {
2151 scan_balance = SCAN_FILE;
2152 goto out;
2153 }
2154
2155 /*
2156 * Do not apply any pressure balancing cleverness when the
2157 * system is close to OOM, scan both anon and file equally
2158 * (unless the swappiness setting disagrees with swapping).
2159 */
2160 if (!sc->priority && swappiness) {
2161 scan_balance = SCAN_EQUAL;
2162 goto out;
2163 }
2164
2165 /*
2166 * Prevent the reclaimer from falling into the cache trap: as
2167 * cache pages start out inactive, every cache fault will tip
2168 * the scan balance towards the file LRU. And as the file LRU
2169 * shrinks, so does the window for rotation from references.
2170 * This means we have a runaway feedback loop where a tiny
2171 * thrashing file LRU becomes infinitely more attractive than
2172 * anon pages. Try to detect this based on file LRU size.
2173 */
2174 if (global_reclaim(sc)) {
2175 unsigned long pgdatfile;
2176 unsigned long pgdatfree;
2177 int z;
2178 unsigned long total_high_wmark = 0;
2179
2180 pgdatfree = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
2181 pgdatfile = node_page_state(pgdat, NR_ACTIVE_FILE) +
2182 node_page_state(pgdat, NR_INACTIVE_FILE);
2183
2184 for (z = 0; z < MAX_NR_ZONES; z++) {
2185 struct zone *zone = &pgdat->node_zones[z];
2186 if (!managed_zone(zone))
2187 continue;
2188
2189 total_high_wmark += high_wmark_pages(zone);
2190 }
2191
2192 if (unlikely(pgdatfile + pgdatfree <= total_high_wmark)) {
2193 scan_balance = SCAN_ANON;
2194 goto out;
2195 }
2196 }
2197
2198 /*
2199 * If there is enough inactive page cache, i.e. if the size of the
2200 * inactive list is greater than that of the active list *and* the
2201 * inactive list actually has some pages to scan on this priority, we
2202 * do not reclaim anything from the anonymous working set right now.
2203 * Without the second condition we could end up never scanning an
2204 * lruvec even if it has plenty of old anonymous pages unless the
2205 * system is under heavy pressure.
2206 */
2207 if (!inactive_list_is_low(lruvec, true, sc) &&
2208 lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) {
2209 scan_balance = SCAN_FILE;
2210 goto out;
2211 }
2212
2213 scan_balance = SCAN_FRACT;
2214
2215 /*
2216 * With swappiness at 100, anonymous and file have the same priority.
2217 * This scanning priority is essentially the inverse of IO cost.
2218 */
2219 anon_prio = swappiness;
2220 file_prio = 200 - anon_prio;
2221
2222 /*
2223 * OK, so we have swap space and a fair amount of page cache
2224 * pages. We use the recently rotated / recently scanned
2225 * ratios to determine how valuable each cache is.
2226 *
2227 * Because workloads change over time (and to avoid overflow)
2228 * we keep these statistics as a floating average, which ends
2229 * up weighing recent references more than old ones.
2230 *
2231 * anon in [0], file in [1]
2232 */
2233
2234 anon = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) +
2235 lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES);
2236 file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES) +
2237 lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, MAX_NR_ZONES);
2238
2239 spin_lock_irq(&pgdat->lru_lock);
2240 if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
2241 reclaim_stat->recent_scanned[0] /= 2;
2242 reclaim_stat->recent_rotated[0] /= 2;
2243 }
2244
2245 if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
2246 reclaim_stat->recent_scanned[1] /= 2;
2247 reclaim_stat->recent_rotated[1] /= 2;
2248 }
2249
2250 /*
2251 * The amount of pressure on anon vs file pages is inversely
2252 * proportional to the fraction of recently scanned pages on
2253 * each list that were recently referenced and in active use.
2254 */
2255 ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
2256 ap /= reclaim_stat->recent_rotated[0] + 1;
2257
2258 fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
2259 fp /= reclaim_stat->recent_rotated[1] + 1;
2260 spin_unlock_irq(&pgdat->lru_lock);
2261
2262 fraction[0] = ap;
2263 fraction[1] = fp;
2264 denominator = ap + fp + 1;
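/*
 * A worked example with made-up figures: with the default swappiness of
 * 60, anon_prio = 60 and file_prio = 140. If recent_scanned/recent_rotated
 * were 1000/100 for anon and 1000/500 for file, then ap = 60 * 1001 / 101
 * = 594 and fp = 140 * 1001 / 501 = 279, so roughly 68% of the scan
 * pressure below goes to the anon lists.
 */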
2265out:
2266 some_scanned = false;
2267 /* Only use force_scan on second pass. */
2268 for (pass = 0; !some_scanned && pass < 2; pass++) {
2269 *lru_pages = 0;
2270 for_each_evictable_lru(lru) {
2271 int file = is_file_lru(lru);
2272 unsigned long size;
2273 unsigned long scan;
2274
2275 size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
2276 scan = size >> sc->priority;
2277
2278 if (!scan && pass && force_scan)
2279 scan = min(size, SWAP_CLUSTER_MAX);
2280
2281 switch (scan_balance) {
2282 case SCAN_EQUAL:
2283 /* Scan lists relative to size */
2284 break;
2285 case SCAN_FRACT:
2286 /*
2287 * Scan types proportional to swappiness and
2288 * their relative recent reclaim efficiency.
2289 */
2290 scan = div64_u64(scan * fraction[file],
2291 denominator);
2292 break;
2293 case SCAN_FILE:
2294 case SCAN_ANON:
2295 /* Scan one type exclusively */
2296 if ((scan_balance == SCAN_FILE) != file) {
2297 size = 0;
2298 scan = 0;
2299 }
2300 break;
2301 default:
2302 /* Look ma, no brain */
2303 BUG();
2304 }
2305
2306 *lru_pages += size;
2307 nr[lru] = scan;
2308
2309 /*
2310 * Skip the second pass and don't force_scan,
2311 * if we found something to scan.
2312 */
2313 some_scanned |= !!scan;
2314 }
2315 }
2316}
2317
2318/*
2319 * This is a basic per-node page freer. Used by both kswapd and direct reclaim.
2320 */
2321static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memcg,
2322 struct scan_control *sc, unsigned long *lru_pages)
2323{
2324 struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
2325 unsigned long nr[NR_LRU_LISTS];
2326 unsigned long targets[NR_LRU_LISTS];
2327 unsigned long nr_to_scan;
2328 enum lru_list lru;
2329 unsigned long nr_reclaimed = 0;
2330 unsigned long nr_to_reclaim = sc->nr_to_reclaim;
2331 struct blk_plug plug;
2332 bool scan_adjusted;
2333
2334 get_scan_count(lruvec, memcg, sc, nr, lru_pages);
2335
2336 /* Record the original scan target for proportional adjustments later */
2337 memcpy(targets, nr, sizeof(nr));
2338
2339 /*
2340 * Global reclaim within direct reclaim at DEF_PRIORITY is a normal
2341 * event that can occur when there is little memory pressure, e.g.
2342 * multiple streaming readers/writers. Hence, we do not abort scanning
2343 * when the requested number of pages has been reclaimed while scanning
2344 * at DEF_PRIORITY: the fact that we are direct reclaiming at all
2345 * implies that kswapd is not keeping up, so it is best to do a batch
2346 * of work at once. For memcg reclaim one check is made to abort
2347 * proportional reclaim if either the file or anon lru has already
2348 * dropped to zero at the first pass.
2349 */
2350 scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
2351 sc->priority == DEF_PRIORITY);
2352
2353 blk_start_plug(&plug);
2354 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
2355 nr[LRU_INACTIVE_FILE]) {
2356 unsigned long nr_anon, nr_file, percentage;
2357 unsigned long nr_scanned;
2358
2359 for_each_evictable_lru(lru) {
2360 if (nr[lru]) {
2361 nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
2362 nr[lru] -= nr_to_scan;
2363
2364 nr_reclaimed += shrink_list(lru, nr_to_scan,
2365 lruvec, sc);
2366 }
2367 }
2368
2369 cond_resched();
2370
2371 if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
2372 continue;
2373
2374 /*
2375 * For kswapd and memcg, reclaim at least the number of pages
2376 * requested. Ensure that the anon and file LRUs are scanned in
2377 * proportion to what was requested by get_scan_count(). We
2378 * stop reclaiming one LRU and reduce the amount of scanning
2379 * proportionally to the original scan target.
2380 */
2381 nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
2382 nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
2383
2384 /*
2385 * It's just vindictive to attack the larger once the smaller
2386 * has gone to zero. And given the way we stop scanning the
2387 * smaller below, this makes sure that we only make one nudge
2388 * towards proportionality once we've got nr_to_reclaim.
2389 */
2390 if (!nr_file || !nr_anon)
2391 break;
2392
2393 if (nr_file > nr_anon) {
2394 unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
2395 targets[LRU_ACTIVE_ANON] + 1;
2396 lru = LRU_BASE;
2397 percentage = nr_anon * 100 / scan_target;
2398 } else {
2399 unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
2400 targets[LRU_ACTIVE_FILE] + 1;
2401 lru = LRU_FILE;
2402 percentage = nr_file * 100 / scan_target;
2403 }
2404
2405 /* Stop scanning the smaller of the two LRU types */
2406 nr[lru] = 0;
2407 nr[lru + LRU_ACTIVE] = 0;
2408
2409 /*
2410 * Recalculate the other LRU scan count based on its original
2411 * scan target and the percentage scanning already complete
2412 */
2413 lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
2414 nr_scanned = targets[lru] - nr[lru];
2415 nr[lru] = targets[lru] * (100 - percentage) / 100;
2416 nr[lru] -= min(nr[lru], nr_scanned);
2417
2418 lru += LRU_ACTIVE;
2419 nr_scanned = targets[lru] - nr[lru];
2420 nr[lru] = targets[lru] * (100 - percentage) / 100;
2421 nr[lru] -= min(nr[lru], nr_scanned);
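/*
 * Worked illustration (figures made up): if the original targets were
 * 200 anon and 800 file pages and 100 anon pages remained when
 * nr_to_reclaim was met, percentage = 100 * 100 / 201 = 49. Anon
 * scanning stops, and each file target is rescaled to (100 - 49)% of
 * its original value minus what has already been scanned from it.
 */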
2422
2423 scan_adjusted = true;
2424 }
2425 blk_finish_plug(&plug);
2426 sc->nr_reclaimed += nr_reclaimed;
2427
2428 /*
2429 * Even if we did not try to evict anon pages at all, we want to
2430 * rebalance the anon lru active/inactive ratio.
2431 */
2432 if (inactive_list_is_low(lruvec, false, sc))
2433 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
2434 sc, LRU_ACTIVE_ANON);
2435}
2436
2437/* Use reclaim/compaction for costly allocs or under memory pressure */
2438static bool in_reclaim_compaction(struct scan_control *sc)
2439{
2440 if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
2441 (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
2442 sc->priority < DEF_PRIORITY - 2))
2443 return true;
2444
2445 return false;
2446}
2447
2448/*
2449 * Reclaim/compaction is used for high-order allocation requests. It reclaims
2450 * order-0 pages before compacting the zone. should_continue_reclaim() returns
2451 * true if more pages should be reclaimed so that, when the page allocator
2452 * calls try_to_compact_zone(), it will have enough free pages to succeed.
2453 * It will give up earlier than that if there is difficulty reclaiming pages.
2454 */
2455static inline bool should_continue_reclaim(struct pglist_data *pgdat,
2456 unsigned long nr_reclaimed,
2457 unsigned long nr_scanned,
2458 struct scan_control *sc)
2459{
2460 unsigned long pages_for_compaction;
2461 unsigned long inactive_lru_pages;
2462 int z;
2463
2464 /* If not in reclaim/compaction mode, stop */
2465 if (!in_reclaim_compaction(sc))
2466 return false;
2467
2468 /* Consider stopping depending on scan and reclaim activity */
2469 if (sc->gfp_mask & __GFP_REPEAT) {
2470 /*
2471 * For __GFP_REPEAT allocations, stop reclaiming if the
2472 * full LRU list has been scanned and we are still failing
2473 * to reclaim pages. This full LRU scan is potentially
2474 * expensive, but a __GFP_REPEAT caller really wants to succeed.
2475 */
2476 if (!nr_reclaimed && !nr_scanned)
2477 return false;
2478 } else {
2479 /*
2480 * For non-__GFP_REPEAT allocations which can presumably
2481 * fail without consequence, stop if we failed to reclaim
2482 * any pages from the last SWAP_CLUSTER_MAX number of
2483 * pages that were scanned. This will return to the
2484 * caller faster at the risk that reclaim/compaction and
2485 * the resulting allocation attempt fail.
2486 */
2487 if (!nr_reclaimed)
2488 return false;
2489 }
2490
2491 /*
2492 * If we have not reclaimed enough pages for compaction and the
2493 * inactive lists are large enough, continue reclaiming
2494 */
2495 pages_for_compaction = compact_gap(sc->order);
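/*
 * For example, assuming compact_gap() evaluates to 2UL << order, an
 * order-9 request keeps reclaiming until 1024 base pages (4MB with 4KB
 * pages) have been freed, provided the inactive lists stay large enough.
 */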
2496 inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
2497 if (get_nr_swap_pages() > 0)
2498 inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
2499 if (sc->nr_reclaimed < pages_for_compaction &&
2500 inactive_lru_pages > pages_for_compaction)
2501 return true;
2502
2503 /* If compaction would go ahead or the allocation would succeed, stop */
2504 for (z = 0; z <= sc->reclaim_idx; z++) {
2505 struct zone *zone = &pgdat->node_zones[z];
2506 if (!managed_zone(zone))
2507 continue;
2508
2509 switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
2510 case COMPACT_SUCCESS:
2511 case COMPACT_CONTINUE:
2512 return false;
2513 default:
2514 /* check next zone */
2515 ;
2516 }
2517 }
2518 return true;
2519}
2520
2521static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
2522{
2523 struct reclaim_state *reclaim_state = current->reclaim_state;
2524 unsigned long nr_reclaimed, nr_scanned;
2525 bool reclaimable = false;
2526
2527 do {
2528 struct mem_cgroup *root = sc->target_mem_cgroup;
2529 struct mem_cgroup_reclaim_cookie reclaim = {
2530 .pgdat = pgdat,
2531 .priority = sc->priority,
2532 };
2533 unsigned long node_lru_pages = 0;
2534 struct mem_cgroup *memcg;
2535
2536 nr_reclaimed = sc->nr_reclaimed;
2537 nr_scanned = sc->nr_scanned;
2538
2539 memcg = mem_cgroup_iter(root, NULL, &reclaim);
2540 do {
2541 unsigned long lru_pages;
2542 unsigned long reclaimed;
2543 unsigned long scanned;
2544
2545 if (mem_cgroup_low(root, memcg)) {
2546 if (!sc->may_thrash)
2547 continue;
2548 mem_cgroup_events(memcg, MEMCG_LOW, 1);
2549 }
2550
2551 reclaimed = sc->nr_reclaimed;
2552 scanned = sc->nr_scanned;
2553
2554 shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
2555 node_lru_pages += lru_pages;
2556
2557 if (memcg)
2558 shrink_slab(sc->gfp_mask, pgdat->node_id,
2559 memcg, sc->nr_scanned - scanned,
2560 lru_pages);
2561
2562 /* Record the group's reclaim efficiency */
2563 vmpressure(sc->gfp_mask, memcg, false,
2564 sc->nr_scanned - scanned,
2565 sc->nr_reclaimed - reclaimed);
2566
2567 /*
2568 * Direct reclaim and kswapd have to scan all memory
2569 * cgroups to fulfill the overall scan target for the
2570 * node.
2571 *
2572 * Limit reclaim, on the other hand, only cares about
2573 * nr_to_reclaim pages to be reclaimed and it will
2574 * retry with decreasing priority if one round over the
2575 * whole hierarchy is not sufficient.
2576 */
2577 if (!global_reclaim(sc) &&
2578 sc->nr_reclaimed >= sc->nr_to_reclaim) {
2579 mem_cgroup_iter_break(root, memcg);
2580 break;
2581 }
2582 } while ((memcg = mem_cgroup_iter(root, memcg, &reclaim)));
2583
2584 /*
2585 * Shrink the slab caches in the same proportion that
2586 * the eligible LRU pages were scanned.
2587 */
2588 if (global_reclaim(sc))
2589 shrink_slab(sc->gfp_mask, pgdat->node_id, NULL,
2590 sc->nr_scanned - nr_scanned,
2591 node_lru_pages);
2592
2593 if (reclaim_state) {
2594 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
2595 reclaim_state->reclaimed_slab = 0;
2596 }
2597
2598 /* Record the subtree's reclaim efficiency */
2599 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
2600 sc->nr_scanned - nr_scanned,
2601 sc->nr_reclaimed - nr_reclaimed);
2602
2603 if (sc->nr_reclaimed - nr_reclaimed)
2604 reclaimable = true;
2605
2606 } while (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
2607 sc->nr_scanned - nr_scanned, sc));
2608
2609 return reclaimable;
2610}
2611
2612/*
2613 * Returns true if compaction should go ahead for a costly-order request, or
2614 * the allocation would already succeed without compaction. Return false if we
2615 * should reclaim first.
2616 */
2617static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
2618{
2619 unsigned long watermark;
2620 enum compact_result suitable;
2621
2622 suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx);
2623 if (suitable == COMPACT_SUCCESS)
2624 /* Allocation should succeed already. Don't reclaim. */
2625 return true;
2626 if (suitable == COMPACT_SKIPPED)
2627 /* Compaction cannot yet proceed. Do reclaim. */
2628 return false;
2629
2630 /*
2631 * Compaction is already possible, but it takes time to run and there
2632 * are potentially other callers using the pages just freed. So proceed
2633 * with reclaim to make a buffer of free pages available to give
2634 * compaction a reasonable chance of completing and allocating the page.
2635 * Note that we won't actually reclaim the whole buffer in one attempt
2636 * as the target watermark in should_continue_reclaim() is lower. But if
2637 * we are already above the high+gap watermark, don't reclaim at all.
2638 */
2639 watermark = high_wmark_pages(zone) + compact_gap(sc->order);
2640
2641 return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
2642}
2643
2644/*
2645 * This is the direct reclaim path, for page-allocating processes. We only
2646 * try to reclaim pages from zones which will satisfy the caller's allocation
2647 * request.
2648 *
2649 * If a zone is deemed to be full of pinned pages then just give it a light
2650 * scan then give up on it.
2651 */
2652static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
2653{
2654 struct zoneref *z;
2655 struct zone *zone;
2656 unsigned long nr_soft_reclaimed;
2657 unsigned long nr_soft_scanned;
2658 gfp_t orig_mask;
2659 pg_data_t *last_pgdat = NULL;
2660
2661 /*
2662 * If the number of buffer_heads in the machine exceeds the maximum
2663 * allowed level, force direct reclaim to scan the highmem zone as
2664 * highmem pages could be pinning lowmem pages storing buffer_heads
2665 */
2666 orig_mask = sc->gfp_mask;
2667 if (buffer_heads_over_limit) {
2668 sc->gfp_mask |= __GFP_HIGHMEM;
2669 sc->reclaim_idx = gfp_zone(sc->gfp_mask);
2670 }
2671
2672 for_each_zone_zonelist_nodemask(zone, z, zonelist,
2673 sc->reclaim_idx, sc->nodemask) {
2674 /*
2675 * Take care that memory controller (memcg) reclaim has only a
2676 * small influence on the global LRU.
2677 */
2678 if (global_reclaim(sc)) {
2679 if (!cpuset_zone_allowed(zone,
2680 GFP_KERNEL | __GFP_HARDWALL))
2681 continue;
2682
2683 if (sc->priority != DEF_PRIORITY &&
2684 !pgdat_reclaimable(zone->zone_pgdat))
2685 continue; /* Let kswapd poll it */
2686
2687 /*
2688 * If we already have plenty of memory free for
2689 * compaction in this zone, don't free any more.
2690 * Even though compaction is invoked for any
2691 * non-zero order, only frequent costly order
2692 * reclamation is disruptive enough to become a
2693 * noticeable problem, like transparent huge
2694 * page allocations.
2695 */
2696 if (IS_ENABLED(CONFIG_COMPACTION) &&
2697 sc->order > PAGE_ALLOC_COSTLY_ORDER &&
2698 compaction_ready(zone, sc)) {
2699 sc->compaction_ready = true;
2700 continue;
2701 }
2702
2703 /*
2704 * Shrink each node in the zonelist once. If the
2705 * zonelist is ordered by zone (not the default) then a
2706 * node may be shrunk multiple times but in that case
2707 * the user prefers lower zones being preserved.
2708 */
2709 if (zone->zone_pgdat == last_pgdat)
2710 continue;
2711
2712 /*
2713 * This steals pages from memory cgroups over softlimit
2714 * and returns the number of reclaimed pages and
2715 * scanned pages. This works for global memory pressure
2716 * and balancing, not for a memcg's limit.
2717 */
2718 nr_soft_scanned = 0;
2719 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat,
2720 sc->order, sc->gfp_mask,
2721 &nr_soft_scanned);
2722 sc->nr_reclaimed += nr_soft_reclaimed;
2723 sc->nr_scanned += nr_soft_scanned;
2724 /* need some check here to avoid more shrink_node() calls */
2725 }
2726
2727 /* See comment about same check for global reclaim above */
2728 if (zone->zone_pgdat == last_pgdat)
2729 continue;
2730 last_pgdat = zone->zone_pgdat;
2731 shrink_node(zone->zone_pgdat, sc);
2732 }
2733
2734 /*
2735 * Restore to original mask to avoid the impact on the caller if we
2736 * promoted it to __GFP_HIGHMEM.
2737 */
2738 sc->gfp_mask = orig_mask;
2739}
2740
2741/*
2742 * This is the main entry point to direct page reclaim.
2743 *
2744 * If a full scan of the inactive list fails to free enough memory then we
2745 * are "out of memory" and something needs to be killed.
2746 *
2747 * If the caller is !__GFP_FS then the probability of a failure is reasonably
2748 * high - the zone may be full of dirty or under-writeback pages, which this
2749 * caller can't do much about. We kick the writeback threads and take explicit
2750 * naps in the hope that some of these pages can be written. But if the
2751 * allocating task holds filesystem locks which prevent writeout this might not
2752 * work, and the allocation attempt will fail.
2753 *
2754 * returns: 0, if no pages reclaimed
2755 * else, the number of pages reclaimed
2756 */
2757static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2758 struct scan_control *sc)
2759{
2760 int initial_priority = sc->priority;
2761 unsigned long total_scanned = 0;
2762 unsigned long writeback_threshold;
2763retry:
2764 delayacct_freepages_start();
2765
2766 if (global_reclaim(sc))
2767 __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);
2768
2769 do {
2770 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
2771 sc->priority);
2772 sc->nr_scanned = 0;
2773 shrink_zones(zonelist, sc);
2774
2775 total_scanned += sc->nr_scanned;
2776 if (sc->nr_reclaimed >= sc->nr_to_reclaim)
2777 break;
2778
2779 if (sc->compaction_ready)
2780 break;
2781
2782 /*
2783 * If we're having trouble reclaiming, start doing
2784 * writepage even in laptop mode.
2785 */
2786 if (sc->priority < DEF_PRIORITY - 2)
2787 sc->may_writepage = 1;
2788
2789 /*
2790 * Try to write back as many pages as we just scanned. This
2791 * tends to cause slow streaming writers to write data to the
2792 * disk smoothly, at the dirtying rate, which is nice. But
2793 * that's undesirable in laptop mode, where we *want* lumpy
2794 * writeout. So in laptop mode, write out the whole world.
2795 */
2796 writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
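/*
 * Illustration: for direct reclaim nr_to_reclaim is SWAP_CLUSTER_MAX
 * (typically 32), so the flusher threads are only woken once more than
 * 48 pages have been scanned in total.
 */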
2797 if (total_scanned > writeback_threshold) {
2798 wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
2799 WB_REASON_TRY_TO_FREE_PAGES);
2800 sc->may_writepage = 1;
2801 }
2802 } while (--sc->priority >= 0);
2803
2804 delayacct_freepages_end();
2805
2806 if (sc->nr_reclaimed)
2807 return sc->nr_reclaimed;
2808
2809 /* Aborted reclaim to try compaction? don't OOM, then */
2810 if (sc->compaction_ready)
2811 return 1;
2812
2813 /* Untapped cgroup reserves? Don't OOM, retry. */
2814 if (!sc->may_thrash) {
2815 sc->priority = initial_priority;
2816 sc->may_thrash = 1;
2817 goto retry;
2818 }
2819
2820 return 0;
2821}
2822
2823static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
2824{
2825 struct zone *zone;
2826 unsigned long pfmemalloc_reserve = 0;
2827 unsigned long free_pages = 0;
2828 int i;
2829 bool wmark_ok;
2830
2831 for (i = 0; i <= ZONE_NORMAL; i++) {
2832 zone = &pgdat->node_zones[i];
2833 if (!managed_zone(zone) ||
2834 pgdat_reclaimable_pages(pgdat) == 0)
2835 continue;
2836
2837 pfmemalloc_reserve += min_wmark_pages(zone);
2838 free_pages += zone_page_state(zone, NR_FREE_PAGES);
2839 }
2840
2841 /* If there are no reserves (unexpected config) then do not throttle */
2842 if (!pfmemalloc_reserve)
2843 return true;
2844
2845 wmark_ok = free_pages > pfmemalloc_reserve / 2;
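/*
 * Hypothetical example: with min watermarks of 2048 and 8192 pages on the
 * two lowest zones, pfmemalloc_reserve is 10240 pages and direct
 * reclaimers are throttled once free pages drop to 5120 (half the
 * reserve) or below.
 */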
2846
2847 /* kswapd must be awake if processes are being throttled */
2848 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
2849 pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx,
2850 (enum zone_type)ZONE_NORMAL);
2851 wake_up_interruptible(&pgdat->kswapd_wait);
2852 }
2853
2854 return wmark_ok;
2855}
2856
2857/*
2858 * Throttle direct reclaimers if backing storage is backed by the network
2859 * and the PFMEMALLOC reserve for the preferred node is getting dangerously
2860 * depleted. kswapd will continue to make progress and wake the processes
2861 * when the low watermark is reached.
2862 *
2863 * Returns true if a fatal signal was delivered during throttling. If this
2864 * happens, the page allocator should not consider triggering the OOM killer.
2865 */
2866static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
2867 nodemask_t *nodemask)
2868{
2869 struct zoneref *z;
2870 struct zone *zone;
2871 pg_data_t *pgdat = NULL;
2872
2873 /*
2874 * Kernel threads should not be throttled as they may be indirectly
2875 * responsible for cleaning pages necessary for reclaim to make forward
2876 * progress. kjournald for example may enter direct reclaim while
2877 * committing a transaction, where throttling it could force other
2878 * processes to block on log_wait_commit().
2879 */
2880 if (current->flags & PF_KTHREAD)
2881 goto out;
2882
2883 /*
2884 * If a fatal signal is pending, this process should not throttle.
2885 * It should return quickly so it can exit and free its memory.
2886 */
2887 if (fatal_signal_pending(current))
2888 goto out;
2889
2890 /*
2891 * Check if the pfmemalloc reserves are ok by finding the first node
2892 * with a usable ZONE_NORMAL or lower zone. The expectation is that
2893 * GFP_KERNEL will be required for allocating network buffers when
2894 * swapping over the network so ZONE_HIGHMEM is unusable.
2895 *
2896 * Throttling is based on the first usable node and throttled processes
2897 * wait on a queue until kswapd makes progress and wakes them. There
2898 * is an affinity then between processes waking up and where reclaim
2899 * progress has been made assuming the process wakes on the same node.
2900 * More importantly, processes running on remote nodes will not compete
2901 * for remote pfmemalloc reserves and processes on different nodes
2902 * should make reasonable progress.
2903 */
2904 for_each_zone_zonelist_nodemask(zone, z, zonelist,
2905 gfp_zone(gfp_mask), nodemask) {
2906 if (zone_idx(zone) > ZONE_NORMAL)
2907 continue;
2908
2909 /* Throttle based on the first usable node */
2910 pgdat = zone->zone_pgdat;
2911 if (pfmemalloc_watermark_ok(pgdat))
2912 goto out;
2913 break;
2914 }
2915
2916 /* If no zone was usable by the allocation flags then do not throttle */
2917 if (!pgdat)
2918 goto out;
2919
2920 /* Account for the throttling */
2921 count_vm_event(PGSCAN_DIRECT_THROTTLE);
2922
2923 /*
2924 * If the caller cannot enter the filesystem, it's possible that it
2925 * is due to the caller holding an FS lock or performing a journal
2926 * transaction in the case of a filesystem like ext[3|4]. In this case,
2927 * it is not safe to block on pfmemalloc_wait as kswapd could be
2928 * blocked waiting on the same lock. Instead, throttle for up to a
2929 * second before continuing.
2930 */
2931 if (!(gfp_mask & __GFP_FS)) {
2932 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
2933 pfmemalloc_watermark_ok(pgdat), HZ);
2934
2935 goto check_pending;
2936 }
2937
2938 /* Throttle until kswapd wakes the process */
2939 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
2940 pfmemalloc_watermark_ok(pgdat));
2941
2942check_pending:
2943 if (fatal_signal_pending(current))
2944 return true;
2945
2946out:
2947 return false;
2948}
2949
2950unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
2951 gfp_t gfp_mask, nodemask_t *nodemask)
2952{
2953 unsigned long nr_reclaimed;
2954 struct scan_control sc = {
2955 .nr_to_reclaim = SWAP_CLUSTER_MAX,
2956 .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
2957 .reclaim_idx = gfp_zone(gfp_mask),
2958 .order = order,
2959 .nodemask = nodemask,
2960 .priority = DEF_PRIORITY,
2961 .may_writepage = !laptop_mode,
2962 .may_unmap = 1,
2963 .may_swap = 1,
2964 };
2965
2966 /*
2967 * Do not enter reclaim if fatal signal was delivered while throttled.
2968 * 1 is returned so that the page allocator does not OOM kill at this
2969 * point.
2970 */
2971 if (throttle_direct_reclaim(gfp_mask, zonelist, nodemask))
2972 return 1;
2973
2974 trace_mm_vmscan_direct_reclaim_begin(order,
2975 sc.may_writepage,
2976 gfp_mask,
2977 sc.reclaim_idx);
2978
2979 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
2980
2981 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
2982
2983 return nr_reclaimed;
2984}
2985
2986#ifdef CONFIG_MEMCG
2987
2988unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
2989 gfp_t gfp_mask, bool noswap,
2990 pg_data_t *pgdat,
2991 unsigned long *nr_scanned)
2992{
2993 struct scan_control sc = {
2994 .nr_to_reclaim = SWAP_CLUSTER_MAX,
2995 .target_mem_cgroup = memcg,
2996 .may_writepage = !laptop_mode,
2997 .may_unmap = 1,
2998 .reclaim_idx = MAX_NR_ZONES - 1,
2999 .may_swap = !noswap,
3000 };
3001 unsigned long lru_pages;
3002
3003 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
3004 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
3005
3006 trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
3007 sc.may_writepage,
3008 sc.gfp_mask,
3009 sc.reclaim_idx);
3010
3011 /*
3012 * NOTE: Although we can get the priority field, using it
3013 * here is not a good idea, since it limits the pages we can scan.
3014 * If we don't reclaim here, the shrink_node from balance_pgdat
3015 * will pick up pages from other mem cgroups as well. We hack
3016 * the priority and make it zero.
3017 */
3018 shrink_node_memcg(pgdat, memcg, &sc, &lru_pages);
3019
3020 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
3021
3022 *nr_scanned = sc.nr_scanned;
3023 return sc.nr_reclaimed;
3024}
3025
3026unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
3027 unsigned long nr_pages,
3028 gfp_t gfp_mask,
3029 bool may_swap)
3030{
3031 struct zonelist *zonelist;
3032 unsigned long nr_reclaimed;
3033 int nid;
3034 struct scan_control sc = {
3035 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
3036 .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
3037 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
3038 .reclaim_idx = MAX_NR_ZONES - 1,
3039 .target_mem_cgroup = memcg,
3040 .priority = DEF_PRIORITY,
3041 .may_writepage = !laptop_mode,
3042 .may_unmap = 1,
3043 .may_swap = may_swap,
3044 };
3045
3046 /*
3047 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
3048 * care which node the pages come from. So the node where we start the
3049 * scan does not need to be the current node.
3050 */
3051 nid = mem_cgroup_select_victim_node(memcg);
3052
3053 zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
3054
3055 trace_mm_vmscan_memcg_reclaim_begin(0,
3056 sc.may_writepage,
3057 sc.gfp_mask,
3058 sc.reclaim_idx);
3059
3060 current->flags |= PF_MEMALLOC;
3061 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
3062 current->flags &= ~PF_MEMALLOC;
3063
3064 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
3065
3066 return nr_reclaimed;
3067}
3068#endif
3069
3070static void age_active_anon(struct pglist_data *pgdat,
3071 struct scan_control *sc)
3072{
3073 struct mem_cgroup *memcg;
3074
3075 if (!total_swap_pages)
3076 return;
3077
3078 memcg = mem_cgroup_iter(NULL, NULL, NULL);
3079 do {
3080 struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
3081
3082 if (inactive_list_is_low(lruvec, false, sc))
3083 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
3084 sc, LRU_ACTIVE_ANON);
3085
3086 memcg = mem_cgroup_iter(NULL, memcg, NULL);
3087 } while (memcg);
3088}
3089
3090static bool zone_balanced(struct zone *zone, int order, int classzone_idx)
3091{
3092 unsigned long mark = high_wmark_pages(zone);
3093
3094 if (!zone_watermark_ok_safe(zone, order, mark, classzone_idx))
3095 return false;
3096
3097 /*
3098 * If any eligible zone is balanced then the node is not considered
3099 * to be congested or dirty
3100 */
3101 clear_bit(PGDAT_CONGESTED, &zone->zone_pgdat->flags);
3102 clear_bit(PGDAT_DIRTY, &zone->zone_pgdat->flags);
3103
3104 return true;
3105}
3106
3107/*
3108 * Prepare kswapd for sleeping. This verifies that there are no processes
3109 * waiting in throttle_direct_reclaim() and that watermarks have been met.
3110 *
3111 * Returns true if kswapd is ready to sleep
3112 */
3113static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx)
3114{
3115 int i;
3116
3117 /*
3118 * The throttled processes are normally woken up in balance_pgdat() as
3119 * soon as pfmemalloc_watermark_ok() is true. But there is a potential
3120 * race between when kswapd checks the watermarks and a process gets
3121 * throttled. There is also a potential race if processes get
3122 * throttled, kswapd wakes, a large process exits thereby balancing the
3123 * zones, which causes kswapd to exit balance_pgdat() before reaching
3124 * the wake up checks. If kswapd is going to sleep, no process should
3125 * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
3126 * the wake up is premature, processes will wake kswapd and get
3127 * throttled again. The difference from wake ups in balance_pgdat() is
3128 * that here we are under prepare_to_wait().
3129 */
3130 if (waitqueue_active(&pgdat->pfmemalloc_wait))
3131 wake_up_all(&pgdat->pfmemalloc_wait);
3132
3133 for (i = 0; i <= classzone_idx; i++) {
3134 struct zone *zone = pgdat->node_zones + i;
3135
3136 if (!managed_zone(zone))
3137 continue;
3138
3139 if (!zone_balanced(zone, order, classzone_idx))
3140 return false;
3141 }
3142
3143 return true;
3144}
3145
3146/*
3147 * kswapd shrinks a node of pages that are at or below the highest usable
3148 * zone that is currently unbalanced.
3149 *
3150 * Returns true if kswapd scanned at least the requested number of pages to
3151 * reclaim or if the lack of progress was due to pages under writeback.
3152 * This is used to determine if the scanning priority needs to be raised.
3153 */
3154static bool kswapd_shrink_node(pg_data_t *pgdat,
3155 struct scan_control *sc)
3156{
3157 struct zone *zone;
3158 int z;
3159
3160 /* Reclaim a number of pages proportional to the number of zones */
3161 sc->nr_to_reclaim = 0;
3162 for (z = 0; z <= sc->reclaim_idx; z++) {
3163 zone = pgdat->node_zones + z;
3164 if (!managed_zone(zone))
3165 continue;
3166
3167 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
3168 }
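/*
 * Illustrative example: on a node with two managed zones whose high
 * watermarks are 1000 and 4000 pages, this pass targets 5000 pages;
 * each managed zone contributes at least SWAP_CLUSTER_MAX pages.
 */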
3169
3170 /*
3171 * Historically care was taken to put equal pressure on all zones but
3172 * now pressure is applied based on node LRU order.
3173 */
3174 shrink_node(pgdat, sc);
3175
3176 /*
3177 * Fragmentation may mean that the system cannot be rebalanced for
3178 * high-order allocations. If twice the allocation size has been
3179 * reclaimed then recheck watermarks only at order-0 to prevent
3180 * excessive reclaim. Assume that a process that requested a
3181 * high-order allocation can itself direct reclaim/compact.
3182 */
3183 if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
3184 sc->order = 0;
3185
3186 return sc->nr_scanned >= sc->nr_to_reclaim;
3187}
3188
3189/*
3190 * For kswapd, balance_pgdat() will reclaim pages across a node from zones
3191 * that are eligible for use by the caller until at least one zone is
3192 * balanced.
3193 *
3194 * Returns the order kswapd finished reclaiming at.
3195 *
3196 * kswapd scans the zones in the highmem->normal->dma direction. It skips
3197 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
3198 * found to have free_pages <= high_wmark_pages(zone), any page in that zone
3199 * or lower is eligible for reclaim until at least one usable zone is
3200 * balanced.
3201 */
3202static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
3203{
3204 int i;
3205 unsigned long nr_soft_reclaimed;
3206 unsigned long nr_soft_scanned;
3207 struct zone *zone;
3208 struct scan_control sc = {
3209 .gfp_mask = GFP_KERNEL,
3210 .order = order,
3211 .priority = DEF_PRIORITY,
3212 .may_writepage = !laptop_mode,
3213 .may_unmap = 1,
3214 .may_swap = 1,
3215 };
3216 count_vm_event(PAGEOUTRUN);
3217
3218 do {
3219 bool raise_priority = true;
3220
3221 sc.nr_reclaimed = 0;
3222 sc.reclaim_idx = classzone_idx;
3223
3224 /*
3225 * If the number of buffer_heads exceeds the maximum allowed
3226 * then consider reclaiming from all zones. This has a dual
3227 * purpose -- on 64-bit systems it is expected that
3228 * buffer_heads are stripped during active rotation. On 32-bit
3229 * systems, highmem pages can pin lowmem memory and shrinking
3230 * buffers can relieve lowmem pressure. Reclaim may still not
3231 * go ahead if all eligible zones for the original allocation
3232 * request are balanced to avoid excessive reclaim from kswapd.
3233 */
3234 if (buffer_heads_over_limit) {
3235 for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
3236 zone = pgdat->node_zones + i;
3237 if (!managed_zone(zone))
3238 continue;
3239
3240 sc.reclaim_idx = i;
3241 break;
3242 }
3243 }
3244
3245 /*
3246 * Only reclaim if there are no eligible zones. Check from
3247 * high to low zone as allocations prefer higher zones.
3248 * Scanning from low to high zone would allow congestion to be
3249 * cleared during a very small window when a small low
3250 * zone was balanced even under extreme pressure when the
3251 * overall node may be congested. Note that sc.reclaim_idx
3252 * is not used as buffer_heads_over_limit may have adjusted
3253 * it.
3254 */
3255 for (i = classzone_idx; i >= 0; i--) {
3256 zone = pgdat->node_zones + i;
3257 if (!managed_zone(zone))
3258 continue;
3259
3260 if (zone_balanced(zone, sc.order, classzone_idx))
3261 goto out;
3262 }
3263
3264 /*
3265 * Do some background aging of the anon list, to give
3266 * pages a chance to be referenced before reclaiming. All
3267 * pages are rotated regardless of classzone as this is
3268 * about consistent aging.
3269 */
3270 age_active_anon(pgdat, &sc);
3271
3272 /*
3273 * If we're having trouble reclaiming, start doing writepage
3274 * even in laptop mode.
3275 */
3276 if (sc.priority < DEF_PRIORITY - 2 || !pgdat_reclaimable(pgdat))
3277 sc.may_writepage = 1;
3278
3279 /* Call soft limit reclaim before calling shrink_node. */
3280 sc.nr_scanned = 0;
3281 nr_soft_scanned = 0;
3282 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order,
3283 sc.gfp_mask, &nr_soft_scanned);
3284 sc.nr_reclaimed += nr_soft_reclaimed;
3285
3286 /*
3287 * There should be no need to raise the scanning priority if
3288 * enough pages are already being scanned that the high
3289 * watermark would be met at 100% efficiency.
3290 */
3291 if (kswapd_shrink_node(pgdat, &sc))
3292 raise_priority = false;
3293
3294 /*
3295 * If the low watermark is met there is no need for processes
3296 * to be throttled on pfmemalloc_wait as they should now be
3297 * able to safely make forward progress. Wake them.
3298 */
3299 if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
3300 pfmemalloc_watermark_ok(pgdat))
3301 wake_up_all(&pgdat->pfmemalloc_wait);
3302
3303 /* Check if kswapd should be suspending */
3304 if (try_to_freeze() || kthread_should_stop())
3305 break;
3306
3307 /*
3308 * Raise priority if scanning rate is too low or there was no
3309 * progress in reclaiming pages
3310 */
3311 if (raise_priority || !sc.nr_reclaimed)
3312 sc.priority--;
3313 } while (sc.priority >= 1);
3314
3315out:
3316 /*
3317 * Return the order kswapd stopped reclaiming at as
3318 * prepare_kswapd_sleep() takes it into account. If another caller
3319 * entered the allocator slow path while kswapd was awake, order will
3320 * remain at the higher level.
3321 */
3322 return sc.order;
3323}
3324
3325static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
3326 unsigned int classzone_idx)
3327{
3328 long remaining = 0;
3329 DEFINE_WAIT(wait);
3330
3331 if (freezing(current) || kthread_should_stop())
3332 return;
3333
3334 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
3335
3336 /* Try to sleep for a short interval */
3337 if (prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) {
3338 /*
3339 * Compaction records what page blocks it recently failed to
3340 * isolate pages from and skips them in the future scanning.
3341 * When kswapd is going to sleep, it is reasonable to assume
3342 * that page allocation and compaction may succeed, so reset the cache.
3343 */
3344 reset_isolation_suitable(pgdat);
3345
3346 /*
3347 * We have freed the memory, now we should compact it to make
3348 * allocation of the requested order possible.
3349 */
3350 wakeup_kcompactd(pgdat, alloc_order, classzone_idx);
3351
3352 remaining = schedule_timeout(HZ/10);
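/* The short nap above is HZ/10 jiffies, i.e. roughly 100ms at any HZ. */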
3353
3354 /*
3355 * If woken prematurely then reset kswapd_classzone_idx and
3356 * order. The values will either be from a wakeup request or
3357 * the previous request that slept prematurely.
3358 */
3359 if (remaining) {
3360 pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx, classzone_idx);
3361 pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order);
3362 }
3363
3364 finish_wait(&pgdat->kswapd_wait, &wait);
3365 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
3366 }
3367
3368 /*
3369 * After a short sleep, check if it was a premature sleep. If not, then
3370 * go fully to sleep until explicitly woken up.
3371 */
3372 if (!remaining &&
3373 prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) {
3374 trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
3375
3376 /*
3377 * vmstat counters are not perfectly accurate and the estimated
3378 * value for counters such as NR_FREE_PAGES can deviate from the
3379 * true value by nr_online_cpus * threshold. To avoid the zone
3380 * watermarks being breached while under pressure, we reduce the
3381 * per-cpu vmstat threshold while kswapd is awake and restore
3382 * them before going back to sleep.
3383 */
3384 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
3385
3386 if (!kthread_should_stop())
3387 schedule();
3388
3389 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
3390 } else {
3391 if (remaining)
3392 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
3393 else
3394 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
3395 }
3396 finish_wait(&pgdat->kswapd_wait, &wait);
3397}
3398
3399/*
3400 * The background pageout daemon, started as a kernel thread
3401 * from the init process.
3402 *
3403 * This basically trickles out pages so that we have _some_
3404 * free memory available even if there is no other activity
3405 * that frees anything up. This is needed for things like routing
3406 * etc, where we otherwise might have all activity going on in
3407 * asynchronous contexts that cannot page things out.
3408 *
3409 * If there are applications that are active memory-allocators
3410 * (most normal use), this basically shouldn't matter.
3411 */
3412static int kswapd(void *p)
3413{
3414 unsigned int alloc_order, reclaim_order, classzone_idx;
3415 pg_data_t *pgdat = (pg_data_t*)p;
3416 struct task_struct *tsk = current;
3417
3418 struct reclaim_state reclaim_state = {
3419 .reclaimed_slab = 0,
3420 };
3421 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
3422
3423 lockdep_set_current_reclaim_state(GFP_KERNEL);
3424
3425 if (!cpumask_empty(cpumask))
3426 set_cpus_allowed_ptr(tsk, cpumask);
3427 current->reclaim_state = &reclaim_state;
3428
3429 /*
3430 * Tell the memory management that we're a "memory allocator",
3431 * and that if we need more memory we should get access to it
3432 * regardless (see "__alloc_pages()"). "kswapd" should
3433 * never get caught in the normal page freeing logic.
3434 *
3435 * (Kswapd normally doesn't need memory anyway, but sometimes
3436 * you need a small amount of memory in order to be able to
3437 * page out something else, and this flag essentially protects
3438 * us from recursively trying to free more memory as we're
3439 * trying to free the first piece of memory in the first place).
3440 */
3441 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
3442 set_freezable();
3443
3444 pgdat->kswapd_order = alloc_order = reclaim_order = 0;
3445 pgdat->kswapd_classzone_idx = classzone_idx = 0;
3446 for ( ; ; ) {
3447 bool ret;
3448
3449kswapd_try_sleep:
3450 kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
3451 classzone_idx);
3452
3453 /* Read the new order and classzone_idx */
3454 alloc_order = reclaim_order = pgdat->kswapd_order;
3455 classzone_idx = pgdat->kswapd_classzone_idx;
3456 pgdat->kswapd_order = 0;
3457 pgdat->kswapd_classzone_idx = 0;
3458
3459 ret = try_to_freeze();
3460 if (kthread_should_stop())
3461 break;
3462
3463 /*
3464 * We can speed up thawing tasks if we don't call balance_pgdat
3465 * after returning from the refrigerator
3466 */
3467 if (ret)
3468 continue;
3469
3470 /*
3471 * Reclaim begins at the requested order but if a high-order
3472 * reclaim fails then kswapd falls back to reclaiming for
3473 * order-0. If that happens, kswapd will consider sleeping
3474 * for the order it finished reclaiming at (reclaim_order)
3475 * but kcompactd is woken to compact for the original
3476 * request (alloc_order).
3477 */
3478 trace_mm_vmscan_kswapd_wake(pgdat->node_id, classzone_idx,
3479 alloc_order);
3480 reclaim_order = balance_pgdat(pgdat, alloc_order, classzone_idx);
3481 if (reclaim_order < alloc_order)
3482 goto kswapd_try_sleep;
3483
3484 alloc_order = reclaim_order = pgdat->kswapd_order;
3485 classzone_idx = pgdat->kswapd_classzone_idx;
3486 }
3487
3488 tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
3489 current->reclaim_state = NULL;
3490 lockdep_clear_current_reclaim_state();
3491
3492 return 0;
3493}
3494
3495/*
3496 * A zone is low on free memory, so wake its kswapd task to service it.
3497 */
3498void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
3499{
3500 pg_data_t *pgdat;
3501 int z;
3502
3503 if (!managed_zone(zone))
3504 return;
3505
3506 if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL))
3507 return;
3508 pgdat = zone->zone_pgdat;
3509 pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx, classzone_idx);
3510 pgdat->kswapd_order = max(pgdat->kswapd_order, order);
3511 if (!waitqueue_active(&pgdat->kswapd_wait))
3512 return;
3513
3514 /* Only wake kswapd if all zones are unbalanced */
3515 for (z = 0; z <= classzone_idx; z++) {
3516 zone = pgdat->node_zones + z;
3517 if (!managed_zone(zone))
3518 continue;
3519
3520 if (zone_balanced(zone, order, classzone_idx))
3521 return;
3522 }
3523
3524 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
3525 wake_up_interruptible(&pgdat->kswapd_wait);
3526}
3527
3528#ifdef CONFIG_HIBERNATION
3529/*
3530 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
3531 * freed pages.
3532 *
3533 * Rather than trying to age LRUs the aim is to preserve the overall
3534 * LRU order by reclaiming preferentially
3535 * inactive > active > active referenced > active mapped
3536 */
3537unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
3538{
3539 struct reclaim_state reclaim_state;
3540 struct scan_control sc = {
3541 .nr_to_reclaim = nr_to_reclaim,
3542 .gfp_mask = GFP_HIGHUSER_MOVABLE,
3543 .reclaim_idx = MAX_NR_ZONES - 1,
3544 .priority = DEF_PRIORITY,
3545 .may_writepage = 1,
3546 .may_unmap = 1,
3547 .may_swap = 1,
3548 .hibernation_mode = 1,
3549 };
3550 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
3551 struct task_struct *p = current;
3552 unsigned long nr_reclaimed;
3553
3554 p->flags |= PF_MEMALLOC;
3555 lockdep_set_current_reclaim_state(sc.gfp_mask);
3556 reclaim_state.reclaimed_slab = 0;
3557 p->reclaim_state = &reclaim_state;
3558
3559 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
3560
3561 p->reclaim_state = NULL;
3562 lockdep_clear_current_reclaim_state();
3563 p->flags &= ~PF_MEMALLOC;
3564
3565 return nr_reclaimed;
3566}
3567#endif /* CONFIG_HIBERNATION */
3568
3569/* It's optimal to keep kswapds on the same CPUs as their memory, but
3570 not required for correctness. So if the last cpu in a node goes
3571 away, we get changed to run anywhere: as the first one comes back,
3572 restore their cpu bindings. */
3573static int kswapd_cpu_online(unsigned int cpu)
3574{
3575 int nid;
3576
3577 for_each_node_state(nid, N_MEMORY) {
3578 pg_data_t *pgdat = NODE_DATA(nid);
3579 const struct cpumask *mask;
3580
3581 mask = cpumask_of_node(pgdat->node_id);
3582
3583 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
3584 /* One of our CPUs online: restore mask */
3585 set_cpus_allowed_ptr(pgdat->kswapd, mask);
3586 }
3587 return 0;
3588}
3589
3590/*
3591 * This kswapd start function will be called by init and node-hot-add.
3592 * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
3593 */
3594int kswapd_run(int nid)
3595{
3596 pg_data_t *pgdat = NODE_DATA(nid);
3597 int ret = 0;
3598
3599 if (pgdat->kswapd)
3600 return 0;
3601
3602 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
3603 if (IS_ERR(pgdat->kswapd)) {
3604 /* failure at boot is fatal */
3605 BUG_ON(system_state == SYSTEM_BOOTING);
3606 pr_err("Failed to start kswapd on node %d\n", nid);
3607 ret = PTR_ERR(pgdat->kswapd);
3608 pgdat->kswapd = NULL;
3609 }
3610 return ret;
3611}
3612
3613/*
3614 * Called by memory hotplug when all memory in a node is offlined. Caller must
3615 * hold mem_hotplug_begin/end().
3616 */
3617void kswapd_stop(int nid)
3618{
3619 struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
3620
3621 if (kswapd) {
3622 kthread_stop(kswapd);
3623 NODE_DATA(nid)->kswapd = NULL;
3624 }
3625}
3626
3627static int __init kswapd_init(void)
3628{
3629 int nid, ret;
3630
3631 swap_setup();
3632 for_each_node_state(nid, N_MEMORY)
3633 kswapd_run(nid);
3634 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
3635 "mm/vmscan:online", kswapd_cpu_online,
3636 NULL);
3637 WARN_ON(ret < 0);
3638 return 0;
3639}
3640
3641module_init(kswapd_init)
3642
3643#ifdef CONFIG_NUMA
3644/*
3645 * Node reclaim mode
3646 *
3647 * If non-zero call node_reclaim when the number of free pages falls below
3648 * the watermarks.
3649 */
3650int node_reclaim_mode __read_mostly;
3651
3652#define RECLAIM_OFF 0
3653#define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */
3654#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
3655#define RECLAIM_UNMAP (1<<2) /* Unmap pages during reclaim */
3656
3657/*
3658 * Priority for NODE_RECLAIM. This determines the fraction of the
3659 * node's pages considered in each node_reclaim pass. A value of 4
3660 * scans 1/16th of the node's LRU pages per pass.
3661 */
3662#define NODE_RECLAIM_PRIORITY 4
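/*
 * Since get_scan_count() computes scan = size >> priority, successive
 * __node_reclaim() passes at priorities 4, 3, 2, 1 and 0 consider roughly
 * 1/16, 1/8, 1/4, 1/2 and finally all of the node's LRU pages.
 */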
3663
3664/*
3665 * Percentage of pages on a node that must be unmapped for node_reclaim to
3666 * occur.
3667 */
3668int sysctl_min_unmapped_ratio = 1;
3669
3670/*
3671 * If the number of slab pages on a node grows beyond this percentage then
3672 * slab reclaim needs to occur.
3673 */
3674int sysctl_min_slab_ratio = 5;
3675
3676static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
3677{
3678 unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
3679 unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
3680 node_page_state(pgdat, NR_ACTIVE_FILE);
3681
3682 /*
3683 * It's possible for there to be more file mapped pages than
3684 * accounted for by the pages on the file LRU lists because
3685 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
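*
* Illustration with made-up counts: 200000 pages on the file LRUs and
* 50000 FILE_MAPPED pages leave 150000 pages treated as unmapped page
* cache.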
3686 */
3687 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
3688}
3689
3690/* Work out how many page cache pages we can reclaim in this reclaim_mode */
3691static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
3692{
3693 unsigned long nr_pagecache_reclaimable;
3694 unsigned long delta = 0;
3695
3696 /*
3697 * If RECLAIM_UNMAP is set, then all file pages are considered
3698 * potentially reclaimable. Otherwise, we have to worry about
3699 * pages like swapcache and node_unmapped_file_pages() provides
3700 * a better estimate
3701 */
3702 if (node_reclaim_mode & RECLAIM_UNMAP)
3703 nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
3704 else
3705 nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);
3706
3707 /* If we can't clean pages, remove dirty pages from consideration */
3708 if (!(node_reclaim_mode & RECLAIM_WRITE))
3709 delta += node_page_state(pgdat, NR_FILE_DIRTY);
3710
3711 /* Watch for any possible underflows due to delta */
3712 if (unlikely(delta > nr_pagecache_reclaimable))
3713 delta = nr_pagecache_reclaimable;
3714
3715 return nr_pagecache_reclaimable - delta;
3716}
3717
3718/*
3719 * Try to free up some pages from this node through reclaim.
3720 */
3721static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
3722{
3723 /* Minimum pages needed in order to stay on node */
3724 const unsigned long nr_pages = 1 << order;
3725 struct task_struct *p = current;
3726 struct reclaim_state reclaim_state;
3727 int classzone_idx = gfp_zone(gfp_mask);
3728 struct scan_control sc = {
3729 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
3730 .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
3731 .order = order,
3732 .priority = NODE_RECLAIM_PRIORITY,
3733 .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
3734 .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
3735 .may_swap = 1,
3736 .reclaim_idx = classzone_idx,
3737 };
3738
3739 cond_resched();
3740 /*
3741 * We need to be able to allocate from the reserves for RECLAIM_UNMAP
3742 * and we also need to be able to write out pages for RECLAIM_WRITE
3743 * and RECLAIM_UNMAP.
3744 */
3745 p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
3746 lockdep_set_current_reclaim_state(gfp_mask);
3747 reclaim_state.reclaimed_slab = 0;
3748 p->reclaim_state = &reclaim_state;
3749
3750 if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) {
3751 /*
3752 * Free memory by calling shrink_node() with increasing
3753 * priority (lower sc.priority values) until enough is freed.
3754 */
3755 do {
3756 shrink_node(pgdat, &sc);
3757 } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
3758 }
3759
3760 p->reclaim_state = NULL;
3761 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
3762 lockdep_clear_current_reclaim_state();
3763 return sc.nr_reclaimed >= nr_pages;
3764}
3765
3766int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
3767{
3768 int ret;
3769
3770 /*
3771 * Node reclaim reclaims unmapped file-backed pages and
3772 * slab pages if we are over the defined limits.
3773 *
3774 * A small portion of unmapped file-backed pages is needed for
3775 * file I/O; otherwise pages read by file I/O would be immediately
3776 * thrown out if the node is overallocated. So we do not reclaim
3777 * if less than the specified percentage of the node consists of
3778 * unmapped file-backed pages.
3779 */
3780 if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
3781 sum_zone_node_page_state(pgdat->node_id, NR_SLAB_RECLAIMABLE) <= pgdat->min_slab_pages)
3782 return NODE_RECLAIM_FULL;
3783
3784 if (!pgdat_reclaimable(pgdat))
3785 return NODE_RECLAIM_FULL;
3786
3787 /*
3788 * Do not scan if the allocation should not be delayed.
3789 */
3790 if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
3791 return NODE_RECLAIM_NOSCAN;
3792
3793 /*
3794 * Only run node reclaim on the local node or on nodes that do not
3795 * have associated processors. This favors the local processor
3796 * over remote processors and spreads off-node memory allocations
3797 * as widely as possible.
3798 */
3799 if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
3800 return NODE_RECLAIM_NOSCAN;
3801
3802 if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
3803 return NODE_RECLAIM_NOSCAN;
3804
3805 ret = __node_reclaim(pgdat, gfp_mask, order);
3806 clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
3807
3808 if (!ret)
3809 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
3810
3811 return ret;
3812}
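
/*
 * Illustrative sketch only of how an allocator-side caller might
 * interpret node_reclaim()'s return codes (the real consumer is
 * get_page_from_freelist() in mm/page_alloc.c, and the NODE_RECLAIM_*
 * codes come from mm/internal.h); the helper name is hypothetical.
 */
static bool __maybe_unused example_try_node_reclaim(struct pglist_data *pgdat,
						    gfp_t gfp_mask,
						    unsigned int order)
{
	switch (node_reclaim(pgdat, gfp_mask, order)) {
	case NODE_RECLAIM_NOSCAN:	/* policy said not to scan at all */
	case NODE_RECLAIM_FULL:		/* scanned, nothing reclaimable */
		return false;
	default:
		/* Some progress: recheck the watermark before giving up. */
		return true;
	}
}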
3813#endif
3814
3815/*
3816 * page_evictable - test whether a page is evictable
3817 * @page: the page to test
3818 *
3819 * Test whether the page is evictable, i.e. whether it should be placed on
3820 * the active/inactive lists rather than the unevictable list.
3821 *
3822 * Reasons page might not be evictable:
3823 * (1) page's mapping marked unevictable
3824 * (2) page is part of an mlocked VMA
3825 *
3826 */
3827int page_evictable(struct page *page)
3828{
3829 return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
3830}
3831
3832#ifdef CONFIG_SHMEM
3833/**
3834 * check_move_unevictable_pages - check pages for evictability and move them to the appropriate lru list
3835 * @pages: array of pages to check
3836 * @nr_pages: number of pages to check
3837 *
3838 * Checks pages for evictability and moves them to the appropriate lru list.
3839 *
3840 * This function is only used for SysV IPC SHM_UNLOCK.
3841 */
3842void check_move_unevictable_pages(struct page **pages, int nr_pages)
3843{
3844 struct lruvec *lruvec;
3845 struct pglist_data *pgdat = NULL;
3846 int pgscanned = 0;
3847 int pgrescued = 0;
3848 int i;
3849
3850 for (i = 0; i < nr_pages; i++) {
3851 struct page *page = pages[i];
3852 struct pglist_data *pagepgdat = page_pgdat(page);
3853
3854 pgscanned++;
3855 if (pagepgdat != pgdat) {
3856 if (pgdat)
3857 spin_unlock_irq(&pgdat->lru_lock);
3858 pgdat = pagepgdat;
3859 spin_lock_irq(&pgdat->lru_lock);
3860 }
3861 lruvec = mem_cgroup_page_lruvec(page, pgdat);
3862
3863 if (!PageLRU(page) || !PageUnevictable(page))
3864 continue;
3865
3866 if (page_evictable(page)) {
3867 enum lru_list lru = page_lru_base_type(page);
3868
3869 VM_BUG_ON_PAGE(PageActive(page), page);
3870 ClearPageUnevictable(page);
3871 del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
3872 add_page_to_lru_list(page, lruvec, lru);
3873 pgrescued++;
3874 }
3875 }
3876
3877 if (pgdat) {
3878 __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
3879 __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
3880 spin_unlock_irq(&pgdat->lru_lock);
3881 }
3882}
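
/*
 * Illustrative sketch only of the intended caller pattern (the real one
 * is shmem_unlock_mapping() in mm/shmem.c): after SHM_UNLOCK, walk the
 * mapping in pagevec-sized batches and let check_move_unevictable_pages()
 * move anything that is no longer unevictable back onto the normal LRUs.
 * The function name below is hypothetical.
 */
static void __maybe_unused example_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
		/* Resume the walk after the last page found in this batch. */
		index = pvec.pages[pvec.nr - 1]->index + 1;
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
		cond_resched();
	}
}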
3883#endif /* CONFIG_SHMEM */