1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/mm/swap.c
4 *
5 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
6 */
7
8/*
9 * This file contains the default values for the operation of the
10 * Linux VM subsystem. Fine-tuning documentation can be found in
11 * Documentation/admin-guide/sysctl/vm.rst.
12 * Started 18.12.91
13 * Swap aging added 23.2.95, Stephen Tweedie.
14 * Buffermem limits added 12.3.98, Rik van Riel.
15 */
16
17#include <linux/mm.h>
18#include <linux/sched.h>
19#include <linux/kernel_stat.h>
20#include <linux/swap.h>
21#include <linux/mman.h>
22#include <linux/pagemap.h>
23#include <linux/pagevec.h>
24#include <linux/init.h>
25#include <linux/export.h>
26#include <linux/mm_inline.h>
27#include <linux/percpu_counter.h>
28#include <linux/memremap.h>
29#include <linux/percpu.h>
30#include <linux/cpu.h>
31#include <linux/notifier.h>
32#include <linux/backing-dev.h>
33#include <linux/memcontrol.h>
34#include <linux/gfp.h>
35#include <linux/uio.h>
36#include <linux/hugetlb.h>
37#include <linux/page_idle.h>
38#include <linux/local_lock.h>
39#include <linux/buffer_head.h>
40
41#include "internal.h"
42
43#define CREATE_TRACE_POINTS
44#include <trace/events/pagemap.h>
45
46/* How many pages do we try to swap or page in/out together? As a power of 2 */
47int page_cluster;
48const int page_cluster_max = 31;
49
50struct cpu_fbatches {
51 /*
52 * The following folio batches are grouped together because they are protected
53 * by disabling preemption (and interrupts remain enabled).
54 */
55 local_lock_t lock;
56 struct folio_batch lru_add;
57 struct folio_batch lru_deactivate_file;
58 struct folio_batch lru_deactivate;
59 struct folio_batch lru_lazyfree;
60#ifdef CONFIG_SMP
61 struct folio_batch lru_activate;
62#endif
63 /* Protecting the following batches which require disabling interrupts */
64 local_lock_t lock_irq;
65 struct folio_batch lru_move_tail;
66};
67
68static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {
69 .lock = INIT_LOCAL_LOCK(lock),
70 .lock_irq = INIT_LOCAL_LOCK(lock_irq),
71};
72
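/*
 * If the folio is (still) on an LRU list, take (or keep) its lruvec lock,
 * remove it from the list and clear its LRU flags.  The locked lruvec and
 * saved IRQ flags are returned through @lruvecp and @flagsp so the caller
 * can batch the unlock.
 */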
73static void __page_cache_release(struct folio *folio, struct lruvec **lruvecp,
74 unsigned long *flagsp)
75{
76 if (folio_test_lru(folio)) {
77 folio_lruvec_relock_irqsave(folio, lruvecp, flagsp);
78 lruvec_del_folio(*lruvecp, folio);
79 __folio_clear_lru_flags(folio);
80 }
81}
82
83/*
84 * This path almost never happens for VM activity - pages are normally freed
85 * in batches. But it gets used by networking - and for compound pages.
86 */
87static void page_cache_release(struct folio *folio)
88{
89 struct lruvec *lruvec = NULL;
90 unsigned long flags;
91
92 __page_cache_release(folio, &lruvec, &flags);
93 if (lruvec)
94 unlock_page_lruvec_irqrestore(lruvec, flags);
95}
96
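/**
 * __folio_put - Free a folio when its reference count reaches zero.
 * @folio: The folio to free.
 *
 * Called by folio_put() once the last reference has been dropped.  Handles
 * the zone device and hugetlb special cases, then releases the folio from
 * the LRU and returns its pages to the page allocator.
 */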
97void __folio_put(struct folio *folio)
98{
99 if (unlikely(folio_is_zone_device(folio))) {
100 free_zone_device_folio(folio);
101 return;
102 }
103
104 if (folio_test_hugetlb(folio)) {
105 free_huge_folio(folio);
106 return;
107 }
108
109 page_cache_release(folio);
110 folio_unqueue_deferred_split(folio);
111 mem_cgroup_uncharge(folio);
112 free_unref_page(&folio->page, folio_order(folio));
113}
114EXPORT_SYMBOL(__folio_put);
115
116typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio);
117
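/*
 * Add a newly batched folio to its LRU list, routing unevictable folios to
 * the unevictable list and accounting rescues/culls.  Called with the
 * lruvec lock held.
 */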
118static void lru_add(struct lruvec *lruvec, struct folio *folio)
119{
120 int was_unevictable = folio_test_clear_unevictable(folio);
121 long nr_pages = folio_nr_pages(folio);
122
123 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
124
125 /*
126 * Is an smp_mb__after_atomic() still required here, before
127 * folio_evictable() tests the mlocked flag, to rule out the possibility
128 * of stranding an evictable folio on an unevictable LRU? I think
129 * not, because __munlock_folio() only clears the mlocked flag
130 * while the LRU lock is held.
131 *
132 * (That is not true of __page_cache_release(), and not necessarily
133 * true of folios_put(): but those only clear the mlocked flag after
134 * folio_put_testzero() has excluded any other users of the folio.)
135 */
136 if (folio_evictable(folio)) {
137 if (was_unevictable)
138 __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
139 } else {
140 folio_clear_active(folio);
141 folio_set_unevictable(folio);
142 /*
143 * folio->mlock_count = !!folio_test_mlocked(folio)?
144 * But that leaves __mlock_folio() in doubt whether another
145 * actor has already counted the mlock or not. Err on the
146 * safe side, underestimate, let page reclaim fix it, rather
147 * than leaving a page on the unevictable LRU indefinitely.
148 */
149 folio->mlock_count = 0;
150 if (!was_unevictable)
151 __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
152 }
153
154 lruvec_add_folio(lruvec, folio);
155 trace_mm_lru_insertion(folio);
156}
157
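/*
 * Run @move_fn on each folio in @fbatch under the appropriate lruvec lock,
 * set their LRU flags, then drop the references held by the batch.
 */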
158static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
159{
160 int i;
161 struct lruvec *lruvec = NULL;
162 unsigned long flags = 0;
163
164 for (i = 0; i < folio_batch_count(fbatch); i++) {
165 struct folio *folio = fbatch->folios[i];
166
167 folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
168 move_fn(lruvec, folio);
169
170 folio_set_lru(folio);
171 }
172
173 if (lruvec)
174 unlock_page_lruvec_irqrestore(lruvec, flags);
175 folios_put(fbatch);
176}
177
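/*
 * Add @folio to the per-CPU batch and drain the batch through @move_fn when
 * it fills up, when the folio is large, or when the LRU cache is disabled.
 * With @on_lru, the folio must currently be on an LRU list and its LRU flag
 * is cleared while it sits in the batch.
 */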
178static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
179 struct folio *folio, move_fn_t move_fn,
180 bool on_lru, bool disable_irq)
181{
182 unsigned long flags;
183
184 if (on_lru && !folio_test_clear_lru(folio))
185 return;
186
187 folio_get(folio);
188
189 if (disable_irq)
190 local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
191 else
192 local_lock(&cpu_fbatches.lock);
193
194 if (!folio_batch_add(this_cpu_ptr(fbatch), folio) || folio_test_large(folio) ||
195 lru_cache_disabled())
196 folio_batch_move_lru(this_cpu_ptr(fbatch), move_fn);
197
198 if (disable_irq)
199 local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);
200 else
201 local_unlock(&cpu_fbatches.lock);
202}
203
204#define folio_batch_add_and_move(folio, op, on_lru) \
205 __folio_batch_add_and_move( \
206 &cpu_fbatches.op, \
207 folio, \
208 op, \
209 on_lru, \
210 offsetof(struct cpu_fbatches, op) >= offsetof(struct cpu_fbatches, lock_irq) \
211 )
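/*
 * For example, folio_batch_add_and_move(folio, lru_add, false) queues
 * @folio on this CPU's lru_add batch.  Ops declared after lock_irq in
 * struct cpu_fbatches (currently only lru_move_tail) are added and drained
 * with interrupts disabled, as selected by the offsetof() comparison above.
 */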
212
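/*
 * Move an evictable folio to the tail of the inactive list, so that reclaim
 * finds it soon after its writeback completes.  Called with the lruvec lock
 * held.
 */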
213static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
214{
215 if (folio_test_unevictable(folio))
216 return;
217
218 lruvec_del_folio(lruvec, folio);
219 folio_clear_active(folio);
220 lruvec_add_folio_tail(lruvec, folio);
221 __count_vm_events(PGROTATED, folio_nr_pages(folio));
222}
223
224/*
225 * Writeback is about to end against a folio which has been marked for
226 * immediate reclaim. If it still appears to be reclaimable, move it
227 * to the tail of the inactive list.
228 *
229 * folio_rotate_reclaimable() must disable IRQs, to prevent nasty races.
230 */
231void folio_rotate_reclaimable(struct folio *folio)
232{
233 if (folio_test_locked(folio) || folio_test_dirty(folio) ||
234 folio_test_unevictable(folio))
235 return;
236
237 folio_batch_add_and_move(folio, lru_move_tail, true);
238}
239
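/*
 * Account the cost of I/O and LRU rotations to @lruvec and each of its
 * ancestors; reclaim uses these costs to balance scan pressure between the
 * anon and file lists.
 */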
240void lru_note_cost(struct lruvec *lruvec, bool file,
241 unsigned int nr_io, unsigned int nr_rotated)
242{
243 unsigned long cost;
244
245 /*
246 * Reflect the relative cost of incurring IO and spending CPU
247 * time on rotations. This doesn't attempt to make a precise
248 * comparison, it just says: if reloads are about comparable
249 * between the LRU lists, or rotations are overwhelmingly
250 * different between them, adjust scan balance for CPU work.
251 */
252 cost = nr_io * SWAP_CLUSTER_MAX + nr_rotated;
253
254 do {
255 unsigned long lrusize;
256
		/*
		 * Holding lruvec->lru_lock is safe here because the lruvec is
		 * pinned: either 1) by the caller during reclaim, or 2) via a
		 * pre-LRU page during refault (which also holds the rcu lock,
		 * so it would be safe even if the page were on the LRU and
		 * could move to a new lruvec concurrently).
		 */
264 spin_lock_irq(&lruvec->lru_lock);
265 /* Record cost event */
266 if (file)
267 lruvec->file_cost += cost;
268 else
269 lruvec->anon_cost += cost;
270
271 /*
272 * Decay previous events
273 *
274 * Because workloads change over time (and to avoid
275 * overflow) we keep these statistics as a floating
276 * average, which ends up weighing recent refaults
277 * more than old ones.
278 */
279 lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
280 lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
281 lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
282 lruvec_page_state(lruvec, NR_ACTIVE_FILE);
283
284 if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
285 lruvec->file_cost /= 2;
286 lruvec->anon_cost /= 2;
287 }
288 spin_unlock_irq(&lruvec->lru_lock);
289 } while ((lruvec = parent_lruvec(lruvec)));
290}
291
292void lru_note_cost_refault(struct folio *folio)
293{
294 lru_note_cost(folio_lruvec(folio), folio_is_file_lru(folio),
295 folio_nr_pages(folio), 0);
296}
297
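/*
 * Move a folio to the active list and account the activation.  Called with
 * the lruvec lock held.
 */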
298static void lru_activate(struct lruvec *lruvec, struct folio *folio)
299{
300 long nr_pages = folio_nr_pages(folio);
301
302 if (folio_test_active(folio) || folio_test_unevictable(folio))
303 return;

	lruvec_del_folio(lruvec, folio);
307 folio_set_active(folio);
308 lruvec_add_folio(lruvec, folio);
309 trace_mm_lru_activate(folio);
310
311 __count_vm_events(PGACTIVATE, nr_pages);
312 __count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE, nr_pages);
313}
314
315#ifdef CONFIG_SMP
316static void folio_activate_drain(int cpu)
317{
318 struct folio_batch *fbatch = &per_cpu(cpu_fbatches.lru_activate, cpu);
319
320 if (folio_batch_count(fbatch))
321 folio_batch_move_lru(fbatch, lru_activate);
322}
323
324void folio_activate(struct folio *folio)
325{
326 if (folio_test_active(folio) || folio_test_unevictable(folio))
327 return;
328
329 folio_batch_add_and_move(folio, lru_activate, true);
330}
331
332#else
333static inline void folio_activate_drain(int cpu)
334{
335}
336
337void folio_activate(struct folio *folio)
338{
339 struct lruvec *lruvec;
340
341 if (!folio_test_clear_lru(folio))
342 return;
343
344 lruvec = folio_lruvec_lock_irq(folio);
345 lru_activate(lruvec, folio);
346 unlock_page_lruvec_irq(lruvec);
347 folio_set_lru(folio);
348}
349#endif
350
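/* Activate a folio that is still sitting on this CPU's lru_add batch. */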
351static void __lru_cache_activate_folio(struct folio *folio)
352{
353 struct folio_batch *fbatch;
354 int i;
355
356 local_lock(&cpu_fbatches.lock);
357 fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);
358
359 /*
360 * Search backwards on the optimistic assumption that the folio being
361 * activated has just been added to this batch. Note that only
362 * the local batch is examined as a !LRU folio could be in the
363 * process of being released, reclaimed, migrated or on a remote
364 * batch that is currently being drained. Furthermore, marking
365 * a remote batch's folio active potentially hits a race where
366 * a folio is marked active just after it is added to the inactive
367 * list causing accounting errors and BUG_ON checks to trigger.
368 */
369 for (i = folio_batch_count(fbatch) - 1; i >= 0; i--) {
370 struct folio *batch_folio = fbatch->folios[i];
371
372 if (batch_folio == folio) {
373 folio_set_active(folio);
374 break;
375 }
376 }
377
378 local_unlock(&cpu_fbatches.lock);
379}
380
381#ifdef CONFIG_LRU_GEN
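/*
 * Multi-gen LRU: record an access by stepping the folio through the
 * referenced and workingset flags and then the LRU_REFS counter in
 * folio->flags (see the comment on MAX_NR_TIERS).
 */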
382static void folio_inc_refs(struct folio *folio)
383{
384 unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
385
386 if (folio_test_unevictable(folio))
387 return;
388
389 if (!folio_test_referenced(folio)) {
390 folio_set_referenced(folio);
391 return;
392 }
393
394 if (!folio_test_workingset(folio)) {
395 folio_set_workingset(folio);
396 return;
397 }
398
399 /* see the comment on MAX_NR_TIERS */
400 do {
401 new_flags = old_flags & LRU_REFS_MASK;
402 if (new_flags == LRU_REFS_MASK)
403 break;
404
405 new_flags += BIT(LRU_REFS_PGOFF);
406 new_flags |= old_flags & ~LRU_REFS_MASK;
407 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
408}
409#else
410static void folio_inc_refs(struct folio *folio)
411{
412}
413#endif /* CONFIG_LRU_GEN */
414
415/**
416 * folio_mark_accessed - Mark a folio as having seen activity.
417 * @folio: The folio to mark.
418 *
419 * This function will perform one of the following transitions:
420 *
421 * * inactive,unreferenced -> inactive,referenced
422 * * inactive,referenced -> active,unreferenced
423 * * active,unreferenced -> active,referenced
424 *
425 * When a newly allocated folio is not yet visible, so safe for non-atomic ops,
426 * __folio_set_referenced() may be substituted for folio_mark_accessed().
427 */
428void folio_mark_accessed(struct folio *folio)
429{
430 if (lru_gen_enabled()) {
431 folio_inc_refs(folio);
432 return;
433 }
434
435 if (!folio_test_referenced(folio)) {
436 folio_set_referenced(folio);
437 } else if (folio_test_unevictable(folio)) {
438 /*
439 * Unevictable pages are on the "LRU_UNEVICTABLE" list. But,
440 * this list is never rotated or maintained, so marking an
441 * unevictable page accessed has no effect.
442 */
443 } else if (!folio_test_active(folio)) {
444 /*
445 * If the folio is on the LRU, queue it for activation via
446 * cpu_fbatches.lru_activate. Otherwise, assume the folio is in a
447 * folio_batch, mark it active and it'll be moved to the active
448 * LRU on the next drain.
449 */
450 if (folio_test_lru(folio))
451 folio_activate(folio);
452 else
453 __lru_cache_activate_folio(folio);
454 folio_clear_referenced(folio);
455 workingset_activation(folio);
456 }
457 if (folio_test_idle(folio))
458 folio_clear_idle(folio);
459}
460EXPORT_SYMBOL(folio_mark_accessed);
461
462/**
463 * folio_add_lru - Add a folio to an LRU list.
464 * @folio: The folio to be added to the LRU.
465 *
 * Queue the folio for addition to the LRU. The decision on whether
 * to add the folio to the [in]active [file|anon] list is deferred until the
 * folio_batch is drained. This gives the caller of folio_add_lru() a chance
 * to have the folio added to the active list using folio_mark_accessed().
470 */
471void folio_add_lru(struct folio *folio)
472{
473 VM_BUG_ON_FOLIO(folio_test_active(folio) &&
474 folio_test_unevictable(folio), folio);
475 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
476
477 /* see the comment in lru_gen_add_folio() */
478 if (lru_gen_enabled() && !folio_test_unevictable(folio) &&
479 lru_gen_in_fault() && !(current->flags & PF_MEMALLOC))
480 folio_set_active(folio);
481
482 folio_batch_add_and_move(folio, lru_add, false);
483}
484EXPORT_SYMBOL(folio_add_lru);
485
486/**
 * folio_add_lru_vma() - Add a folio to the appropriate LRU list for this VMA.
488 * @folio: The folio to be added to the LRU.
489 * @vma: VMA in which the folio is mapped.
490 *
491 * If the VMA is mlocked, @folio is added to the unevictable list.
492 * Otherwise, it is treated the same way as folio_add_lru().
493 */
494void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma)
495{
496 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
497
498 if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED))
499 mlock_new_folio(folio);
500 else
501 folio_add_lru(folio);
502}
503
504/*
505 * If the folio cannot be invalidated, it is moved to the
506 * inactive list to speed up its reclaim. It is moved to the
507 * head of the list, rather than the tail, to give the flusher
508 * threads some time to write it out, as this is much more
509 * effective than the single-page writeout from reclaim.
510 *
 * If the folio isn't mapped but is dirty or under writeback, it can
 * be reclaimed soon after writeback completes, via the reclaim flag.
513 *
514 * 1. active, mapped folio -> none
515 * 2. active, dirty/writeback folio -> inactive, head, reclaim
516 * 3. inactive, mapped folio -> none
517 * 4. inactive, dirty/writeback folio -> inactive, head, reclaim
518 * 5. inactive, clean -> inactive, tail
519 * 6. Others -> none
520 *
521 * In 4, it moves to the head of the inactive list so the folio is
522 * written out by flusher threads as this is much more efficient
523 * than the single-page writeout from reclaim.
524 */
525static void lru_deactivate_file(struct lruvec *lruvec, struct folio *folio)
526{
527 bool active = folio_test_active(folio);
528 long nr_pages = folio_nr_pages(folio);
529
530 if (folio_test_unevictable(folio))
531 return;
532
533 /* Some processes are using the folio */
534 if (folio_mapped(folio))
535 return;
536
537 lruvec_del_folio(lruvec, folio);
538 folio_clear_active(folio);
539 folio_clear_referenced(folio);
540
541 if (folio_test_writeback(folio) || folio_test_dirty(folio)) {
542 /*
543 * Setting the reclaim flag could race with
544 * folio_end_writeback() and confuse readahead. But the
545 * race window is _really_ small and it's not a critical
546 * problem.
547 */
548 lruvec_add_folio(lruvec, folio);
549 folio_set_reclaim(folio);
550 } else {
551 /*
552 * The folio's writeback ended while it was in the batch.
553 * We move that folio to the tail of the inactive list.
554 */
555 lruvec_add_folio_tail(lruvec, folio);
556 __count_vm_events(PGROTATED, nr_pages);
557 }
558
559 if (active) {
560 __count_vm_events(PGDEACTIVATE, nr_pages);
561 __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
562 nr_pages);
563 }
564}
565
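/*
 * Move an active folio (or, with MGLRU enabled, any evictable folio) to the
 * inactive list and account the deactivation.
 */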
566static void lru_deactivate(struct lruvec *lruvec, struct folio *folio)
567{
568 long nr_pages = folio_nr_pages(folio);
569
570 if (folio_test_unevictable(folio) || !(folio_test_active(folio) || lru_gen_enabled()))
571 return;
572
573 lruvec_del_folio(lruvec, folio);
574 folio_clear_active(folio);
575 folio_clear_referenced(folio);
576 lruvec_add_folio(lruvec, folio);
577
578 __count_vm_events(PGDEACTIVATE, nr_pages);
579 __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_pages);
580}
581
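/*
 * MADV_FREE: mark a swap-backed anonymous folio lazyfree by clearing its
 * swapbacked flag and moving it to the inactive list.
 */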
582static void lru_lazyfree(struct lruvec *lruvec, struct folio *folio)
583{
584 long nr_pages = folio_nr_pages(folio);
585
586 if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
587 folio_test_swapcache(folio) || folio_test_unevictable(folio))
588 return;
589
590 lruvec_del_folio(lruvec, folio);
591 folio_clear_active(folio);
592 folio_clear_referenced(folio);
593 /*
594 * Lazyfree folios are clean anonymous folios. They have
595 * the swapbacked flag cleared, to distinguish them from normal
	 * anonymous folios.
597 */
598 folio_clear_swapbacked(folio);
599 lruvec_add_folio(lruvec, folio);
600
601 __count_vm_events(PGLAZYFREE, nr_pages);
602 __count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE, nr_pages);
603}
604
605/*
606 * Drain pages out of the cpu's folio_batch.
607 * Either "cpu" is the current CPU, and preemption has already been
608 * disabled; or "cpu" is being hot-unplugged, and is already dead.
609 */
610void lru_add_drain_cpu(int cpu)
611{
612 struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
613 struct folio_batch *fbatch = &fbatches->lru_add;
614
615 if (folio_batch_count(fbatch))
616 folio_batch_move_lru(fbatch, lru_add);
617
618 fbatch = &fbatches->lru_move_tail;
619 /* Disabling interrupts below acts as a compiler barrier. */
620 if (data_race(folio_batch_count(fbatch))) {
621 unsigned long flags;
622
623 /* No harm done if a racing interrupt already did this */
624 local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
625 folio_batch_move_lru(fbatch, lru_move_tail);
626 local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);
627 }
628
629 fbatch = &fbatches->lru_deactivate_file;
630 if (folio_batch_count(fbatch))
631 folio_batch_move_lru(fbatch, lru_deactivate_file);
632
633 fbatch = &fbatches->lru_deactivate;
634 if (folio_batch_count(fbatch))
635 folio_batch_move_lru(fbatch, lru_deactivate);
636
637 fbatch = &fbatches->lru_lazyfree;
638 if (folio_batch_count(fbatch))
639 folio_batch_move_lru(fbatch, lru_lazyfree);
640
641 folio_activate_drain(cpu);
642}
643
644/**
645 * deactivate_file_folio() - Deactivate a file folio.
646 * @folio: Folio to deactivate.
647 *
648 * This function hints to the VM that @folio is a good reclaim candidate,
649 * for example if its invalidation fails due to the folio being dirty
650 * or under writeback.
651 *
652 * Context: Caller holds a reference on the folio.
653 */
654void deactivate_file_folio(struct folio *folio)
655{
656 /* Deactivating an unevictable folio will not accelerate reclaim */
657 if (folio_test_unevictable(folio))
658 return;
659
660 folio_batch_add_and_move(folio, lru_deactivate_file, true);
661}
662
663/*
664 * folio_deactivate - deactivate a folio
665 * @folio: folio to deactivate
666 *
667 * folio_deactivate() moves @folio to the inactive list if @folio was on the
668 * active list and was not unevictable. This is done to accelerate the
669 * reclaim of @folio.
670 */
671void folio_deactivate(struct folio *folio)
672{
673 if (folio_test_unevictable(folio) || !(folio_test_active(folio) || lru_gen_enabled()))
674 return;
675
676 folio_batch_add_and_move(folio, lru_deactivate, true);
677}
678
679/**
680 * folio_mark_lazyfree - make an anon folio lazyfree
681 * @folio: folio to deactivate
682 *
683 * folio_mark_lazyfree() moves @folio to the inactive file list.
684 * This is done to accelerate the reclaim of @folio.
685 */
686void folio_mark_lazyfree(struct folio *folio)
687{
688 if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
689 folio_test_swapcache(folio) || folio_test_unevictable(folio))
690 return;
691
692 folio_batch_add_and_move(folio, lru_lazyfree, true);
693}
694
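/* Drain this CPU's LRU folio batches (and its mlock batch) onto the LRU lists. */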
695void lru_add_drain(void)
696{
697 local_lock(&cpu_fbatches.lock);
698 lru_add_drain_cpu(smp_processor_id());
699 local_unlock(&cpu_fbatches.lock);
700 mlock_drain_local();
701}
702
/*
 * In the SMP case this is called from per-cpu workqueue context, so
 * lru_add_drain_cpu() and invalidate_bh_lrus_cpu() run on the same cpu.
 * In the !SMP case this isn't a problem, since there is only one CPU and
 * the locks disable preemption.
 */
709static void lru_add_and_bh_lrus_drain(void)
710{
711 local_lock(&cpu_fbatches.lock);
712 lru_add_drain_cpu(smp_processor_id());
713 local_unlock(&cpu_fbatches.lock);
714 invalidate_bh_lrus_cpu();
715 mlock_drain_local();
716}
717
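/* Like lru_add_drain(), but also drain this CPU's per-CPU free pages for @zone. */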
718void lru_add_drain_cpu_zone(struct zone *zone)
719{
720 local_lock(&cpu_fbatches.lock);
721 lru_add_drain_cpu(smp_processor_id());
722 drain_local_pages(zone);
723 local_unlock(&cpu_fbatches.lock);
724 mlock_drain_local();
725}
726
727#ifdef CONFIG_SMP
728
729static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
730
731static void lru_add_drain_per_cpu(struct work_struct *dummy)
732{
733 lru_add_and_bh_lrus_drain();
734}
735
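/* Does @cpu have any per-CPU LRU, mlock or buffer_head state that needs draining? */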
736static bool cpu_needs_drain(unsigned int cpu)
737{
738 struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
739
740 /* Check these in order of likelihood that they're not zero */
741 return folio_batch_count(&fbatches->lru_add) ||
742 folio_batch_count(&fbatches->lru_move_tail) ||
743 folio_batch_count(&fbatches->lru_deactivate_file) ||
744 folio_batch_count(&fbatches->lru_deactivate) ||
745 folio_batch_count(&fbatches->lru_lazyfree) ||
746 folio_batch_count(&fbatches->lru_activate) ||
747 need_mlock_drain(cpu) ||
748 has_bh_in_lru(cpu, NULL);
749}
750
751/*
752 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
753 * kworkers being shut down before our page_alloc_cpu_dead callback is
754 * executed on the offlined cpu.
755 * Calling this function with cpu hotplug locks held can actually lead
756 * to obscure indirect dependencies via WQ context.
757 */
758static inline void __lru_add_drain_all(bool force_all_cpus)
759{
760 /*
761 * lru_drain_gen - Global pages generation number
762 *
763 * (A) Definition: global lru_drain_gen = x implies that all generations
764 * 0 < n <= x are already *scheduled* for draining.
765 *
766 * This is an optimization for the highly-contended use case where a
767 * user space workload keeps constantly generating a flow of pages for
768 * each CPU.
769 */
770 static unsigned int lru_drain_gen;
771 static struct cpumask has_work;
772 static DEFINE_MUTEX(lock);
773 unsigned cpu, this_gen;
774
775 /*
776 * Make sure nobody triggers this path before mm_percpu_wq is fully
777 * initialized.
778 */
779 if (WARN_ON(!mm_percpu_wq))
780 return;
781
782 /*
783 * Guarantee folio_batch counter stores visible by this CPU
784 * are visible to other CPUs before loading the current drain
785 * generation.
786 */
787 smp_mb();
788
789 /*
790 * (B) Locally cache global LRU draining generation number
791 *
792 * The read barrier ensures that the counter is loaded before the mutex
793 * is taken. It pairs with smp_mb() inside the mutex critical section
794 * at (D).
795 */
796 this_gen = smp_load_acquire(&lru_drain_gen);
797
798 mutex_lock(&lock);
799
800 /*
801 * (C) Exit the draining operation if a newer generation, from another
802 * lru_add_drain_all(), was already scheduled for draining. Check (A).
803 */
804 if (unlikely(this_gen != lru_drain_gen && !force_all_cpus))
805 goto done;
806
807 /*
808 * (D) Increment global generation number
809 *
810 * Pairs with smp_load_acquire() at (B), outside of the critical
811 * section. Use a full memory barrier to guarantee that the
812 * new global drain generation number is stored before loading
813 * folio_batch counters.
814 *
815 * This pairing must be done here, before the for_each_online_cpu loop
816 * below which drains the page vectors.
817 *
818 * Let x, y, and z represent some system CPU numbers, where x < y < z.
819 * Assume CPU #z is in the middle of the for_each_online_cpu loop
820 * below and has already reached CPU #y's per-cpu data. CPU #x comes
821 * along, adds some pages to its per-cpu vectors, then calls
822 * lru_add_drain_all().
823 *
824 * If the paired barrier is done at any later step, e.g. after the
825 * loop, CPU #x will just exit at (C) and miss flushing out all of its
826 * added pages.
827 */
828 WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
829 smp_mb();
830
831 cpumask_clear(&has_work);
832 for_each_online_cpu(cpu) {
833 struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
834
835 if (cpu_needs_drain(cpu)) {
836 INIT_WORK(work, lru_add_drain_per_cpu);
837 queue_work_on(cpu, mm_percpu_wq, work);
838 __cpumask_set_cpu(cpu, &has_work);
839 }
840 }
841
842 for_each_cpu(cpu, &has_work)
843 flush_work(&per_cpu(lru_add_drain_work, cpu));
844
845done:
846 mutex_unlock(&lock);
847}
848
849void lru_add_drain_all(void)
850{
851 __lru_add_drain_all(false);
852}
853#else
854void lru_add_drain_all(void)
855{
856 lru_add_drain();
857}
858#endif /* CONFIG_SMP */
859
860atomic_t lru_disable_count = ATOMIC_INIT(0);
861
862/*
863 * lru_cache_disable() needs to be called before we start compiling
864 * a list of folios to be migrated using folio_isolate_lru().
 * It drains the folios held in the per-CPU LRU caches and then disables
 * the caches on all cpus until lru_cache_enable() is called.
867 *
868 * Must be paired with a call to lru_cache_enable().
869 */
870void lru_cache_disable(void)
871{
872 atomic_inc(&lru_disable_count);
873 /*
874 * Readers of lru_disable_count are protected by either disabling
875 * preemption or rcu_read_lock:
876 *
877 * preempt_disable, local_irq_disable [bh_lru_lock()]
878 * rcu_read_lock [rt_spin_lock CONFIG_PREEMPT_RT]
879 * preempt_disable [local_lock !CONFIG_PREEMPT_RT]
880 *
881 * Since v5.1 kernel, synchronize_rcu() is guaranteed to wait on
882 * preempt_disable() regions of code. So any CPU which sees
883 * lru_disable_count = 0 will have exited the critical
884 * section when synchronize_rcu() returns.
885 */
886 synchronize_rcu_expedited();
887#ifdef CONFIG_SMP
888 __lru_add_drain_all(true);
889#else
890 lru_add_and_bh_lrus_drain();
891#endif
892}
893
894/**
895 * folios_put_refs - Reduce the reference count on a batch of folios.
896 * @folios: The folios.
897 * @refs: The number of refs to subtract from each folio.
898 *
899 * Like folio_put(), but for a batch of folios. This is more efficient
900 * than writing the loop yourself as it will optimise the locks which need
901 * to be taken if the folios are freed. The folios batch is returned
902 * empty and ready to be reused for another batch; there is no need
903 * to reinitialise it. If @refs is NULL, we subtract one from each
904 * folio refcount.
905 *
906 * Context: May be called in process or interrupt context, but not in NMI
907 * context. May be called while holding a spinlock.
908 */
909void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
910{
911 int i, j;
912 struct lruvec *lruvec = NULL;
913 unsigned long flags = 0;
914
915 for (i = 0, j = 0; i < folios->nr; i++) {
916 struct folio *folio = folios->folios[i];
917 unsigned int nr_refs = refs ? refs[i] : 1;
918
919 if (is_huge_zero_folio(folio))
920 continue;
921
922 if (folio_is_zone_device(folio)) {
923 if (lruvec) {
924 unlock_page_lruvec_irqrestore(lruvec, flags);
925 lruvec = NULL;
926 }
927 if (put_devmap_managed_folio_refs(folio, nr_refs))
928 continue;
929 if (folio_ref_sub_and_test(folio, nr_refs))
930 free_zone_device_folio(folio);
931 continue;
932 }
933
934 if (!folio_ref_sub_and_test(folio, nr_refs))
935 continue;
936
937 /* hugetlb has its own memcg */
938 if (folio_test_hugetlb(folio)) {
939 if (lruvec) {
940 unlock_page_lruvec_irqrestore(lruvec, flags);
941 lruvec = NULL;
942 }
943 free_huge_folio(folio);
944 continue;
945 }
946 folio_unqueue_deferred_split(folio);
947 __page_cache_release(folio, &lruvec, &flags);
948
949 if (j != i)
950 folios->folios[j] = folio;
951 j++;
952 }
953 if (lruvec)
954 unlock_page_lruvec_irqrestore(lruvec, flags);
955 if (!j) {
956 folio_batch_reinit(folios);
957 return;
958 }
959
960 folios->nr = j;
961 mem_cgroup_uncharge_folios(folios);
962 free_unref_folios(folios);
963}
964EXPORT_SYMBOL(folios_put_refs);
965
966/**
967 * release_pages - batched put_page()
968 * @arg: array of pages to release
969 * @nr: number of pages
970 *
971 * Decrement the reference count on all the pages in @arg. If it
972 * fell to zero, remove the page from the LRU and free it.
973 *
974 * Note that the argument can be an array of pages, encoded pages,
975 * or folio pointers. We ignore any encoded bits, and turn any of
976 * them into just a folio that gets free'd.
977 */
978void release_pages(release_pages_arg arg, int nr)
979{
980 struct folio_batch fbatch;
981 int refs[PAGEVEC_SIZE];
982 struct encoded_page **encoded = arg.encoded_pages;
983 int i;
984
985 folio_batch_init(&fbatch);
986 for (i = 0; i < nr; i++) {
987 /* Turn any of the argument types into a folio */
988 struct folio *folio = page_folio(encoded_page_ptr(encoded[i]));
989
990 /* Is our next entry actually "nr_pages" -> "nr_refs" ? */
991 refs[fbatch.nr] = 1;
992 if (unlikely(encoded_page_flags(encoded[i]) &
993 ENCODED_PAGE_BIT_NR_PAGES_NEXT))
994 refs[fbatch.nr] = encoded_nr_pages(encoded[++i]);
995
996 if (folio_batch_add(&fbatch, folio) > 0)
997 continue;
998 folios_put_refs(&fbatch, refs);
999 }
1000
1001 if (fbatch.nr)
1002 folios_put_refs(&fbatch, refs);
1003}
1004EXPORT_SYMBOL(release_pages);
1005
1006/*
1007 * The folios which we're about to release may be in the deferred lru-addition
1008 * queues. That would prevent them from really being freed right now. That's
1009 * OK from a correctness point of view but is inefficient - those folios may be
1010 * cache-warm and we want to give them back to the page allocator ASAP.
1011 *
1012 * So __folio_batch_release() will drain those queues here.
1013 * folio_batch_move_lru() calls folios_put() directly to avoid
1014 * mutual recursion.
1015 */
1016void __folio_batch_release(struct folio_batch *fbatch)
1017{
1018 if (!fbatch->percpu_pvec_drained) {
1019 lru_add_drain();
1020 fbatch->percpu_pvec_drained = true;
1021 }
1022 folios_put(fbatch);
1023}
1024EXPORT_SYMBOL(__folio_batch_release);
1025
1026/**
1027 * folio_batch_remove_exceptionals() - Prune non-folios from a batch.
1028 * @fbatch: The batch to prune
1029 *
1030 * find_get_entries() fills a batch with both folios and shadow/swap/DAX
1031 * entries. This function prunes all the non-folio entries from @fbatch
1032 * without leaving holes, so that it can be passed on to folio-only batch
1033 * operations.
1034 */
1035void folio_batch_remove_exceptionals(struct folio_batch *fbatch)
1036{
1037 unsigned int i, j;
1038
1039 for (i = 0, j = 0; i < folio_batch_count(fbatch); i++) {
1040 struct folio *folio = fbatch->folios[i];
1041 if (!xa_is_value(folio))
1042 fbatch->folios[j++] = folio;
1043 }
1044 fbatch->nr = j;
1045}
1046
1047/*
1048 * Perform any setup for the swap system
1049 */
1050void __init swap_setup(void)
1051{
1052 unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);
1053
1054 /* Use a smaller cluster for small-memory machines */
1055 if (megs < 16)
1056 page_cluster = 2;
1057 else
1058 page_cluster = 3;
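	/* A value of 3 means up to 2^3 = 8 pages are swapped in/out together. */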
1059 /*
	 * Right now, other parts of the system mean that we
	 * _really_ don't want to cluster much more.
1062 */
1063}
1/*
2 * linux/mm/swap.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 */
6
7/*
8 * This file contains the default values for the operation of the
9 * Linux VM subsystem. Fine-tuning documentation can be found in
10 * Documentation/sysctl/vm.txt.
11 * Started 18.12.91
12 * Swap aging added 23.2.95, Stephen Tweedie.
13 * Buffermem limits added 12.3.98, Rik van Riel.
14 */
15
16#include <linux/mm.h>
17#include <linux/sched.h>
18#include <linux/kernel_stat.h>
19#include <linux/swap.h>
20#include <linux/mman.h>
21#include <linux/pagemap.h>
22#include <linux/pagevec.h>
23#include <linux/init.h>
24#include <linux/export.h>
25#include <linux/mm_inline.h>
26#include <linux/percpu_counter.h>
27#include <linux/percpu.h>
28#include <linux/cpu.h>
29#include <linux/notifier.h>
30#include <linux/backing-dev.h>
31#include <linux/memcontrol.h>
32#include <linux/gfp.h>
33#include <linux/uio.h>
34
35#include "internal.h"
36
37#define CREATE_TRACE_POINTS
38#include <trace/events/pagemap.h>
39
40/* How many pages do we try to swap or page in/out together? */
41int page_cluster;
42
43static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
44static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
45static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
46
47/*
48 * This path almost never happens for VM activity - pages are normally
49 * freed via pagevecs. But it gets used by networking.
50 */
51static void __page_cache_release(struct page *page)
52{
53 if (PageLRU(page)) {
54 struct zone *zone = page_zone(page);
55 struct lruvec *lruvec;
56 unsigned long flags;
57
58 spin_lock_irqsave(&zone->lru_lock, flags);
59 lruvec = mem_cgroup_page_lruvec(page, zone);
60 VM_BUG_ON_PAGE(!PageLRU(page), page);
61 __ClearPageLRU(page);
62 del_page_from_lru_list(page, lruvec, page_off_lru(page));
63 spin_unlock_irqrestore(&zone->lru_lock, flags);
64 }
65}
66
67static void __put_single_page(struct page *page)
68{
69 __page_cache_release(page);
70 free_hot_cold_page(page, 0);
71}
72
73static void __put_compound_page(struct page *page)
74{
75 compound_page_dtor *dtor;
76
77 __page_cache_release(page);
78 dtor = get_compound_page_dtor(page);
79 (*dtor)(page);
80}
81
82static void put_compound_page(struct page *page)
83{
84 struct page *page_head;
85
86 if (likely(!PageTail(page))) {
87 if (put_page_testzero(page)) {
88 /*
89 * By the time all refcounts have been released
90 * split_huge_page cannot run anymore from under us.
91 */
92 if (PageHead(page))
93 __put_compound_page(page);
94 else
95 __put_single_page(page);
96 }
97 return;
98 }
99
100 /* __split_huge_page_refcount can run under us */
101 page_head = compound_head(page);
102
103 /*
104 * THP can not break up slab pages so avoid taking
105 * compound_lock() and skip the tail page refcounting (in
106 * _mapcount) too. Slab performs non-atomic bit ops on
107 * page->flags for better performance. In particular
108 * slab_unlock() in slub used to be a hot path. It is still
109 * hot on arches that do not support
110 * this_cpu_cmpxchg_double().
111 *
112 * If "page" is part of a slab or hugetlbfs page it cannot be
113 * splitted and the head page cannot change from under us. And
114 * if "page" is part of a THP page under splitting, if the
115 * head page pointed by the THP tail isn't a THP head anymore,
116 * we'll find PageTail clear after smp_rmb() and we'll treat
117 * it as a single page.
118 */
119 if (!__compound_tail_refcounted(page_head)) {
120 /*
121 * If "page" is a THP tail, we must read the tail page
122 * flags after the head page flags. The
123 * split_huge_page side enforces write memory barriers
124 * between clearing PageTail and before the head page
125 * can be freed and reallocated.
126 */
127 smp_rmb();
128 if (likely(PageTail(page))) {
129 /*
130 * __split_huge_page_refcount cannot race
131 * here.
132 */
133 VM_BUG_ON_PAGE(!PageHead(page_head), page_head);
134 VM_BUG_ON_PAGE(page_mapcount(page) != 0, page);
135 if (put_page_testzero(page_head)) {
136 /*
137 * If this is the tail of a slab
138 * compound page, the tail pin must
139 * not be the last reference held on
140 * the page, because the PG_slab
141 * cannot be cleared before all tail
142 * pins (which skips the _mapcount
143 * tail refcounting) have been
144 * released. For hugetlbfs the tail
145 * pin may be the last reference on
146 * the page instead, because
147 * PageHeadHuge will not go away until
148 * the compound page enters the buddy
149 * allocator.
150 */
151 VM_BUG_ON_PAGE(PageSlab(page_head), page_head);
152 __put_compound_page(page_head);
153 }
154 return;
155 } else
156 /*
157 * __split_huge_page_refcount run before us,
158 * "page" was a THP tail. The split page_head
159 * has been freed and reallocated as slab or
160 * hugetlbfs page of smaller order (only
161 * possible if reallocated as slab on x86).
162 */
163 goto out_put_single;
164 }
165
166 if (likely(page != page_head && get_page_unless_zero(page_head))) {
167 unsigned long flags;
168
169 /*
170 * page_head wasn't a dangling pointer but it may not
171 * be a head page anymore by the time we obtain the
172 * lock. That is ok as long as it can't be freed from
173 * under us.
174 */
175 flags = compound_lock_irqsave(page_head);
176 if (unlikely(!PageTail(page))) {
177 /* __split_huge_page_refcount run before us */
178 compound_unlock_irqrestore(page_head, flags);
179 if (put_page_testzero(page_head)) {
180 /*
181 * The head page may have been freed
182 * and reallocated as a compound page
183 * of smaller order and then freed
184 * again. All we know is that it
185 * cannot have become: a THP page, a
186 * compound page of higher order, a
187 * tail page. That is because we
188 * still hold the refcount of the
189 * split THP tail and page_head was
190 * the THP head before the split.
191 */
192 if (PageHead(page_head))
193 __put_compound_page(page_head);
194 else
195 __put_single_page(page_head);
196 }
197out_put_single:
198 if (put_page_testzero(page))
199 __put_single_page(page);
200 return;
201 }
202 VM_BUG_ON_PAGE(page_head != page->first_page, page);
203 /*
204 * We can release the refcount taken by
205 * get_page_unless_zero() now that
206 * __split_huge_page_refcount() is blocked on the
207 * compound_lock.
208 */
209 if (put_page_testzero(page_head))
210 VM_BUG_ON_PAGE(1, page_head);
211 /* __split_huge_page_refcount will wait now */
212 VM_BUG_ON_PAGE(page_mapcount(page) <= 0, page);
213 atomic_dec(&page->_mapcount);
214 VM_BUG_ON_PAGE(atomic_read(&page_head->_count) <= 0, page_head);
215 VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
216 compound_unlock_irqrestore(page_head, flags);
217
218 if (put_page_testzero(page_head)) {
219 if (PageHead(page_head))
220 __put_compound_page(page_head);
221 else
222 __put_single_page(page_head);
223 }
224 } else {
225 /* page_head is a dangling pointer */
226 VM_BUG_ON_PAGE(PageTail(page), page);
227 goto out_put_single;
228 }
229}
230
231void put_page(struct page *page)
232{
233 if (unlikely(PageCompound(page)))
234 put_compound_page(page);
235 else if (put_page_testzero(page))
236 __put_single_page(page);
237}
238EXPORT_SYMBOL(put_page);
239
240/*
241 * This function is exported but must not be called by anything other
242 * than get_page(). It implements the slow path of get_page().
243 */
244bool __get_page_tail(struct page *page)
245{
246 /*
247 * This takes care of get_page() if run on a tail page
248 * returned by one of the get_user_pages/follow_page variants.
249 * get_user_pages/follow_page itself doesn't need the compound
250 * lock because it runs __get_page_tail_foll() under the
251 * proper PT lock that already serializes against
252 * split_huge_page().
253 */
254 unsigned long flags;
255 bool got;
256 struct page *page_head = compound_head(page);
257
258 /* Ref to put_compound_page() comment. */
259 if (!__compound_tail_refcounted(page_head)) {
260 smp_rmb();
261 if (likely(PageTail(page))) {
262 /*
263 * This is a hugetlbfs page or a slab
264 * page. __split_huge_page_refcount
265 * cannot race here.
266 */
267 VM_BUG_ON_PAGE(!PageHead(page_head), page_head);
268 __get_page_tail_foll(page, true);
269 return true;
270 } else {
271 /*
272 * __split_huge_page_refcount run
273 * before us, "page" was a THP
274 * tail. The split page_head has been
275 * freed and reallocated as slab or
276 * hugetlbfs page of smaller order
277 * (only possible if reallocated as
278 * slab on x86).
279 */
280 return false;
281 }
282 }
283
284 got = false;
285 if (likely(page != page_head && get_page_unless_zero(page_head))) {
286 /*
287 * page_head wasn't a dangling pointer but it
288 * may not be a head page anymore by the time
289 * we obtain the lock. That is ok as long as it
290 * can't be freed from under us.
291 */
292 flags = compound_lock_irqsave(page_head);
293 /* here __split_huge_page_refcount won't run anymore */
294 if (likely(PageTail(page))) {
295 __get_page_tail_foll(page, false);
296 got = true;
297 }
298 compound_unlock_irqrestore(page_head, flags);
299 if (unlikely(!got))
300 put_page(page_head);
301 }
302 return got;
303}
304EXPORT_SYMBOL(__get_page_tail);
305
306/**
307 * put_pages_list() - release a list of pages
308 * @pages: list of pages threaded on page->lru
309 *
310 * Release a list of pages which are strung together on page.lru. Currently
311 * used by read_cache_pages() and related error recovery code.
312 */
313void put_pages_list(struct list_head *pages)
314{
315 while (!list_empty(pages)) {
316 struct page *victim;
317
318 victim = list_entry(pages->prev, struct page, lru);
319 list_del(&victim->lru);
320 page_cache_release(victim);
321 }
322}
323EXPORT_SYMBOL(put_pages_list);
324
325/*
326 * get_kernel_pages() - pin kernel pages in memory
327 * @kiov: An array of struct kvec structures
328 * @nr_segs: number of segments to pin
329 * @write: pinning for read/write, currently ignored
330 * @pages: array that receives pointers to the pages pinned.
331 * Should be at least nr_segs long.
332 *
333 * Returns number of pages pinned. This may be fewer than the number
334 * requested. If nr_pages is 0 or negative, returns 0. If no pages
335 * were pinned, returns -errno. Each page returned must be released
336 * with a put_page() call when it is finished with.
337 */
338int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
339 struct page **pages)
340{
341 int seg;
342
343 for (seg = 0; seg < nr_segs; seg++) {
344 if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
345 return seg;
346
347 pages[seg] = kmap_to_page(kiov[seg].iov_base);
348 page_cache_get(pages[seg]);
349 }
350
351 return seg;
352}
353EXPORT_SYMBOL_GPL(get_kernel_pages);
354
355/*
356 * get_kernel_page() - pin a kernel page in memory
357 * @start: starting kernel address
358 * @write: pinning for read/write, currently ignored
359 * @pages: array that receives pointer to the page pinned.
360 * Must be at least nr_segs long.
361 *
362 * Returns 1 if page is pinned. If the page was not pinned, returns
363 * -errno. The page returned must be released with a put_page() call
364 * when it is finished with.
365 */
366int get_kernel_page(unsigned long start, int write, struct page **pages)
367{
368 const struct kvec kiov = {
369 .iov_base = (void *)start,
370 .iov_len = PAGE_SIZE
371 };
372
373 return get_kernel_pages(&kiov, 1, write, pages);
374}
375EXPORT_SYMBOL_GPL(get_kernel_page);
376
377static void pagevec_lru_move_fn(struct pagevec *pvec,
378 void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
379 void *arg)
380{
381 int i;
382 struct zone *zone = NULL;
383 struct lruvec *lruvec;
384 unsigned long flags = 0;
385
386 for (i = 0; i < pagevec_count(pvec); i++) {
387 struct page *page = pvec->pages[i];
388 struct zone *pagezone = page_zone(page);
389
390 if (pagezone != zone) {
391 if (zone)
392 spin_unlock_irqrestore(&zone->lru_lock, flags);
393 zone = pagezone;
394 spin_lock_irqsave(&zone->lru_lock, flags);
395 }
396
397 lruvec = mem_cgroup_page_lruvec(page, zone);
398 (*move_fn)(page, lruvec, arg);
399 }
400 if (zone)
401 spin_unlock_irqrestore(&zone->lru_lock, flags);
402 release_pages(pvec->pages, pvec->nr, pvec->cold);
403 pagevec_reinit(pvec);
404}
405
406static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
407 void *arg)
408{
409 int *pgmoved = arg;
410
411 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
412 enum lru_list lru = page_lru_base_type(page);
413 list_move_tail(&page->lru, &lruvec->lists[lru]);
414 (*pgmoved)++;
415 }
416}
417
418/*
419 * pagevec_move_tail() must be called with IRQ disabled.
420 * Otherwise this may cause nasty races.
421 */
422static void pagevec_move_tail(struct pagevec *pvec)
423{
424 int pgmoved = 0;
425
426 pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
427 __count_vm_events(PGROTATED, pgmoved);
428}
429
430/*
431 * Writeback is about to end against a page which has been marked for immediate
432 * reclaim. If it still appears to be reclaimable, move it to the tail of the
433 * inactive list.
434 */
435void rotate_reclaimable_page(struct page *page)
436{
437 if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
438 !PageUnevictable(page) && PageLRU(page)) {
439 struct pagevec *pvec;
440 unsigned long flags;
441
442 page_cache_get(page);
443 local_irq_save(flags);
444 pvec = &__get_cpu_var(lru_rotate_pvecs);
445 if (!pagevec_add(pvec, page))
446 pagevec_move_tail(pvec);
447 local_irq_restore(flags);
448 }
449}
450
451static void update_page_reclaim_stat(struct lruvec *lruvec,
452 int file, int rotated)
453{
454 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
455
456 reclaim_stat->recent_scanned[file]++;
457 if (rotated)
458 reclaim_stat->recent_rotated[file]++;
459}
460
461static void __activate_page(struct page *page, struct lruvec *lruvec,
462 void *arg)
463{
464 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
465 int file = page_is_file_cache(page);
466 int lru = page_lru_base_type(page);
467
468 del_page_from_lru_list(page, lruvec, lru);
469 SetPageActive(page);
470 lru += LRU_ACTIVE;
471 add_page_to_lru_list(page, lruvec, lru);
472 trace_mm_lru_activate(page, page_to_pfn(page));
473
474 __count_vm_event(PGACTIVATE);
475 update_page_reclaim_stat(lruvec, file, 1);
476 }
477}
478
479#ifdef CONFIG_SMP
480static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
481
482static void activate_page_drain(int cpu)
483{
484 struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
485
486 if (pagevec_count(pvec))
487 pagevec_lru_move_fn(pvec, __activate_page, NULL);
488}
489
490static bool need_activate_page_drain(int cpu)
491{
492 return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
493}
494
495void activate_page(struct page *page)
496{
497 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
498 struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
499
500 page_cache_get(page);
501 if (!pagevec_add(pvec, page))
502 pagevec_lru_move_fn(pvec, __activate_page, NULL);
503 put_cpu_var(activate_page_pvecs);
504 }
505}
506
507#else
508static inline void activate_page_drain(int cpu)
509{
510}
511
512static bool need_activate_page_drain(int cpu)
513{
514 return false;
515}
516
517void activate_page(struct page *page)
518{
519 struct zone *zone = page_zone(page);
520
521 spin_lock_irq(&zone->lru_lock);
522 __activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
523 spin_unlock_irq(&zone->lru_lock);
524}
525#endif
526
527static void __lru_cache_activate_page(struct page *page)
528{
529 struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
530 int i;
531
532 /*
533 * Search backwards on the optimistic assumption that the page being
534 * activated has just been added to this pagevec. Note that only
535 * the local pagevec is examined as a !PageLRU page could be in the
536 * process of being released, reclaimed, migrated or on a remote
537 * pagevec that is currently being drained. Furthermore, marking
538 * a remote pagevec's page PageActive potentially hits a race where
539 * a page is marked PageActive just after it is added to the inactive
540 * list causing accounting errors and BUG_ON checks to trigger.
541 */
542 for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
543 struct page *pagevec_page = pvec->pages[i];
544
545 if (pagevec_page == page) {
546 SetPageActive(page);
547 break;
548 }
549 }
550
551 put_cpu_var(lru_add_pvec);
552}
553
554/*
555 * Mark a page as having seen activity.
556 *
557 * inactive,unreferenced -> inactive,referenced
558 * inactive,referenced -> active,unreferenced
559 * active,unreferenced -> active,referenced
560 */
561void mark_page_accessed(struct page *page)
562{
563 if (!PageActive(page) && !PageUnevictable(page) &&
564 PageReferenced(page)) {
565
566 /*
567 * If the page is on the LRU, queue it for activation via
568 * activate_page_pvecs. Otherwise, assume the page is on a
569 * pagevec, mark it active and it'll be moved to the active
570 * LRU on the next drain.
571 */
572 if (PageLRU(page))
573 activate_page(page);
574 else
575 __lru_cache_activate_page(page);
576 ClearPageReferenced(page);
577 if (page_is_file_cache(page))
578 workingset_activation(page);
579 } else if (!PageReferenced(page)) {
580 SetPageReferenced(page);
581 }
582}
583EXPORT_SYMBOL(mark_page_accessed);
584
585/*
586 * Queue the page for addition to the LRU via pagevec. The decision on whether
587 * to add the page to the [in]active [file|anon] list is deferred until the
588 * pagevec is drained. This gives a chance for the caller of __lru_cache_add()
589 * have the page added to the active list using mark_page_accessed().
590 */
591void __lru_cache_add(struct page *page)
592{
593 struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
594
595 page_cache_get(page);
596 if (!pagevec_space(pvec))
597 __pagevec_lru_add(pvec);
598 pagevec_add(pvec, page);
599 put_cpu_var(lru_add_pvec);
600}
601EXPORT_SYMBOL(__lru_cache_add);
602
603/**
604 * lru_cache_add - add a page to a page list
605 * @page: the page to be added to the LRU.
606 */
607void lru_cache_add(struct page *page)
608{
609 VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
610 VM_BUG_ON_PAGE(PageLRU(page), page);
611 __lru_cache_add(page);
612}
613
614/**
615 * add_page_to_unevictable_list - add a page to the unevictable list
616 * @page: the page to be added to the unevictable list
617 *
618 * Add page directly to its zone's unevictable list. To avoid races with
619 * tasks that might be making the page evictable, through eg. munlock,
620 * munmap or exit, while it's not on the lru, we want to add the page
621 * while it's locked or otherwise "invisible" to other tasks. This is
622 * difficult to do when using the pagevec cache, so bypass that.
623 */
624void add_page_to_unevictable_list(struct page *page)
625{
626 struct zone *zone = page_zone(page);
627 struct lruvec *lruvec;
628
629 spin_lock_irq(&zone->lru_lock);
630 lruvec = mem_cgroup_page_lruvec(page, zone);
631 ClearPageActive(page);
632 SetPageUnevictable(page);
633 SetPageLRU(page);
634 add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
635 spin_unlock_irq(&zone->lru_lock);
636}
637
638/*
639 * If the page can not be invalidated, it is moved to the
640 * inactive list to speed up its reclaim. It is moved to the
641 * head of the list, rather than the tail, to give the flusher
642 * threads some time to write it out, as this is much more
643 * effective than the single-page writeout from reclaim.
644 *
645 * If the page isn't page_mapped and dirty/writeback, the page
646 * could reclaim asap using PG_reclaim.
647 *
648 * 1. active, mapped page -> none
649 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
650 * 3. inactive, mapped page -> none
651 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
652 * 5. inactive, clean -> inactive, tail
653 * 6. Others -> none
654 *
655 * In 4, why it moves inactive's head, the VM expects the page would
656 * be write it out by flusher threads as this is much more effective
657 * than the single-page writeout from reclaim.
658 */
659static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
660 void *arg)
661{
662 int lru, file;
663 bool active;
664
665 if (!PageLRU(page))
666 return;
667
668 if (PageUnevictable(page))
669 return;
670
671 /* Some processes are using the page */
672 if (page_mapped(page))
673 return;
674
675 active = PageActive(page);
676 file = page_is_file_cache(page);
677 lru = page_lru_base_type(page);
678
679 del_page_from_lru_list(page, lruvec, lru + active);
680 ClearPageActive(page);
681 ClearPageReferenced(page);
682 add_page_to_lru_list(page, lruvec, lru);
683
684 if (PageWriteback(page) || PageDirty(page)) {
685 /*
686 * PG_reclaim could be raced with end_page_writeback
687 * It can make readahead confusing. But race window
688 * is _really_ small and it's non-critical problem.
689 */
690 SetPageReclaim(page);
691 } else {
692 /*
693 * The page's writeback ends up during pagevec
694 * We moves tha page into tail of inactive.
695 */
696 list_move_tail(&page->lru, &lruvec->lists[lru]);
697 __count_vm_event(PGROTATED);
698 }
699
700 if (active)
701 __count_vm_event(PGDEACTIVATE);
702 update_page_reclaim_stat(lruvec, file, 0);
703}
704
705/*
706 * Drain pages out of the cpu's pagevecs.
707 * Either "cpu" is the current CPU, and preemption has already been
708 * disabled; or "cpu" is being hot-unplugged, and is already dead.
709 */
710void lru_add_drain_cpu(int cpu)
711{
712 struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);
713
714 if (pagevec_count(pvec))
715 __pagevec_lru_add(pvec);
716
717 pvec = &per_cpu(lru_rotate_pvecs, cpu);
718 if (pagevec_count(pvec)) {
719 unsigned long flags;
720
721 /* No harm done if a racing interrupt already did this */
722 local_irq_save(flags);
723 pagevec_move_tail(pvec);
724 local_irq_restore(flags);
725 }
726
727 pvec = &per_cpu(lru_deactivate_pvecs, cpu);
728 if (pagevec_count(pvec))
729 pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
730
731 activate_page_drain(cpu);
732}
733
734/**
735 * deactivate_page - forcefully deactivate a page
736 * @page: page to deactivate
737 *
738 * This function hints the VM that @page is a good reclaim candidate,
739 * for example if its invalidation fails due to the page being dirty
740 * or under writeback.
741 */
742void deactivate_page(struct page *page)
743{
744 /*
745 * In a workload with many unevictable page such as mprotect, unevictable
746 * page deactivation for accelerating reclaim is pointless.
747 */
748 if (PageUnevictable(page))
749 return;
750
751 if (likely(get_page_unless_zero(page))) {
752 struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
753
754 if (!pagevec_add(pvec, page))
755 pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
756 put_cpu_var(lru_deactivate_pvecs);
757 }
758}
759
760void lru_add_drain(void)
761{
762 lru_add_drain_cpu(get_cpu());
763 put_cpu();
764}
765
766static void lru_add_drain_per_cpu(struct work_struct *dummy)
767{
768 lru_add_drain();
769}
770
771static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
772
773void lru_add_drain_all(void)
774{
775 static DEFINE_MUTEX(lock);
776 static struct cpumask has_work;
777 int cpu;
778
779 mutex_lock(&lock);
780 get_online_cpus();
781 cpumask_clear(&has_work);
782
783 for_each_online_cpu(cpu) {
784 struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
785
786 if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
787 pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
788 pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
789 need_activate_page_drain(cpu)) {
790 INIT_WORK(work, lru_add_drain_per_cpu);
791 schedule_work_on(cpu, work);
792 cpumask_set_cpu(cpu, &has_work);
793 }
794 }
795
796 for_each_cpu(cpu, &has_work)
797 flush_work(&per_cpu(lru_add_drain_work, cpu));
798
799 put_online_cpus();
800 mutex_unlock(&lock);
801}
802
803/*
804 * Batched page_cache_release(). Decrement the reference count on all the
805 * passed pages. If it fell to zero then remove the page from the LRU and
806 * free it.
807 *
808 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
809 * for the remainder of the operation.
810 *
811 * The locking in this function is against shrink_inactive_list(): we recheck
812 * the page count inside the lock to see whether shrink_inactive_list()
813 * grabbed the page via the LRU. If it did, give up: shrink_inactive_list()
814 * will free it.
815 */
816void release_pages(struct page **pages, int nr, int cold)
817{
818 int i;
819 LIST_HEAD(pages_to_free);
820 struct zone *zone = NULL;
821 struct lruvec *lruvec;
822 unsigned long uninitialized_var(flags);
823
824 for (i = 0; i < nr; i++) {
825 struct page *page = pages[i];
826
827 if (unlikely(PageCompound(page))) {
828 if (zone) {
829 spin_unlock_irqrestore(&zone->lru_lock, flags);
830 zone = NULL;
831 }
832 put_compound_page(page);
833 continue;
834 }
835
836 if (!put_page_testzero(page))
837 continue;
838
839 if (PageLRU(page)) {
840 struct zone *pagezone = page_zone(page);
841
842 if (pagezone != zone) {
843 if (zone)
844 spin_unlock_irqrestore(&zone->lru_lock,
845 flags);
846 zone = pagezone;
847 spin_lock_irqsave(&zone->lru_lock, flags);
848 }
849
850 lruvec = mem_cgroup_page_lruvec(page, zone);
851 VM_BUG_ON_PAGE(!PageLRU(page), page);
852 __ClearPageLRU(page);
853 del_page_from_lru_list(page, lruvec, page_off_lru(page));
854 }
855
856 /* Clear Active bit in case of parallel mark_page_accessed */
857 ClearPageActive(page);
858
859 list_add(&page->lru, &pages_to_free);
860 }
861 if (zone)
862 spin_unlock_irqrestore(&zone->lru_lock, flags);
863
864 free_hot_cold_page_list(&pages_to_free, cold);
865}
866EXPORT_SYMBOL(release_pages);
867
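/*
 * Illustrative caller sketch (assumed names, not code from this file):
 * a caller holding references on a local array of pages can drop them
 * all in one batched call rather than calling put_page() in a loop:
 *
 *	struct page *pages[16];
 *	int nr = gather_pages(pages, 16);	(hypothetical helper)
 *
 *	...use the pages...
 *	release_pages(pages, nr, 0);		(0 == pages are cache-hot)
 */
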
868/*
869 * The pages which we're about to release may be in the deferred lru-addition
870 * queues. That would prevent them from really being freed right now. That's
871 * OK from a correctness point of view but is inefficient - those pages may be
872 * cache-warm and we want to give them back to the page allocator ASAP.
873 *
874 * So __pagevec_release() will drain those queues here. __pagevec_lru_add()
875 * and __pagevec_lru_add_active() call release_pages() directly to avoid
876 * mutual recursion.
877 */
878void __pagevec_release(struct pagevec *pvec)
879{
880 lru_add_drain();
881 release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
882 pagevec_reinit(pvec);
883}
884EXPORT_SYMBOL(__pagevec_release);
885
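/*
 * Illustrative pagevec batching sketch (assumed usage, not code from
 * this file): fill a pagevec and let pagevec_release(), which wraps
 * __pagevec_release(), drop the references in batches:
 *
 *	struct pagevec pvec;
 *
 *	pagevec_init(&pvec, 0);
 *	for each page of interest {
 *		...
 *		if (!pagevec_add(&pvec, page))
 *			pagevec_release(&pvec);
 *	}
 *	pagevec_release(&pvec);
 */
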
886#ifdef CONFIG_TRANSPARENT_HUGEPAGE
887/* used by __split_huge_page_refcount() */
888void lru_add_page_tail(struct page *page, struct page *page_tail,
889 struct lruvec *lruvec, struct list_head *list)
890{
891 const int file = 0;
892
893 VM_BUG_ON_PAGE(!PageHead(page), page);
894 VM_BUG_ON_PAGE(PageCompound(page_tail), page);
895 VM_BUG_ON_PAGE(PageLRU(page_tail), page);
896 VM_BUG_ON(NR_CPUS != 1 &&
897 !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));
898
899 if (!list)
900 SetPageLRU(page_tail);
901
902 if (likely(PageLRU(page)))
903 list_add_tail(&page_tail->lru, &page->lru);
904 else if (list) {
905 /* page reclaim is reclaiming a huge page */
906 get_page(page_tail);
907 list_add_tail(&page_tail->lru, list);
908 } else {
909 struct list_head *list_head;
910 /*
911 * Head page has not yet been counted, as an hpage,
912 * so we must account for each subpage individually.
913 *
914 * Use the standard add function to put page_tail on the list,
915 * but then correct its position so they all end up in order.
916 */
917 add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
918 list_head = page_tail->lru.prev;
919 list_move_tail(&page_tail->lru, list_head);
920 }
921
922 if (!PageUnevictable(page))
923 update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
924}
925#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
926
927static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
928 void *arg)
929{
930 int file = page_is_file_cache(page);
931 int active = PageActive(page);
932 enum lru_list lru = page_lru(page);
933
934 VM_BUG_ON_PAGE(PageLRU(page), page);
935
936 SetPageLRU(page);
937 add_page_to_lru_list(page, lruvec, lru);
938 update_page_reclaim_stat(lruvec, file, active);
939 trace_mm_lru_insertion(page, page_to_pfn(page), lru, trace_pagemap_flags(page));
940}
941
942/*
943 * Add the passed pages to the LRU, then drop the caller's refcount
944 * on them. Reinitialises the caller's pagevec.
945 */
946void __pagevec_lru_add(struct pagevec *pvec)
947{
948 pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
949}
950EXPORT_SYMBOL(__pagevec_lru_add);
951
952/**
953 * pagevec_lookup_entries - gang pagecache lookup
954 * @pvec: Where the resulting entries are placed
955 * @mapping: The address_space to search
956 * @start: The starting entry index
957 * @nr_entries: The maximum number of entries
958 * @indices: The cache indices corresponding to the entries in @pvec
959 *
960 * pagevec_lookup_entries() will search for and return a group of up
961 * to @nr_entries pages and shadow entries in the mapping. All
962 * entries are placed in @pvec. pagevec_lookup_entries() takes a
963 * reference against actual pages in @pvec.
964 *
965 * The search returns a group of mapping-contiguous entries with
966 * ascending indexes. There may be holes in the indices due to
967 * not-present entries.
968 *
969 * pagevec_lookup_entries() returns the number of entries which were
970 * found.
971 */
972unsigned pagevec_lookup_entries(struct pagevec *pvec,
973 struct address_space *mapping,
974 pgoff_t start, unsigned nr_entries,
975 pgoff_t *indices)
976{
977 pvec->nr = find_get_entries(mapping, start, nr_entries,
978 pvec->pages, indices);
979 return pagevec_count(pvec);
980}
981
982/**
983 * pagevec_remove_exceptionals - pagevec exceptionals pruning
984 * @pvec: The pagevec to prune
985 *
986 * pagevec_lookup_entries() fills both pages and exceptional radix
987 * tree entries into the pagevec. This function prunes all
988 * exceptionals from @pvec without leaving holes, so that it can be
989 * passed on to page-only pagevec operations.
990 */
991void pagevec_remove_exceptionals(struct pagevec *pvec)
992{
993 int i, j;
994
995 for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
996 struct page *page = pvec->pages[i];
997 if (!radix_tree_exceptional_entry(page))
998 pvec->pages[j++] = page;
999 }
1000 pvec->nr = j;
1001}
1002
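/*
 * Illustrative truncate-style sketch (assumed names, not code from
 * this file): look up a mix of pages and shadow entries, then prune
 * the exceptional entries so the rest of the pagevec machinery only
 * sees real pages:
 *
 *	pgoff_t indices[PAGEVEC_SIZE];
 *	struct pagevec pvec;
 *	pgoff_t index = 0;
 *
 *	pagevec_init(&pvec, 0);
 *	while (pagevec_lookup_entries(&pvec, mapping, index,
 *				      PAGEVEC_SIZE, indices)) {
 *		...inspect pvec.pages[i] against indices[i]...
 *		index = indices[pagevec_count(&pvec) - 1] + 1;
 *		pagevec_remove_exceptionals(&pvec);
 *		pagevec_release(&pvec);
 *	}
 */
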
1003/**
1004 * pagevec_lookup - gang pagecache lookup
1005 * @pvec: Where the resulting pages are placed
1006 * @mapping: The address_space to search
1007 * @start: The starting page index
1008 * @nr_pages: The maximum number of pages
1009 *
1010 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
1011 * in the mapping. The pages are placed in @pvec. pagevec_lookup() takes a
1012 * reference against the pages in @pvec.
1013 *
1014 * The search returns a group of mapping-contiguous pages with ascending
1015 * indexes. There may be holes in the indices due to not-present pages.
1016 *
1017 * pagevec_lookup() returns the number of pages which were found.
1018 */
1019unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
1020 pgoff_t start, unsigned nr_pages)
1021{
1022 pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
1023 return pagevec_count(pvec);
1024}
1025EXPORT_SYMBOL(pagevec_lookup);
1026
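/*
 * Illustrative scan sketch (assumed names, not code from this file):
 * walk an address_space in batches of up to PAGEVEC_SIZE pages:
 *
 *	struct pagevec pvec;
 *	pgoff_t index = 0;
 *
 *	pagevec_init(&pvec, 0);
 *	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
 *		int i;
 *
 *		for (i = 0; i < pagevec_count(&pvec); i++)
 *			...operate on pvec.pages[i]...
 *		index = pvec.pages[pagevec_count(&pvec) - 1]->index + 1;
 *		pagevec_release(&pvec);
 *	}
 */
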
1027unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
1028 pgoff_t *index, int tag, unsigned nr_pages)
1029{
1030 pvec->nr = find_get_pages_tag(mapping, index, tag,
1031 nr_pages, pvec->pages);
1032 return pagevec_count(pvec);
1033}
1034EXPORT_SYMBOL(pagevec_lookup_tag);
1035
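/*
 * Illustrative note (a sketch, not code from this file): the tagged
 * variant is what writeback-style loops use to batch up, say, dirty
 * pages; find_get_pages_tag() advances @index past the last page it
 * returned, so the loop simply repeats until the pagevec comes back
 * empty:
 *
 *	pgoff_t index = 0;
 *
 *	while (pagevec_lookup_tag(&pvec, mapping, &index,
 *				  PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE)) {
 *		...write the pages back...
 *		pagevec_release(&pvec);
 *	}
 */
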
1036/*
1037 * Perform any setup for the swap system
1038 */
1039void __init swap_setup(void)
1040{
1041 unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
1042#ifdef CONFIG_SWAP
1043 int i;
1044
1045 if (bdi_init(swapper_spaces[0].backing_dev_info))
1046 panic("Failed to init swap bdi");
1047 for (i = 0; i < MAX_SWAPFILES; i++) {
1048 spin_lock_init(&swapper_spaces[i].tree_lock);
1049 INIT_LIST_HEAD(&swapper_spaces[i].i_mmap_nonlinear);
1050 }
1051#endif
1052
1053 /* Use a smaller cluster for small-memory machines */
1054 if (megs < 16)
1055 page_cluster = 2;
1056 else
1057 page_cluster = 3;
1058 /*
1059 * Right now other parts of the system mean that we
1060 * _really_ don't want to cluster much more.
1061 */
1062}
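
/*
 * Editorial note (not code from this file): page_cluster is the log2
 * of the swap readahead window, so the defaults above mean swapin
 * reads up to 1 << 2 = 4 pages on machines with less than 16MB of RAM
 * and 1 << 3 = 8 pages otherwise.  The value can be tuned at runtime
 * through /proc/sys/vm/page-cluster.
 */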