1/*
2 * linux/mm/swap.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 */
6
7/*
8 * This file contains the default values for the operation of the
9 * Linux VM subsystem. Fine-tuning documentation can be found in
10 * Documentation/sysctl/vm.txt.
11 * Started 18.12.91
12 * Swap aging added 23.2.95, Stephen Tweedie.
13 * Buffermem limits added 12.3.98, Rik van Riel.
14 */
15
16#include <linux/mm.h>
17#include <linux/sched.h>
18#include <linux/kernel_stat.h>
19#include <linux/swap.h>
20#include <linux/mman.h>
21#include <linux/pagemap.h>
22#include <linux/pagevec.h>
23#include <linux/init.h>
24#include <linux/module.h>
25#include <linux/mm_inline.h>
26#include <linux/buffer_head.h> /* for try_to_release_page() */
27#include <linux/percpu_counter.h>
28#include <linux/percpu.h>
29#include <linux/cpu.h>
30#include <linux/notifier.h>
31#include <linux/backing-dev.h>
32#include <linux/memcontrol.h>
33#include <linux/gfp.h>
34
35#include "internal.h"
36
37/* How many pages do we try to swap or page in/out together? */
38int page_cluster;
39
40static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
41static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
42static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
43
44/*
45 * This path almost never happens for VM activity - pages are normally
46 * freed via pagevecs. But it gets used by networking.
47 */
48static void __page_cache_release(struct page *page)
49{
50 if (PageLRU(page)) {
51 unsigned long flags;
52 struct zone *zone = page_zone(page);
53
54 spin_lock_irqsave(&zone->lru_lock, flags);
55 VM_BUG_ON(!PageLRU(page));
56 __ClearPageLRU(page);
57 del_page_from_lru(zone, page);
58 spin_unlock_irqrestore(&zone->lru_lock, flags);
59 }
60}
61
62static void __put_single_page(struct page *page)
63{
64 __page_cache_release(page);
65 free_hot_cold_page(page, 0);
66}
67
68static void __put_compound_page(struct page *page)
69{
70 compound_page_dtor *dtor;
71
72 __page_cache_release(page);
73 dtor = get_compound_page_dtor(page);
74 (*dtor)(page);
75}
76
77static void put_compound_page(struct page *page)
78{
79 if (unlikely(PageTail(page))) {
80 /* __split_huge_page_refcount can run under us */
81 struct page *page_head = page->first_page;
82 smp_rmb();
83 /*
84 * If PageTail is still set after smp_rmb() we can be sure
85 * that the page->first_page we read wasn't a dangling pointer.
86 * See __split_huge_page_refcount() smp_wmb().
87 */
88 if (likely(PageTail(page) && get_page_unless_zero(page_head))) {
89 unsigned long flags;
90 /*
91 * Verify that our page_head wasn't converted
 92			 * to a regular page before we got a
93 * reference on it.
94 */
95 if (unlikely(!PageHead(page_head))) {
96 /* PageHead is cleared after PageTail */
97 smp_rmb();
98 VM_BUG_ON(PageTail(page));
99 goto out_put_head;
100 }
101 /*
102 * Only run compound_lock on a valid PageHead,
103 * after having it pinned with
104 * get_page_unless_zero() above.
105 */
106 smp_mb();
107 /* page_head wasn't a dangling pointer */
108 flags = compound_lock_irqsave(page_head);
109 if (unlikely(!PageTail(page))) {
110 /* __split_huge_page_refcount run before us */
111 compound_unlock_irqrestore(page_head, flags);
112 VM_BUG_ON(PageHead(page_head));
113 out_put_head:
114 if (put_page_testzero(page_head))
115 __put_single_page(page_head);
116 out_put_single:
117 if (put_page_testzero(page))
118 __put_single_page(page);
119 return;
120 }
121 VM_BUG_ON(page_head != page->first_page);
122 /*
123 * We can release the refcount taken by
124 * get_page_unless_zero now that
125 * split_huge_page_refcount is blocked on the
126 * compound_lock.
127 */
128 if (put_page_testzero(page_head))
129 VM_BUG_ON(1);
130 /* __split_huge_page_refcount will wait now */
131 VM_BUG_ON(atomic_read(&page->_count) <= 0);
132 atomic_dec(&page->_count);
133 VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
134 compound_unlock_irqrestore(page_head, flags);
135 if (put_page_testzero(page_head)) {
136 if (PageHead(page_head))
137 __put_compound_page(page_head);
138 else
139 __put_single_page(page_head);
140 }
141 } else {
142 /* page_head is a dangling pointer */
143 VM_BUG_ON(PageTail(page));
144 goto out_put_single;
145 }
146 } else if (put_page_testzero(page)) {
147 if (PageHead(page))
148 __put_compound_page(page);
149 else
150 __put_single_page(page);
151 }
152}
153
154void put_page(struct page *page)
155{
156 if (unlikely(PageCompound(page)))
157 put_compound_page(page);
158 else if (put_page_testzero(page))
159 __put_single_page(page);
160}
161EXPORT_SYMBOL(put_page);
162
163/**
164 * put_pages_list() - release a list of pages
165 * @pages: list of pages threaded on page->lru
166 *
167 * Release a list of pages which are strung together on page.lru. Currently
168 * used by read_cache_pages() and related error recovery code.
169 */
170void put_pages_list(struct list_head *pages)
171{
172 while (!list_empty(pages)) {
173 struct page *victim;
174
175 victim = list_entry(pages->prev, struct page, lru);
176 list_del(&victim->lru);
177 page_cache_release(victim);
178 }
179}
180EXPORT_SYMBOL(put_pages_list);
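/*
 * Illustrative usage sketch (an assumption, not code from this file):
 * callers such as read_cache_pages() thread pages onto a local list via
 * page->lru and then drop one reference per page in a single call:
 *
 *	LIST_HEAD(pages_to_drop);
 *	...
 *	list_add(&page->lru, &pages_to_drop);
 *	...
 *	put_pages_list(&pages_to_drop);
 */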
181
182static void pagevec_lru_move_fn(struct pagevec *pvec,
183 void (*move_fn)(struct page *page, void *arg),
184 void *arg)
185{
186 int i;
187 struct zone *zone = NULL;
188 unsigned long flags = 0;
189
190 for (i = 0; i < pagevec_count(pvec); i++) {
191 struct page *page = pvec->pages[i];
192 struct zone *pagezone = page_zone(page);
193
194 if (pagezone != zone) {
195 if (zone)
196 spin_unlock_irqrestore(&zone->lru_lock, flags);
197 zone = pagezone;
198 spin_lock_irqsave(&zone->lru_lock, flags);
199 }
200
201 (*move_fn)(page, arg);
202 }
203 if (zone)
204 spin_unlock_irqrestore(&zone->lru_lock, flags);
205 release_pages(pvec->pages, pvec->nr, pvec->cold);
206 pagevec_reinit(pvec);
207}
208
209static void pagevec_move_tail_fn(struct page *page, void *arg)
210{
211 int *pgmoved = arg;
212 struct zone *zone = page_zone(page);
213
214 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
215 enum lru_list lru = page_lru_base_type(page);
216 list_move_tail(&page->lru, &zone->lru[lru].list);
217 mem_cgroup_rotate_reclaimable_page(page);
218 (*pgmoved)++;
219 }
220}
221
222/*
223 * pagevec_move_tail() must be called with IRQ disabled.
224 * Otherwise this may cause nasty races.
225 */
226static void pagevec_move_tail(struct pagevec *pvec)
227{
228 int pgmoved = 0;
229
230 pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
231 __count_vm_events(PGROTATED, pgmoved);
232}
233
234/*
235 * Writeback is about to end against a page which has been marked for immediate
236 * reclaim. If it still appears to be reclaimable, move it to the tail of the
237 * inactive list.
238 */
239void rotate_reclaimable_page(struct page *page)
240{
241 if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
242 !PageUnevictable(page) && PageLRU(page)) {
243 struct pagevec *pvec;
244 unsigned long flags;
245
246 page_cache_get(page);
247 local_irq_save(flags);
248 pvec = &__get_cpu_var(lru_rotate_pvecs);
249 if (!pagevec_add(pvec, page))
250 pagevec_move_tail(pvec);
251 local_irq_restore(flags);
252 }
253}
254
255static void update_page_reclaim_stat(struct zone *zone, struct page *page,
256 int file, int rotated)
257{
258 struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
259 struct zone_reclaim_stat *memcg_reclaim_stat;
260
261 memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
262
263 reclaim_stat->recent_scanned[file]++;
264 if (rotated)
265 reclaim_stat->recent_rotated[file]++;
266
267 if (!memcg_reclaim_stat)
268 return;
269
270 memcg_reclaim_stat->recent_scanned[file]++;
271 if (rotated)
272 memcg_reclaim_stat->recent_rotated[file]++;
273}
274
275static void __activate_page(struct page *page, void *arg)
276{
277 struct zone *zone = page_zone(page);
278
279 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
280 int file = page_is_file_cache(page);
281 int lru = page_lru_base_type(page);
282 del_page_from_lru_list(zone, page, lru);
283
284 SetPageActive(page);
285 lru += LRU_ACTIVE;
286 add_page_to_lru_list(zone, page, lru);
287 __count_vm_event(PGACTIVATE);
288
289 update_page_reclaim_stat(zone, page, file, 1);
290 }
291}
292
293#ifdef CONFIG_SMP
294static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
295
296static void activate_page_drain(int cpu)
297{
298 struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
299
300 if (pagevec_count(pvec))
301 pagevec_lru_move_fn(pvec, __activate_page, NULL);
302}
303
304void activate_page(struct page *page)
305{
306 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
307 struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
308
309 page_cache_get(page);
310 if (!pagevec_add(pvec, page))
311 pagevec_lru_move_fn(pvec, __activate_page, NULL);
312 put_cpu_var(activate_page_pvecs);
313 }
314}
315
316#else
317static inline void activate_page_drain(int cpu)
318{
319}
320
321void activate_page(struct page *page)
322{
323 struct zone *zone = page_zone(page);
324
325 spin_lock_irq(&zone->lru_lock);
326 __activate_page(page, NULL);
327 spin_unlock_irq(&zone->lru_lock);
328}
329#endif
330
331/*
332 * Mark a page as having seen activity.
333 *
334 * inactive,unreferenced -> inactive,referenced
335 * inactive,referenced -> active,unreferenced
336 * active,unreferenced -> active,referenced
337 */
338void mark_page_accessed(struct page *page)
339{
340 if (!PageActive(page) && !PageUnevictable(page) &&
341 PageReferenced(page) && PageLRU(page)) {
342 activate_page(page);
343 ClearPageReferenced(page);
344 } else if (!PageReferenced(page)) {
345 SetPageReferenced(page);
346 }
347}
348
349EXPORT_SYMBOL(mark_page_accessed);
350
351void __lru_cache_add(struct page *page, enum lru_list lru)
352{
353 struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
354
355 page_cache_get(page);
356 if (!pagevec_add(pvec, page))
357 ____pagevec_lru_add(pvec, lru);
358 put_cpu_var(lru_add_pvecs);
359}
360EXPORT_SYMBOL(__lru_cache_add);
361
362/**
363 * lru_cache_add_lru - add a page to a page list
364 * @page: the page to be added to the LRU.
365 * @lru: the LRU list to which the page is added.
366 */
367void lru_cache_add_lru(struct page *page, enum lru_list lru)
368{
369 if (PageActive(page)) {
370 VM_BUG_ON(PageUnevictable(page));
371 ClearPageActive(page);
372 } else if (PageUnevictable(page)) {
373 VM_BUG_ON(PageActive(page));
374 ClearPageUnevictable(page);
375 }
376
377 VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
378 __lru_cache_add(page, lru);
379}
380
381/**
382 * add_page_to_unevictable_list - add a page to the unevictable list
383 * @page: the page to be added to the unevictable list
384 *
385 * Add page directly to its zone's unevictable list. To avoid races with
386 * tasks that might be making the page evictable, through eg. munlock,
387 * munmap or exit, while it's not on the lru, we want to add the page
388 * while it's locked or otherwise "invisible" to other tasks. This is
389 * difficult to do when using the pagevec cache, so bypass that.
390 */
391void add_page_to_unevictable_list(struct page *page)
392{
393 struct zone *zone = page_zone(page);
394
395 spin_lock_irq(&zone->lru_lock);
396 SetPageUnevictable(page);
397 SetPageLRU(page);
398 add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
399 spin_unlock_irq(&zone->lru_lock);
400}
401
402/*
403 * If the page can not be invalidated, it is moved to the
404 * inactive list to speed up its reclaim. It is moved to the
405 * head of the list, rather than the tail, to give the flusher
406 * threads some time to write it out, as this is much more
407 * effective than the single-page writeout from reclaim.
408 *
 409 * If the page isn't mapped and is dirty/writeback, the page
 410 * can be reclaimed ASAP using PG_reclaim.
411 *
412 * 1. active, mapped page -> none
413 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
414 * 3. inactive, mapped page -> none
415 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
416 * 5. inactive, clean -> inactive, tail
417 * 6. Others -> none
418 *
 419 * In case 4, the page is moved to the head of the inactive list because
 420 * the VM expects it to be written out by flusher threads, which is much
 421 * more effective than the single-page writeout from reclaim.
422 */
423static void lru_deactivate_fn(struct page *page, void *arg)
424{
425 int lru, file;
426 bool active;
427 struct zone *zone = page_zone(page);
428
429 if (!PageLRU(page))
430 return;
431
432 if (PageUnevictable(page))
433 return;
434
435 /* Some processes are using the page */
436 if (page_mapped(page))
437 return;
438
439 active = PageActive(page);
440
441 file = page_is_file_cache(page);
442 lru = page_lru_base_type(page);
443 del_page_from_lru_list(zone, page, lru + active);
444 ClearPageActive(page);
445 ClearPageReferenced(page);
446 add_page_to_lru_list(zone, page, lru);
447
448 if (PageWriteback(page) || PageDirty(page)) {
449 /*
 450		 * Setting PG_reclaim could race with end_page_writeback()
 451		 * and confuse readahead. But the race window is _really_
 452		 * small and it is a non-critical problem.
453 */
454 SetPageReclaim(page);
455 } else {
456 /*
 457		 * The page's writeback ended while it was in the pagevec.
 458		 * We move that page to the tail of the inactive list.
459 */
460 list_move_tail(&page->lru, &zone->lru[lru].list);
461 mem_cgroup_rotate_reclaimable_page(page);
462 __count_vm_event(PGROTATED);
463 }
464
465 if (active)
466 __count_vm_event(PGDEACTIVATE);
467 update_page_reclaim_stat(zone, page, file, 0);
468}
469
470/*
471 * Drain pages out of the cpu's pagevecs.
472 * Either "cpu" is the current CPU, and preemption has already been
473 * disabled; or "cpu" is being hot-unplugged, and is already dead.
474 */
475static void drain_cpu_pagevecs(int cpu)
476{
477 struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
478 struct pagevec *pvec;
479 int lru;
480
481 for_each_lru(lru) {
482 pvec = &pvecs[lru - LRU_BASE];
483 if (pagevec_count(pvec))
484 ____pagevec_lru_add(pvec, lru);
485 }
486
487 pvec = &per_cpu(lru_rotate_pvecs, cpu);
488 if (pagevec_count(pvec)) {
489 unsigned long flags;
490
491 /* No harm done if a racing interrupt already did this */
492 local_irq_save(flags);
493 pagevec_move_tail(pvec);
494 local_irq_restore(flags);
495 }
496
497 pvec = &per_cpu(lru_deactivate_pvecs, cpu);
498 if (pagevec_count(pvec))
499 pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
500
501 activate_page_drain(cpu);
502}
503
504/**
505 * deactivate_page - forcefully deactivate a page
506 * @page: page to deactivate
507 *
508 * This function hints the VM that @page is a good reclaim candidate,
509 * for example if its invalidation fails due to the page being dirty
510 * or under writeback.
511 */
512void deactivate_page(struct page *page)
513{
514 /*
 515	 * In a workload with many unevictable pages, such as mprotect-heavy ones,
 516	 * deactivating unevictable pages to accelerate reclaim is pointless.
517 */
518 if (PageUnevictable(page))
519 return;
520
521 if (likely(get_page_unless_zero(page))) {
522 struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
523
524 if (!pagevec_add(pvec, page))
525 pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
526 put_cpu_var(lru_deactivate_pvecs);
527 }
528}
529
530void lru_add_drain(void)
531{
532 drain_cpu_pagevecs(get_cpu());
533 put_cpu();
534}
535
536static void lru_add_drain_per_cpu(struct work_struct *dummy)
537{
538 lru_add_drain();
539}
540
541/*
542 * Returns 0 for success
543 */
544int lru_add_drain_all(void)
545{
546 return schedule_on_each_cpu(lru_add_drain_per_cpu);
547}
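/*
 * Illustrative sketch (an assumption, not code from this file): callers
 * that are about to isolate pages from the LRU, e.g. for migration, first
 * flush the per-cpu pagevecs on every CPU so the pages are actually on
 * their LRU lists:
 *
 *	lru_add_drain_all();
 *	...		// then isolate_lru_page() on the pages of interest
 */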
548
549/*
550 * Batched page_cache_release(). Decrement the reference count on all the
551 * passed pages. If it fell to zero then remove the page from the LRU and
552 * free it.
553 *
554 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
555 * for the remainder of the operation.
556 *
557 * The locking in this function is against shrink_inactive_list(): we recheck
558 * the page count inside the lock to see whether shrink_inactive_list()
559 * grabbed the page via the LRU. If it did, give up: shrink_inactive_list()
560 * will free it.
561 */
562void release_pages(struct page **pages, int nr, int cold)
563{
564 int i;
565 struct pagevec pages_to_free;
566 struct zone *zone = NULL;
567 unsigned long uninitialized_var(flags);
568
569 pagevec_init(&pages_to_free, cold);
570 for (i = 0; i < nr; i++) {
571 struct page *page = pages[i];
572
573 if (unlikely(PageCompound(page))) {
574 if (zone) {
575 spin_unlock_irqrestore(&zone->lru_lock, flags);
576 zone = NULL;
577 }
578 put_compound_page(page);
579 continue;
580 }
581
582 if (!put_page_testzero(page))
583 continue;
584
585 if (PageLRU(page)) {
586 struct zone *pagezone = page_zone(page);
587
588 if (pagezone != zone) {
589 if (zone)
590 spin_unlock_irqrestore(&zone->lru_lock,
591 flags);
592 zone = pagezone;
593 spin_lock_irqsave(&zone->lru_lock, flags);
594 }
595 VM_BUG_ON(!PageLRU(page));
596 __ClearPageLRU(page);
597 del_page_from_lru(zone, page);
598 }
599
600 if (!pagevec_add(&pages_to_free, page)) {
601 if (zone) {
602 spin_unlock_irqrestore(&zone->lru_lock, flags);
603 zone = NULL;
604 }
605 __pagevec_free(&pages_to_free);
606 pagevec_reinit(&pages_to_free);
607 }
608 }
609 if (zone)
610 spin_unlock_irqrestore(&zone->lru_lock, flags);
611
612 pagevec_free(&pages_to_free);
613}
614EXPORT_SYMBOL(release_pages);
615
616/*
617 * The pages which we're about to release may be in the deferred lru-addition
618 * queues. That would prevent them from really being freed right now. That's
619 * OK from a correctness point of view but is inefficient - those pages may be
620 * cache-warm and we want to give them back to the page allocator ASAP.
621 *
622 * So __pagevec_release() will drain those queues here. __pagevec_lru_add()
623 * and __pagevec_lru_add_active() call release_pages() directly to avoid
624 * mutual recursion.
625 */
626void __pagevec_release(struct pagevec *pvec)
627{
628 lru_add_drain();
629 release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
630 pagevec_reinit(pvec);
631}
632
633EXPORT_SYMBOL(__pagevec_release);
634
635/* used by __split_huge_page_refcount() */
636void lru_add_page_tail(struct zone* zone,
637 struct page *page, struct page *page_tail)
638{
639 int active;
640 enum lru_list lru;
641 const int file = 0;
642 struct list_head *head;
643
644 VM_BUG_ON(!PageHead(page));
645 VM_BUG_ON(PageCompound(page_tail));
646 VM_BUG_ON(PageLRU(page_tail));
647 VM_BUG_ON(!spin_is_locked(&zone->lru_lock));
648
649 SetPageLRU(page_tail);
650
651 if (page_evictable(page_tail, NULL)) {
652 if (PageActive(page)) {
653 SetPageActive(page_tail);
654 active = 1;
655 lru = LRU_ACTIVE_ANON;
656 } else {
657 active = 0;
658 lru = LRU_INACTIVE_ANON;
659 }
660 update_page_reclaim_stat(zone, page_tail, file, active);
661 if (likely(PageLRU(page)))
662 head = page->lru.prev;
663 else
664 head = &zone->lru[lru].list;
665 __add_page_to_lru_list(zone, page_tail, lru, head);
666 } else {
667 SetPageUnevictable(page_tail);
668 add_page_to_lru_list(zone, page_tail, LRU_UNEVICTABLE);
669 }
670}
671
672static void ____pagevec_lru_add_fn(struct page *page, void *arg)
673{
674 enum lru_list lru = (enum lru_list)arg;
675 struct zone *zone = page_zone(page);
676 int file = is_file_lru(lru);
677 int active = is_active_lru(lru);
678
679 VM_BUG_ON(PageActive(page));
680 VM_BUG_ON(PageUnevictable(page));
681 VM_BUG_ON(PageLRU(page));
682
683 SetPageLRU(page);
684 if (active)
685 SetPageActive(page);
686 update_page_reclaim_stat(zone, page, file, active);
687 add_page_to_lru_list(zone, page, lru);
688}
689
690/*
691 * Add the passed pages to the LRU, then drop the caller's refcount
692 * on them. Reinitialises the caller's pagevec.
693 */
694void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
695{
696 VM_BUG_ON(is_unevictable_lru(lru));
697
698 pagevec_lru_move_fn(pvec, ____pagevec_lru_add_fn, (void *)lru);
699}
700
701EXPORT_SYMBOL(____pagevec_lru_add);
702
703/*
704 * Try to drop buffers from the pages in a pagevec
705 */
706void pagevec_strip(struct pagevec *pvec)
707{
708 int i;
709
710 for (i = 0; i < pagevec_count(pvec); i++) {
711 struct page *page = pvec->pages[i];
712
713 if (page_has_private(page) && trylock_page(page)) {
714 if (page_has_private(page))
715 try_to_release_page(page, 0);
716 unlock_page(page);
717 }
718 }
719}
720
721/**
722 * pagevec_lookup - gang pagecache lookup
723 * @pvec: Where the resulting pages are placed
724 * @mapping: The address_space to search
725 * @start: The starting page index
726 * @nr_pages: The maximum number of pages
727 *
728 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
729 * in the mapping. The pages are placed in @pvec. pagevec_lookup() takes a
730 * reference against the pages in @pvec.
731 *
732 * The search returns a group of mapping-contiguous pages with ascending
733 * indexes. There may be holes in the indices due to not-present pages.
734 *
735 * pagevec_lookup() returns the number of pages which were found.
736 */
737unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
738 pgoff_t start, unsigned nr_pages)
739{
740 pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
741 return pagevec_count(pvec);
742}
743
744EXPORT_SYMBOL(pagevec_lookup);
745
746unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
747 pgoff_t *index, int tag, unsigned nr_pages)
748{
749 pvec->nr = find_get_pages_tag(mapping, index, tag,
750 nr_pages, pvec->pages);
751 return pagevec_count(pvec);
752}
753
754EXPORT_SYMBOL(pagevec_lookup_tag);
755
756/*
757 * Perform any setup for the swap system
758 */
759void __init swap_setup(void)
760{
761 unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
762
763#ifdef CONFIG_SWAP
764 bdi_init(swapper_space.backing_dev_info);
765#endif
766
767 /* Use a smaller cluster for small-memory machines */
768 if (megs < 16)
769 page_cluster = 2;
770 else
771 page_cluster = 3;
772 /*
 773	 * Right now other parts of the system mean that we
774 * _really_ don't want to cluster much more
775 */
776}
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/mm/swap.c
4 *
5 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
6 */
7
8/*
9 * This file contains the default values for the operation of the
10 * Linux VM subsystem. Fine-tuning documentation can be found in
11 * Documentation/admin-guide/sysctl/vm.rst.
12 * Started 18.12.91
13 * Swap aging added 23.2.95, Stephen Tweedie.
14 * Buffermem limits added 12.3.98, Rik van Riel.
15 */
16
17#include <linux/mm.h>
18#include <linux/sched.h>
19#include <linux/kernel_stat.h>
20#include <linux/swap.h>
21#include <linux/mman.h>
22#include <linux/pagemap.h>
23#include <linux/pagevec.h>
24#include <linux/init.h>
25#include <linux/export.h>
26#include <linux/mm_inline.h>
27#include <linux/percpu_counter.h>
28#include <linux/memremap.h>
29#include <linux/percpu.h>
30#include <linux/cpu.h>
31#include <linux/notifier.h>
32#include <linux/backing-dev.h>
33#include <linux/memcontrol.h>
34#include <linux/gfp.h>
35#include <linux/uio.h>
36#include <linux/hugetlb.h>
37#include <linux/page_idle.h>
38#include <linux/local_lock.h>
39#include <linux/buffer_head.h>
40
41#include "internal.h"
42
43#define CREATE_TRACE_POINTS
44#include <trace/events/pagemap.h>
45
46/* How many pages do we try to swap or page in/out together? As a power of 2 */
47int page_cluster;
48const int page_cluster_max = 31;
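/*
 * Worked example (an assumption based on how swapin readahead consumes this
 * value): page_cluster is the log2 of the swap readahead window, so with the
 * default of 3 on larger machines up to 1 << 3 = 8 pages are read together;
 * setting the vm.page-cluster sysctl to 0 reduces the window to a single page.
 */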
49
50struct cpu_fbatches {
51 /*
52 * The following folio batches are grouped together because they are protected
53 * by disabling preemption (and interrupts remain enabled).
54 */
55 local_lock_t lock;
56 struct folio_batch lru_add;
57 struct folio_batch lru_deactivate_file;
58 struct folio_batch lru_deactivate;
59 struct folio_batch lru_lazyfree;
60#ifdef CONFIG_SMP
61 struct folio_batch lru_activate;
62#endif
63 /* Protecting the following batches which require disabling interrupts */
64 local_lock_t lock_irq;
65 struct folio_batch lru_move_tail;
66};
67
68static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {
69 .lock = INIT_LOCAL_LOCK(lock),
70 .lock_irq = INIT_LOCAL_LOCK(lock_irq),
71};
72
73static void __page_cache_release(struct folio *folio, struct lruvec **lruvecp,
74 unsigned long *flagsp)
75{
76 if (folio_test_lru(folio)) {
77 folio_lruvec_relock_irqsave(folio, lruvecp, flagsp);
78 lruvec_del_folio(*lruvecp, folio);
79 __folio_clear_lru_flags(folio);
80 }
81}
82
83/*
84 * This path almost never happens for VM activity - pages are normally freed
85 * in batches. But it gets used by networking - and for compound pages.
86 */
87static void page_cache_release(struct folio *folio)
88{
89 struct lruvec *lruvec = NULL;
90 unsigned long flags;
91
92 __page_cache_release(folio, &lruvec, &flags);
93 if (lruvec)
94 unlock_page_lruvec_irqrestore(lruvec, flags);
95}
96
97void __folio_put(struct folio *folio)
98{
99 if (unlikely(folio_is_zone_device(folio))) {
100 free_zone_device_folio(folio);
101 return;
102 }
103
104 if (folio_test_hugetlb(folio)) {
105 free_huge_folio(folio);
106 return;
107 }
108
109 page_cache_release(folio);
110 folio_unqueue_deferred_split(folio);
111 mem_cgroup_uncharge(folio);
112 free_unref_page(&folio->page, folio_order(folio));
113}
114EXPORT_SYMBOL(__folio_put);
115
116typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio);
117
118static void lru_add(struct lruvec *lruvec, struct folio *folio)
119{
120 int was_unevictable = folio_test_clear_unevictable(folio);
121 long nr_pages = folio_nr_pages(folio);
122
123 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
124
125 /*
126 * Is an smp_mb__after_atomic() still required here, before
127 * folio_evictable() tests the mlocked flag, to rule out the possibility
128 * of stranding an evictable folio on an unevictable LRU? I think
129 * not, because __munlock_folio() only clears the mlocked flag
130 * while the LRU lock is held.
131 *
132 * (That is not true of __page_cache_release(), and not necessarily
133 * true of folios_put(): but those only clear the mlocked flag after
134 * folio_put_testzero() has excluded any other users of the folio.)
135 */
136 if (folio_evictable(folio)) {
137 if (was_unevictable)
138 __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
139 } else {
140 folio_clear_active(folio);
141 folio_set_unevictable(folio);
142 /*
143 * folio->mlock_count = !!folio_test_mlocked(folio)?
144 * But that leaves __mlock_folio() in doubt whether another
145 * actor has already counted the mlock or not. Err on the
146 * safe side, underestimate, let page reclaim fix it, rather
147 * than leaving a page on the unevictable LRU indefinitely.
148 */
149 folio->mlock_count = 0;
150 if (!was_unevictable)
151 __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
152 }
153
154 lruvec_add_folio(lruvec, folio);
155 trace_mm_lru_insertion(folio);
156}
157
158static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
159{
160 int i;
161 struct lruvec *lruvec = NULL;
162 unsigned long flags = 0;
163
164 for (i = 0; i < folio_batch_count(fbatch); i++) {
165 struct folio *folio = fbatch->folios[i];
166
167 folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
168 move_fn(lruvec, folio);
169
170 folio_set_lru(folio);
171 }
172
173 if (lruvec)
174 unlock_page_lruvec_irqrestore(lruvec, flags);
175 folios_put(fbatch);
176}
177
178static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
179 struct folio *folio, move_fn_t move_fn,
180 bool on_lru, bool disable_irq)
181{
182 unsigned long flags;
183
184 if (on_lru && !folio_test_clear_lru(folio))
185 return;
186
187 folio_get(folio);
188
189 if (disable_irq)
190 local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
191 else
192 local_lock(&cpu_fbatches.lock);
193
194 if (!folio_batch_add(this_cpu_ptr(fbatch), folio) || folio_test_large(folio) ||
195 lru_cache_disabled())
196 folio_batch_move_lru(this_cpu_ptr(fbatch), move_fn);
197
198 if (disable_irq)
199 local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);
200 else
201 local_unlock(&cpu_fbatches.lock);
202}
203
204#define folio_batch_add_and_move(folio, op, on_lru) \
205 __folio_batch_add_and_move( \
206 &cpu_fbatches.op, \
207 folio, \
208 op, \
209 on_lru, \
210 offsetof(struct cpu_fbatches, op) >= offsetof(struct cpu_fbatches, lock_irq) \
211 )
212
213static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
214{
215 if (folio_test_unevictable(folio))
216 return;
217
218 lruvec_del_folio(lruvec, folio);
219 folio_clear_active(folio);
220 lruvec_add_folio_tail(lruvec, folio);
221 __count_vm_events(PGROTATED, folio_nr_pages(folio));
222}
223
224/*
225 * Writeback is about to end against a folio which has been marked for
226 * immediate reclaim. If it still appears to be reclaimable, move it
227 * to the tail of the inactive list.
228 *
229 * folio_rotate_reclaimable() must disable IRQs, to prevent nasty races.
230 */
231void folio_rotate_reclaimable(struct folio *folio)
232{
233 if (folio_test_locked(folio) || folio_test_dirty(folio) ||
234 folio_test_unevictable(folio))
235 return;
236
237 folio_batch_add_and_move(folio, lru_move_tail, true);
238}
239
240void lru_note_cost(struct lruvec *lruvec, bool file,
241 unsigned int nr_io, unsigned int nr_rotated)
242{
243 unsigned long cost;
244
245 /*
246 * Reflect the relative cost of incurring IO and spending CPU
247 * time on rotations. This doesn't attempt to make a precise
248 * comparison, it just says: if reloads are about comparable
249 * between the LRU lists, or rotations are overwhelmingly
250 * different between them, adjust scan balance for CPU work.
251 */
252 cost = nr_io * SWAP_CLUSTER_MAX + nr_rotated;
253
254 do {
255 unsigned long lrusize;
256
257 /*
 258		 * Holding lruvec->lru_lock is safe here because we are either
 259		 * operating on 1) a lruvec pinned by reclaim, or
 260		 * 2) a pre-LRU page during refault (which also holds the
 261		 * rcu lock, so it would be safe even if the page was on the
 262		 * LRU and could move simultaneously to a new lruvec).
263 */
264 spin_lock_irq(&lruvec->lru_lock);
265 /* Record cost event */
266 if (file)
267 lruvec->file_cost += cost;
268 else
269 lruvec->anon_cost += cost;
270
271 /*
272 * Decay previous events
273 *
274 * Because workloads change over time (and to avoid
275 * overflow) we keep these statistics as a floating
276 * average, which ends up weighing recent refaults
277 * more than old ones.
278 */
279 lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
280 lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
281 lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
282 lruvec_page_state(lruvec, NR_ACTIVE_FILE);
283
284 if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
285 lruvec->file_cost /= 2;
286 lruvec->anon_cost /= 2;
287 }
288 spin_unlock_irq(&lruvec->lru_lock);
289 } while ((lruvec = parent_lruvec(lruvec)));
290}
291
292void lru_note_cost_refault(struct folio *folio)
293{
294 lru_note_cost(folio_lruvec(folio), folio_is_file_lru(folio),
295 folio_nr_pages(folio), 0);
296}
297
298static void lru_activate(struct lruvec *lruvec, struct folio *folio)
299{
300 long nr_pages = folio_nr_pages(folio);
301
302 if (folio_test_active(folio) || folio_test_unevictable(folio))
303 return;
304
305
306 lruvec_del_folio(lruvec, folio);
307 folio_set_active(folio);
308 lruvec_add_folio(lruvec, folio);
309 trace_mm_lru_activate(folio);
310
311 __count_vm_events(PGACTIVATE, nr_pages);
312 __count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE, nr_pages);
313}
314
315#ifdef CONFIG_SMP
316static void folio_activate_drain(int cpu)
317{
318 struct folio_batch *fbatch = &per_cpu(cpu_fbatches.lru_activate, cpu);
319
320 if (folio_batch_count(fbatch))
321 folio_batch_move_lru(fbatch, lru_activate);
322}
323
324void folio_activate(struct folio *folio)
325{
326 if (folio_test_active(folio) || folio_test_unevictable(folio))
327 return;
328
329 folio_batch_add_and_move(folio, lru_activate, true);
330}
331
332#else
333static inline void folio_activate_drain(int cpu)
334{
335}
336
337void folio_activate(struct folio *folio)
338{
339 struct lruvec *lruvec;
340
341 if (!folio_test_clear_lru(folio))
342 return;
343
344 lruvec = folio_lruvec_lock_irq(folio);
345 lru_activate(lruvec, folio);
346 unlock_page_lruvec_irq(lruvec);
347 folio_set_lru(folio);
348}
349#endif
350
351static void __lru_cache_activate_folio(struct folio *folio)
352{
353 struct folio_batch *fbatch;
354 int i;
355
356 local_lock(&cpu_fbatches.lock);
357 fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);
358
359 /*
360 * Search backwards on the optimistic assumption that the folio being
361 * activated has just been added to this batch. Note that only
362 * the local batch is examined as a !LRU folio could be in the
363 * process of being released, reclaimed, migrated or on a remote
364 * batch that is currently being drained. Furthermore, marking
365 * a remote batch's folio active potentially hits a race where
366 * a folio is marked active just after it is added to the inactive
367 * list causing accounting errors and BUG_ON checks to trigger.
368 */
369 for (i = folio_batch_count(fbatch) - 1; i >= 0; i--) {
370 struct folio *batch_folio = fbatch->folios[i];
371
372 if (batch_folio == folio) {
373 folio_set_active(folio);
374 break;
375 }
376 }
377
378 local_unlock(&cpu_fbatches.lock);
379}
380
381#ifdef CONFIG_LRU_GEN
382static void folio_inc_refs(struct folio *folio)
383{
384 unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
385
386 if (folio_test_unevictable(folio))
387 return;
388
389 if (!folio_test_referenced(folio)) {
390 folio_set_referenced(folio);
391 return;
392 }
393
394 if (!folio_test_workingset(folio)) {
395 folio_set_workingset(folio);
396 return;
397 }
398
399 /* see the comment on MAX_NR_TIERS */
400 do {
401 new_flags = old_flags & LRU_REFS_MASK;
402 if (new_flags == LRU_REFS_MASK)
403 break;
404
405 new_flags += BIT(LRU_REFS_PGOFF);
406 new_flags |= old_flags & ~LRU_REFS_MASK;
407 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
408}
409#else
410static void folio_inc_refs(struct folio *folio)
411{
412}
413#endif /* CONFIG_LRU_GEN */
414
415/**
416 * folio_mark_accessed - Mark a folio as having seen activity.
417 * @folio: The folio to mark.
418 *
419 * This function will perform one of the following transitions:
420 *
421 * * inactive,unreferenced -> inactive,referenced
422 * * inactive,referenced -> active,unreferenced
423 * * active,unreferenced -> active,referenced
424 *
425 * When a newly allocated folio is not yet visible, so safe for non-atomic ops,
426 * __folio_set_referenced() may be substituted for folio_mark_accessed().
427 */
428void folio_mark_accessed(struct folio *folio)
429{
430 if (lru_gen_enabled()) {
431 folio_inc_refs(folio);
432 return;
433 }
434
435 if (!folio_test_referenced(folio)) {
436 folio_set_referenced(folio);
437 } else if (folio_test_unevictable(folio)) {
438 /*
439 * Unevictable pages are on the "LRU_UNEVICTABLE" list. But,
440 * this list is never rotated or maintained, so marking an
441 * unevictable page accessed has no effect.
442 */
443 } else if (!folio_test_active(folio)) {
444 /*
445 * If the folio is on the LRU, queue it for activation via
446 * cpu_fbatches.lru_activate. Otherwise, assume the folio is in a
447 * folio_batch, mark it active and it'll be moved to the active
448 * LRU on the next drain.
449 */
450 if (folio_test_lru(folio))
451 folio_activate(folio);
452 else
453 __lru_cache_activate_folio(folio);
454 folio_clear_referenced(folio);
455 workingset_activation(folio);
456 }
457 if (folio_test_idle(folio))
458 folio_clear_idle(folio);
459}
460EXPORT_SYMBOL(folio_mark_accessed);
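/*
 * Illustrative sketch (an assumption, not code from this file): a typical
 * caller looks a folio up in the page cache and records the access so the
 * folio ages through the transitions documented above:
 *
 *	struct folio *folio = filemap_get_folio(mapping, index);
 *
 *	if (!IS_ERR(folio)) {
 *		folio_mark_accessed(folio);
 *		folio_put(folio);
 *	}
 */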
461
462/**
463 * folio_add_lru - Add a folio to an LRU list.
464 * @folio: The folio to be added to the LRU.
465 *
466 * Queue the folio for addition to the LRU. The decision on whether
467 * to add the page to the [in]active [file|anon] list is deferred until the
 468 * folio_batch is drained. This gives the caller of folio_add_lru() a chance
 469 * to have the folio added to the active list using folio_mark_accessed().
470 */
471void folio_add_lru(struct folio *folio)
472{
473 VM_BUG_ON_FOLIO(folio_test_active(folio) &&
474 folio_test_unevictable(folio), folio);
475 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
476
477 /* see the comment in lru_gen_add_folio() */
478 if (lru_gen_enabled() && !folio_test_unevictable(folio) &&
479 lru_gen_in_fault() && !(current->flags & PF_MEMALLOC))
480 folio_set_active(folio);
481
482 folio_batch_add_and_move(folio, lru_add, false);
483}
484EXPORT_SYMBOL(folio_add_lru);
485
486/**
 487 * folio_add_lru_vma() - Add a folio to the appropriate LRU list for this VMA.
488 * @folio: The folio to be added to the LRU.
489 * @vma: VMA in which the folio is mapped.
490 *
491 * If the VMA is mlocked, @folio is added to the unevictable list.
492 * Otherwise, it is treated the same way as folio_add_lru().
493 */
494void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma)
495{
496 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
497
498 if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED))
499 mlock_new_folio(folio);
500 else
501 folio_add_lru(folio);
502}
503
504/*
505 * If the folio cannot be invalidated, it is moved to the
506 * inactive list to speed up its reclaim. It is moved to the
507 * head of the list, rather than the tail, to give the flusher
508 * threads some time to write it out, as this is much more
509 * effective than the single-page writeout from reclaim.
510 *
511 * If the folio isn't mapped and dirty/writeback, the folio
512 * could be reclaimed asap using the reclaim flag.
513 *
514 * 1. active, mapped folio -> none
515 * 2. active, dirty/writeback folio -> inactive, head, reclaim
516 * 3. inactive, mapped folio -> none
517 * 4. inactive, dirty/writeback folio -> inactive, head, reclaim
518 * 5. inactive, clean -> inactive, tail
519 * 6. Others -> none
520 *
521 * In 4, it moves to the head of the inactive list so the folio is
522 * written out by flusher threads as this is much more efficient
523 * than the single-page writeout from reclaim.
524 */
525static void lru_deactivate_file(struct lruvec *lruvec, struct folio *folio)
526{
527 bool active = folio_test_active(folio);
528 long nr_pages = folio_nr_pages(folio);
529
530 if (folio_test_unevictable(folio))
531 return;
532
533 /* Some processes are using the folio */
534 if (folio_mapped(folio))
535 return;
536
537 lruvec_del_folio(lruvec, folio);
538 folio_clear_active(folio);
539 folio_clear_referenced(folio);
540
541 if (folio_test_writeback(folio) || folio_test_dirty(folio)) {
542 /*
543 * Setting the reclaim flag could race with
544 * folio_end_writeback() and confuse readahead. But the
545 * race window is _really_ small and it's not a critical
546 * problem.
547 */
548 lruvec_add_folio(lruvec, folio);
549 folio_set_reclaim(folio);
550 } else {
551 /*
552 * The folio's writeback ended while it was in the batch.
553 * We move that folio to the tail of the inactive list.
554 */
555 lruvec_add_folio_tail(lruvec, folio);
556 __count_vm_events(PGROTATED, nr_pages);
557 }
558
559 if (active) {
560 __count_vm_events(PGDEACTIVATE, nr_pages);
561 __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
562 nr_pages);
563 }
564}
565
566static void lru_deactivate(struct lruvec *lruvec, struct folio *folio)
567{
568 long nr_pages = folio_nr_pages(folio);
569
570 if (folio_test_unevictable(folio) || !(folio_test_active(folio) || lru_gen_enabled()))
571 return;
572
573 lruvec_del_folio(lruvec, folio);
574 folio_clear_active(folio);
575 folio_clear_referenced(folio);
576 lruvec_add_folio(lruvec, folio);
577
578 __count_vm_events(PGDEACTIVATE, nr_pages);
579 __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_pages);
580}
581
582static void lru_lazyfree(struct lruvec *lruvec, struct folio *folio)
583{
584 long nr_pages = folio_nr_pages(folio);
585
586 if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
587 folio_test_swapcache(folio) || folio_test_unevictable(folio))
588 return;
589
590 lruvec_del_folio(lruvec, folio);
591 folio_clear_active(folio);
592 folio_clear_referenced(folio);
593 /*
594 * Lazyfree folios are clean anonymous folios. They have
595 * the swapbacked flag cleared, to distinguish them from normal
596 * anonymous folios
597 */
598 folio_clear_swapbacked(folio);
599 lruvec_add_folio(lruvec, folio);
600
601 __count_vm_events(PGLAZYFREE, nr_pages);
602 __count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE, nr_pages);
603}
604
605/*
606 * Drain pages out of the cpu's folio_batch.
607 * Either "cpu" is the current CPU, and preemption has already been
608 * disabled; or "cpu" is being hot-unplugged, and is already dead.
609 */
610void lru_add_drain_cpu(int cpu)
611{
612 struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
613 struct folio_batch *fbatch = &fbatches->lru_add;
614
615 if (folio_batch_count(fbatch))
616 folio_batch_move_lru(fbatch, lru_add);
617
618 fbatch = &fbatches->lru_move_tail;
619 /* Disabling interrupts below acts as a compiler barrier. */
620 if (data_race(folio_batch_count(fbatch))) {
621 unsigned long flags;
622
623 /* No harm done if a racing interrupt already did this */
624 local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
625 folio_batch_move_lru(fbatch, lru_move_tail);
626 local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);
627 }
628
629 fbatch = &fbatches->lru_deactivate_file;
630 if (folio_batch_count(fbatch))
631 folio_batch_move_lru(fbatch, lru_deactivate_file);
632
633 fbatch = &fbatches->lru_deactivate;
634 if (folio_batch_count(fbatch))
635 folio_batch_move_lru(fbatch, lru_deactivate);
636
637 fbatch = &fbatches->lru_lazyfree;
638 if (folio_batch_count(fbatch))
639 folio_batch_move_lru(fbatch, lru_lazyfree);
640
641 folio_activate_drain(cpu);
642}
643
644/**
645 * deactivate_file_folio() - Deactivate a file folio.
646 * @folio: Folio to deactivate.
647 *
648 * This function hints to the VM that @folio is a good reclaim candidate,
649 * for example if its invalidation fails due to the folio being dirty
650 * or under writeback.
651 *
652 * Context: Caller holds a reference on the folio.
653 */
654void deactivate_file_folio(struct folio *folio)
655{
656 /* Deactivating an unevictable folio will not accelerate reclaim */
657 if (folio_test_unevictable(folio))
658 return;
659
660 folio_batch_add_and_move(folio, lru_deactivate_file, true);
661}
662
663/*
664 * folio_deactivate - deactivate a folio
665 * @folio: folio to deactivate
666 *
667 * folio_deactivate() moves @folio to the inactive list if @folio was on the
668 * active list and was not unevictable. This is done to accelerate the
669 * reclaim of @folio.
670 */
671void folio_deactivate(struct folio *folio)
672{
673 if (folio_test_unevictable(folio) || !(folio_test_active(folio) || lru_gen_enabled()))
674 return;
675
676 folio_batch_add_and_move(folio, lru_deactivate, true);
677}
678
679/**
680 * folio_mark_lazyfree - make an anon folio lazyfree
681 * @folio: folio to deactivate
682 *
683 * folio_mark_lazyfree() moves @folio to the inactive file list.
684 * This is done to accelerate the reclaim of @folio.
685 */
686void folio_mark_lazyfree(struct folio *folio)
687{
688 if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
689 folio_test_swapcache(folio) || folio_test_unevictable(folio))
690 return;
691
692 folio_batch_add_and_move(folio, lru_lazyfree, true);
693}
694
695void lru_add_drain(void)
696{
697 local_lock(&cpu_fbatches.lock);
698 lru_add_drain_cpu(smp_processor_id());
699 local_unlock(&cpu_fbatches.lock);
700 mlock_drain_local();
701}
702
703/*
 704 * In the SMP case this is called from per-cpu workqueue context, so
 705 * lru_add_drain_cpu() and invalidate_bh_lrus_cpu() run on the same
 706 * CPU. This isn't a problem in the !SMP case since there is only one
 707 * core and the locks disable preemption.
708 */
709static void lru_add_and_bh_lrus_drain(void)
710{
711 local_lock(&cpu_fbatches.lock);
712 lru_add_drain_cpu(smp_processor_id());
713 local_unlock(&cpu_fbatches.lock);
714 invalidate_bh_lrus_cpu();
715 mlock_drain_local();
716}
717
718void lru_add_drain_cpu_zone(struct zone *zone)
719{
720 local_lock(&cpu_fbatches.lock);
721 lru_add_drain_cpu(smp_processor_id());
722 drain_local_pages(zone);
723 local_unlock(&cpu_fbatches.lock);
724 mlock_drain_local();
725}
726
727#ifdef CONFIG_SMP
728
729static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
730
731static void lru_add_drain_per_cpu(struct work_struct *dummy)
732{
733 lru_add_and_bh_lrus_drain();
734}
735
736static bool cpu_needs_drain(unsigned int cpu)
737{
738 struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
739
740 /* Check these in order of likelihood that they're not zero */
741 return folio_batch_count(&fbatches->lru_add) ||
742 folio_batch_count(&fbatches->lru_move_tail) ||
743 folio_batch_count(&fbatches->lru_deactivate_file) ||
744 folio_batch_count(&fbatches->lru_deactivate) ||
745 folio_batch_count(&fbatches->lru_lazyfree) ||
746 folio_batch_count(&fbatches->lru_activate) ||
747 need_mlock_drain(cpu) ||
748 has_bh_in_lru(cpu, NULL);
749}
750
751/*
752 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
753 * kworkers being shut down before our page_alloc_cpu_dead callback is
754 * executed on the offlined cpu.
755 * Calling this function with cpu hotplug locks held can actually lead
756 * to obscure indirect dependencies via WQ context.
757 */
758static inline void __lru_add_drain_all(bool force_all_cpus)
759{
760 /*
761 * lru_drain_gen - Global pages generation number
762 *
763 * (A) Definition: global lru_drain_gen = x implies that all generations
764 * 0 < n <= x are already *scheduled* for draining.
765 *
766 * This is an optimization for the highly-contended use case where a
767 * user space workload keeps constantly generating a flow of pages for
768 * each CPU.
769 */
770 static unsigned int lru_drain_gen;
771 static struct cpumask has_work;
772 static DEFINE_MUTEX(lock);
773 unsigned cpu, this_gen;
774
775 /*
776 * Make sure nobody triggers this path before mm_percpu_wq is fully
777 * initialized.
778 */
779 if (WARN_ON(!mm_percpu_wq))
780 return;
781
782 /*
783 * Guarantee folio_batch counter stores visible by this CPU
784 * are visible to other CPUs before loading the current drain
785 * generation.
786 */
787 smp_mb();
788
789 /*
790 * (B) Locally cache global LRU draining generation number
791 *
792 * The read barrier ensures that the counter is loaded before the mutex
793 * is taken. It pairs with smp_mb() inside the mutex critical section
794 * at (D).
795 */
796 this_gen = smp_load_acquire(&lru_drain_gen);
797
798 mutex_lock(&lock);
799
800 /*
801 * (C) Exit the draining operation if a newer generation, from another
802 * lru_add_drain_all(), was already scheduled for draining. Check (A).
803 */
804 if (unlikely(this_gen != lru_drain_gen && !force_all_cpus))
805 goto done;
806
807 /*
808 * (D) Increment global generation number
809 *
810 * Pairs with smp_load_acquire() at (B), outside of the critical
811 * section. Use a full memory barrier to guarantee that the
812 * new global drain generation number is stored before loading
813 * folio_batch counters.
814 *
815 * This pairing must be done here, before the for_each_online_cpu loop
816 * below which drains the page vectors.
817 *
818 * Let x, y, and z represent some system CPU numbers, where x < y < z.
819 * Assume CPU #z is in the middle of the for_each_online_cpu loop
820 * below and has already reached CPU #y's per-cpu data. CPU #x comes
821 * along, adds some pages to its per-cpu vectors, then calls
822 * lru_add_drain_all().
823 *
824 * If the paired barrier is done at any later step, e.g. after the
825 * loop, CPU #x will just exit at (C) and miss flushing out all of its
826 * added pages.
827 */
828 WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
829 smp_mb();
830
831 cpumask_clear(&has_work);
832 for_each_online_cpu(cpu) {
833 struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
834
835 if (cpu_needs_drain(cpu)) {
836 INIT_WORK(work, lru_add_drain_per_cpu);
837 queue_work_on(cpu, mm_percpu_wq, work);
838 __cpumask_set_cpu(cpu, &has_work);
839 }
840 }
841
842 for_each_cpu(cpu, &has_work)
843 flush_work(&per_cpu(lru_add_drain_work, cpu));
844
845done:
846 mutex_unlock(&lock);
847}
848
849void lru_add_drain_all(void)
850{
851 __lru_add_drain_all(false);
852}
853#else
854void lru_add_drain_all(void)
855{
856 lru_add_drain();
857}
858#endif /* CONFIG_SMP */
859
860atomic_t lru_disable_count = ATOMIC_INIT(0);
861
862/*
863 * lru_cache_disable() needs to be called before we start compiling
864 * a list of folios to be migrated using folio_isolate_lru().
 865 * It drains the folios on the LRU caches and then disables them on all
 866 * cpus until lru_cache_enable() is called.
867 *
868 * Must be paired with a call to lru_cache_enable().
869 */
870void lru_cache_disable(void)
871{
872 atomic_inc(&lru_disable_count);
873 /*
874 * Readers of lru_disable_count are protected by either disabling
875 * preemption or rcu_read_lock:
876 *
877 * preempt_disable, local_irq_disable [bh_lru_lock()]
878 * rcu_read_lock [rt_spin_lock CONFIG_PREEMPT_RT]
879 * preempt_disable [local_lock !CONFIG_PREEMPT_RT]
880 *
881 * Since v5.1 kernel, synchronize_rcu() is guaranteed to wait on
882 * preempt_disable() regions of code. So any CPU which sees
883 * lru_disable_count = 0 will have exited the critical
884 * section when synchronize_rcu() returns.
885 */
886 synchronize_rcu_expedited();
887#ifdef CONFIG_SMP
888 __lru_add_drain_all(true);
889#else
890 lru_add_and_bh_lrus_drain();
891#endif
892}
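/*
 * Illustrative pairing sketch (an assumption, not code from this file): a
 * migration path disables the LRU caches so that the folios it is about to
 * isolate cannot linger in per-cpu batches, and re-enables them when done:
 *
 *	lru_cache_disable();
 *	...		// folio_isolate_lru() on the folios to migrate
 *	lru_cache_enable();
 */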
893
894/**
895 * folios_put_refs - Reduce the reference count on a batch of folios.
896 * @folios: The folios.
897 * @refs: The number of refs to subtract from each folio.
898 *
899 * Like folio_put(), but for a batch of folios. This is more efficient
900 * than writing the loop yourself as it will optimise the locks which need
901 * to be taken if the folios are freed. The folios batch is returned
902 * empty and ready to be reused for another batch; there is no need
903 * to reinitialise it. If @refs is NULL, we subtract one from each
904 * folio refcount.
905 *
906 * Context: May be called in process or interrupt context, but not in NMI
907 * context. May be called while holding a spinlock.
908 */
909void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
910{
911 int i, j;
912 struct lruvec *lruvec = NULL;
913 unsigned long flags = 0;
914
915 for (i = 0, j = 0; i < folios->nr; i++) {
916 struct folio *folio = folios->folios[i];
917 unsigned int nr_refs = refs ? refs[i] : 1;
918
919 if (is_huge_zero_folio(folio))
920 continue;
921
922 if (folio_is_zone_device(folio)) {
923 if (lruvec) {
924 unlock_page_lruvec_irqrestore(lruvec, flags);
925 lruvec = NULL;
926 }
927 if (put_devmap_managed_folio_refs(folio, nr_refs))
928 continue;
929 if (folio_ref_sub_and_test(folio, nr_refs))
930 free_zone_device_folio(folio);
931 continue;
932 }
933
934 if (!folio_ref_sub_and_test(folio, nr_refs))
935 continue;
936
937 /* hugetlb has its own memcg */
938 if (folio_test_hugetlb(folio)) {
939 if (lruvec) {
940 unlock_page_lruvec_irqrestore(lruvec, flags);
941 lruvec = NULL;
942 }
943 free_huge_folio(folio);
944 continue;
945 }
946 folio_unqueue_deferred_split(folio);
947 __page_cache_release(folio, &lruvec, &flags);
948
949 if (j != i)
950 folios->folios[j] = folio;
951 j++;
952 }
953 if (lruvec)
954 unlock_page_lruvec_irqrestore(lruvec, flags);
955 if (!j) {
956 folio_batch_reinit(folios);
957 return;
958 }
959
960 folios->nr = j;
961 mem_cgroup_uncharge_folios(folios);
962 free_unref_folios(folios);
963}
964EXPORT_SYMBOL(folios_put_refs);
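/*
 * Illustrative sketch (an assumption, not code from this file): dropping a
 * single reference from each folio in a batch. Passing NULL for @refs means
 * one reference per folio, and the batch comes back empty and reusable:
 *
 *	struct folio_batch fbatch;
 *
 *	folio_batch_init(&fbatch);
 *	...		// folio_batch_add(&fbatch, folio) for each folio
 *	folios_put_refs(&fbatch, NULL);
 */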
965
966/**
967 * release_pages - batched put_page()
968 * @arg: array of pages to release
969 * @nr: number of pages
970 *
971 * Decrement the reference count on all the pages in @arg. If it
972 * fell to zero, remove the page from the LRU and free it.
973 *
974 * Note that the argument can be an array of pages, encoded pages,
975 * or folio pointers. We ignore any encoded bits, and turn any of
 976 * them into just a folio that gets freed.
977 */
978void release_pages(release_pages_arg arg, int nr)
979{
980 struct folio_batch fbatch;
981 int refs[PAGEVEC_SIZE];
982 struct encoded_page **encoded = arg.encoded_pages;
983 int i;
984
985 folio_batch_init(&fbatch);
986 for (i = 0; i < nr; i++) {
987 /* Turn any of the argument types into a folio */
988 struct folio *folio = page_folio(encoded_page_ptr(encoded[i]));
989
990 /* Is our next entry actually "nr_pages" -> "nr_refs" ? */
991 refs[fbatch.nr] = 1;
992 if (unlikely(encoded_page_flags(encoded[i]) &
993 ENCODED_PAGE_BIT_NR_PAGES_NEXT))
994 refs[fbatch.nr] = encoded_nr_pages(encoded[++i]);
995
996 if (folio_batch_add(&fbatch, folio) > 0)
997 continue;
998 folios_put_refs(&fbatch, refs);
999 }
1000
1001 if (fbatch.nr)
1002 folios_put_refs(&fbatch, refs);
1003}
1004EXPORT_SYMBOL(release_pages);
1005
1006/*
1007 * The folios which we're about to release may be in the deferred lru-addition
1008 * queues. That would prevent them from really being freed right now. That's
1009 * OK from a correctness point of view but is inefficient - those folios may be
1010 * cache-warm and we want to give them back to the page allocator ASAP.
1011 *
1012 * So __folio_batch_release() will drain those queues here.
1013 * folio_batch_move_lru() calls folios_put() directly to avoid
1014 * mutual recursion.
1015 */
1016void __folio_batch_release(struct folio_batch *fbatch)
1017{
1018 if (!fbatch->percpu_pvec_drained) {
1019 lru_add_drain();
1020 fbatch->percpu_pvec_drained = true;
1021 }
1022 folios_put(fbatch);
1023}
1024EXPORT_SYMBOL(__folio_batch_release);
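/*
 * Illustrative sketch (an assumption, not code from this file): the usual
 * pattern in truncation/invalidation loops is to fill a batch and release
 * it via folio_batch_release(), which only calls __folio_batch_release()
 * when the batch is non-empty:
 *
 *	struct folio_batch fbatch;
 *
 *	folio_batch_init(&fbatch);
 *	while (filemap_get_folios(mapping, &index, end, &fbatch)) {
 *		...		// operate on fbatch.folios[0 .. nr-1]
 *		folio_batch_release(&fbatch);
 *	}
 */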
1025
1026/**
1027 * folio_batch_remove_exceptionals() - Prune non-folios from a batch.
1028 * @fbatch: The batch to prune
1029 *
1030 * find_get_entries() fills a batch with both folios and shadow/swap/DAX
1031 * entries. This function prunes all the non-folio entries from @fbatch
1032 * without leaving holes, so that it can be passed on to folio-only batch
1033 * operations.
1034 */
1035void folio_batch_remove_exceptionals(struct folio_batch *fbatch)
1036{
1037 unsigned int i, j;
1038
1039 for (i = 0, j = 0; i < folio_batch_count(fbatch); i++) {
1040 struct folio *folio = fbatch->folios[i];
1041 if (!xa_is_value(folio))
1042 fbatch->folios[j++] = folio;
1043 }
1044 fbatch->nr = j;
1045}
1046
1047/*
1048 * Perform any setup for the swap system
1049 */
1050void __init swap_setup(void)
1051{
1052 unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);
1053
1054 /* Use a smaller cluster for small-memory machines */
1055 if (megs < 16)
1056 page_cluster = 2;
1057 else
1058 page_cluster = 3;
1059 /*
 1060	 * Right now other parts of the system mean that we
1061 * _really_ don't want to cluster much more
1062 */
1063}