// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/swap_state.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 *
 * Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include "internal.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

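/*
 * Worked example of the encoding above (illustrative, assuming 4KB
 * pages, i.e. PAGE_SHIFT == 12 and so SWAP_RA_WIN_SHIFT == 6): the
 * per-VMA swap_readahead_info long packs
 *
 *	bits  0-5	readahead hits		(SWAP_RA_HITS_MASK)
 *	bits  6-11	readahead window size	(SWAP_RA_WIN_MASK)
 *	bits 12-	last faulting address, page aligned
 *
 * so SWAP_RA_VAL(0x7f0000, 8, 3) == 0x7f0000 | (8 << 6) | 3
 * == 0x7f0203.
 */
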
#define INC_CACHE_INFO(x)	data_race(swap_cache_info.x++)
#define ADD_CACHE_INFO(x, nr)	data_race(swap_cache_info.x += (nr))

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
	unsigned int i, j, nr;
	unsigned long ret = 0;
	struct address_space *spaces;
	struct swap_info_struct *si;

	for (i = 0; i < MAX_SWAPFILES; i++) {
		swp_entry_t entry = swp_entry(i, 1);

		/* Avoid get_swap_device() warning for a bad swap entry */
		if (!swp_swap_info(entry))
			continue;
		/* Prevent swapoff from freeing swapper_spaces */
		si = get_swap_device(entry);
		if (!si)
			continue;
		nr = nr_swapper_spaces[i];
		spaces = swapper_spaces[i];
		for (j = 0; j < nr; j++)
			ret += spaces[j].nrpages;
		put_swap_device(si);
	}
	return ret;
}

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	struct page *page;

	page = find_get_entry(address_space, idx);
	if (xa_is_value(page))
		return page;
	if (page)
		put_page(page);
	return NULL;
}

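/*
 * Note on shadows: when a page leaves the swap cache,
 * __delete_from_swap_cache() below can leave an XArray value entry (a
 * "shadow") in its slot, encoding workingset eviction information.
 * xa_is_value() is what distinguishes such a shadow from a real page;
 * on the next swapin the shadow is handed to workingset_refault().
 */
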
/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry,
			gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
	unsigned long i, nr = thp_nr_pages(page);
	void *old;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_ref_add(page, nr);
	SetPageSwapCache(page);

	do {
		unsigned long nr_shadows = 0;

		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
			old = xas_load(&xas);
			if (xa_is_value(old)) {
				nr_shadows++;
				if (shadowp)
					*shadowp = old;
			}
			set_page_private(page + i, entry.val + i);
			xas_store(&xas, page);
			xas_next(&xas);
		}
		address_space->nrexceptional -= nr_shadows;
		address_space->nrpages += nr;
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		ADD_CACHE_INFO(add_total, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	ClearPageSwapCache(page);
	page_ref_sub(page, nr);
	return xas_error(&xas);
}

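/*
 * A minimal caller sketch (illustrative): the page must be locked and
 * SwapBacked, and the swap slot must already carry SWAP_HAS_CACHE,
 * e.g. via swapcache_prepare(), before insertion:
 *
 *	if (!swapcache_prepare(entry))
 *		err = add_to_swap_cache(page, entry, GFP_KERNEL, NULL);
 *
 * __read_swap_cache_async() below is the canonical user of this
 * pattern; add_to_swap() relies on get_swap_page() having already set
 * SWAP_HAS_CACHE instead.
 */
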
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page,
			swp_entry_t entry, void *shadow)
{
	struct address_space *address_space = swap_address_space(entry);
	int i, nr = thp_nr_pages(page);
	pgoff_t idx = swp_offset(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
		VM_BUG_ON_PAGE(entry != page, entry);
		set_page_private(page + i, 0);
		xas_next(&xas);
	}
	ClearPageSwapCache(page);
	if (shadow)
		address_space->nrexceptional += nr;
	address_space->nrpages -= nr;
	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	ADD_CACHE_INFO(del_total, nr);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page(page);
	if (!entry.val)
		return 0;

	/*
	 * XArray node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the page will be dirtied in unmap because its pte should
	 * be dirty. A special case is an MADV_FREE page. The page's pte
	 * could have the dirty bit cleared while its SwapBacked bit is still
	 * set, because clearing the dirty bit and the SwapBacked bit is not
	 * protected by a lock. For such a page, unmap will not set the dirty
	 * bit, so page reclaim will not write the page out. This can cause
	 * data corruption when the page is swapped in later. Always setting
	 * the dirty bit for the page solves the problem.
	 */
	set_page_dirty(page);

	return 1;

fail:
	put_swap_page(page, entry);
	return 0;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list;
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry = { .val = page_private(page) };
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(page, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_page(page, entry);
	page_ref_sub(page, thp_nr_pages(page));
}

void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end)
{
	unsigned long curr = begin;
	void *old;

	for (;;) {
		unsigned long nr_shadows = 0;
		swp_entry_t entry = swp_entry(type, curr);
		struct address_space *address_space = swap_address_space(entry);
		XA_STATE(xas, &address_space->i_pages, curr);

		xa_lock_irq(&address_space->i_pages);
		xas_for_each(&xas, old, end) {
			if (!xa_is_value(old))
				continue;
			xas_store(&xas, NULL);
			nr_shadows++;
		}
		address_space->nrexceptional -= nr_shadows;
		xa_unlock_irq(&address_space->i_pages);

		/* search the next swapcache until we meet end */
		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
		curr++;
		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
		if (curr > end)
			break;
	}
}

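/*
 * The shift-increment-shift above simply rounds curr up to the start
 * of the next swap address space chunk. Illustrative arithmetic,
 * assuming SWAP_ADDRESS_SPACE_SHIFT == 14 (16384-slot chunks):
 *
 *	curr == 20000:	20000 >> 14 == 1;  1 + 1 == 2;	2 << 14 == 32768
 *
 * i.e. after scanning the chunk containing slot 20000, the loop
 * continues with the xarray covering slots 32768 and up.
 */
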
/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr);
}

static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
			       unsigned long addr)
{
	struct page *page;
	struct swap_info_struct *si;

	si = get_swap_device(entry);
	if (!si)
		return NULL;
	page = find_get_page(swap_address_space(entry), swp_offset(entry));
	put_swap_device(si);

	INC_CACHE_INFO(find_total);
	if (page) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		INC_CACHE_INFO(find_success);
		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(PageTransCompound(page)))
			return page;

		readahead = TestClearPageReadahead(page);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	}

	return page;
}

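/*
 * In short: finding a page that still has PG_readahead set means a
 * previous readahead guess paid off. The hit is credited either to the
 * per-VMA counter packed in swap_readahead_info (VMA readahead) or to
 * the global swapin_readahead_hits (cluster readahead), which the
 * window-sizing heuristics below consume to grow or shrink the next
 * readahead window.
 */
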
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct swap_info_struct *si;
	struct page *page;
	void *shadow = NULL;

	*new_page_allocated = false;

	for (;;) {
		int err;
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		si = get_swap_device(entry);
		if (!si)
			return NULL;
		page = find_get_page(swap_address_space(entry),
				     swp_offset(entry));
		put_swap_device(si);
		if (page)
			return page;

		/*
		 * Just skip read ahead for unused swap slot.
		 * During swap_off when swap_slot_cache is disabled,
		 * we have to handle the race between putting
		 * swap entry in swap cache and marking swap slot
		 * as SWAP_HAS_CACHE.  That's done in later part of code or
		 * else swap_off will be aborted if we return NULL.
		 */
		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
			return NULL;

		/*
		 * Get a new page to read into from swap.  Allocate it now,
		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
		 * cause any racers to loop around until we add it to cache.
		 */
		page = alloc_page_vma(gfp_mask, vma, addr);
		if (!page)
			return NULL;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (!err)
			break;

		put_page(page);
		if (err != -EEXIST)
			return NULL;

		/*
		 * We might race against __delete_from_swap_cache(), and
		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
		 * has not yet been cleared.  Or race against another
		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
		 * in swap_map, but not yet added its page to swap cache.
		 */
		cond_resched();
	}

	/*
	 * The swap entry is ours to swap in. Prepare the new page.
	 */

	__SetPageLocked(page);
	__SetPageSwapBacked(page);

	/* May fail (-ENOMEM) if XArray node allocation failed. */
	if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) {
		put_swap_page(page, entry);
		goto fail_unlock;
	}

	if (mem_cgroup_charge(page, NULL, gfp_mask)) {
		delete_from_swap_cache(page);
		goto fail_unlock;
	}

	if (shadow)
		workingset_refault(page, shadow);

	/* Caller will initiate read into locked page */
	SetPageWorkingset(page);
	lru_cache_add(page);
	*new_page_allocated = true;
	return page;

fail_unlock:
	unlock_page(page);
	put_page(page);
	return NULL;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool do_poll)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage, do_poll);

	return retpage;
}

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}

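/*
 * Worked example of the heuristic above: hits == 3 gives pages == 5,
 * which the power-of-two roundup turns into 8; with page_cluster == 3
 * (max_pages == 8) that is the final window. With no hits at all, the
 * window collapses to 2 for an adjacent offset (a likely sequential
 * load) or to 1 otherwise, subject to the prev_win / 2 floor that
 * stops the window from shrinking too abruptly.
 */
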
static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swap_cluster_readahead - swap in pages in the hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	bool do_poll = true, page_allocated;
	struct vm_area_struct *vma = vmf->vma;
	unsigned long addr = vmf->address;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Test swap type to make sure the dereference is safe */
	if (likely(si->flags & (SWP_BLKDEV | SWP_FS))) {
		struct inode *inode = si->swap_file->f_mapping->host;
		if (inode_read_congested(inode))
			goto skip;
	}

	do_poll = false;
	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (offset != entry_offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}

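/*
 * Cluster arithmetic, for illustration: with a window of 8 pages,
 * mask == 7, so a fault at swap offset 1003 reads the aligned block
 * of slots 1000 (1003 & ~7) through 1007 (1003 | 7). Only the pages
 * other than the faulting one are marked PG_readahead, so later
 * faults on them can be counted as readahead hits.
 */
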
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}

void exit_swap_address_space(unsigned int type)
{
	kvfree(swapper_spaces[type]);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}

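/*
 * Design note: rather than one big mapping per swap device, the swap
 * cache is split into one struct address_space per
 * SWAP_ADDRESS_SPACE_PAGES slots (with the usual shift of 14, 16384
 * slots, i.e. 64MB of swap with 4KB pages), so that lookups and
 * insertions on a busy device spread over many i_pages locks instead
 * of contending on one.
 */
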
static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
				     unsigned long faddr,
				     unsigned long lpfn,
				     unsigned long rpfn,
				     unsigned long *start,
				     unsigned long *end)
{
	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
		      PFN_DOWN(faddr & PMD_MASK));
	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}

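/*
 * In other words, the tentative readahead window [lpfn, rpfn) is
 * clamped to stay both inside the VMA and inside the single PMD
 * (page table page) covering the faulting address. Staying within
 * one PMD is what lets swap_ra_info() walk the PTEs with a single
 * pte_offset_map() of vmf->pmd.
 */
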
static void swap_ra_info(struct vm_fault *vmf,
			struct vma_swap_readahead *ra_info)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	swp_entry_t entry;
	unsigned long faddr, pfn, fpfn;
	unsigned long start, end;
	pte_t *pte, *orig_pte;
	unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
	pte_t *tpte;
#endif

	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
			     SWAP_RA_ORDER_CEILING);
	if (max_win == 1) {
		ra_info->win = 1;
		return;
	}

	faddr = vmf->address;
	orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
	entry = pte_to_swp_entry(*pte);
	if (unlikely(non_swap_entry(entry))) {
		pte_unmap(orig_pte);
		return;
	}

	fpfn = PFN_DOWN(faddr);
	ra_val = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));

	if (win == 1) {
		pte_unmap(orig_pte);
		return;
	}

	/* Copy the PTEs because the page table may be unmapped */
	if (fpfn == pfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
	else if (pfn == fpfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
				  &start, &end);
	else {
		left = (win - 1) / 2;
		swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
				  &start, &end);
	}
	ra_info->nr_pte = end - start;
	ra_info->offset = fpfn - start;
	pte -= ra_info->offset;
#ifdef CONFIG_64BIT
	ra_info->ptes = pte;
#else
	tpte = ra_info->ptes;
	for (pfn = start; pfn != end; pfn++)
		*tpte++ = *pte++;
#endif
	pte_unmap(orig_pte);
}

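/*
 * Window placement, spelled out: if the previous fault hit the page
 * just before this one (fpfn == pfn + 1), the access looks like a
 * forward scan and the window [fpfn, fpfn + win) extends forward; if
 * it hit the page just after (pfn == fpfn + 1), the window extends
 * backward; otherwise the window is centred on the faulting page.
 */
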
/**
 * swap_vma_readahead - swap in pages in the hope we need them soon
 * @fentry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 *
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
				       struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	pte_t *pte, pentry;
	swp_entry_t entry;
	unsigned int i;
	bool page_allocated;
	struct vma_swap_readahead ra_info = {0,};

	swap_ra_info(vmf, &ra_info);
	if (ra_info.win == 1)
		goto skip;

	blk_start_plug(&plug);
	for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
	     i++, pte++) {
		pentry = *pte;
		if (pte_none(pentry))
			continue;
		if (pte_present(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       vmf->address, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (i != ra_info.offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	lru_add_drain();
skip:
	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
				     ra_info.win == 1);
}

/**
 * swapin_readahead - swap in pages in the hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * It's the main entry function for swap readahead. Depending on the
 * configuration, it reads ahead blocks using either cluster-based
 * (i.e. physical disk based) or vma-based (i.e. virtual address based
 * on the faulting address) readahead.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	return swap_use_vma_readahead() ?
			swap_vma_readahead(entry, gfp_mask, vmf) :
			swap_cluster_readahead(entry, gfp_mask, vmf);
}

#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t count)
{
	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
		enable_vma_readahead = true;
	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
		enable_vma_readahead = false;
	else
		return -EINVAL;

	return count;
}
static struct kobj_attribute vma_ra_enabled_attr =
	__ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
	       vma_ra_enabled_store);

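/*
 * With the "swap" kobject registered under mm_kobj below, this knob
 * is typically visible as /sys/kernel/mm/swap/vma_ra_enabled, e.g.
 *
 *	echo false > /sys/kernel/mm/swap/vma_ra_enabled
 *
 * falls back to cluster-based readahead for subsequent faults.
 */
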
static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif