1/*
2 * linux/mm/swap_state.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 * Swap reorganised 29.12.95, Stephen Tweedie
6 *
7 * Rewritten to use page cache, (C) 1998 Stephen Tweedie
8 */
9#include <linux/module.h>
10#include <linux/mm.h>
11#include <linux/gfp.h>
12#include <linux/kernel_stat.h>
13#include <linux/swap.h>
14#include <linux/swapops.h>
15#include <linux/init.h>
16#include <linux/pagemap.h>
17#include <linux/buffer_head.h>
18#include <linux/backing-dev.h>
19#include <linux/pagevec.h>
20#include <linux/migrate.h>
21#include <linux/page_cgroup.h>
22
23#include <asm/pgtable.h>
24
25/*
26 * swapper_space is a fiction, retained to simplify the path through
27 * vmscan's shrink_page_list.
28 */
29static const struct address_space_operations swap_aops = {
30 .writepage = swap_writepage,
31 .set_page_dirty = __set_page_dirty_nobuffers,
32 .migratepage = migrate_page,
33};
34
35static struct backing_dev_info swap_backing_dev_info = {
36 .name = "swap",
37 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
38};
39
40struct address_space swapper_space = {
41 .page_tree = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
42 .tree_lock = __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
43 .a_ops = &swap_aops,
44 .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
45 .backing_dev_info = &swap_backing_dev_info,
46};
47
48#define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0)
49
50static struct {
51 unsigned long add_total;
52 unsigned long del_total;
53 unsigned long find_success;
54 unsigned long find_total;
55} swap_cache_info;
56
57void show_swap_cache_info(void)
58{
59 printk("%lu pages in swap cache\n", total_swapcache_pages);
60 printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
61 swap_cache_info.add_total, swap_cache_info.del_total,
62 swap_cache_info.find_success, swap_cache_info.find_total);
63 printk("Free swap = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
64 printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
65}
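
/*
 * Illustrative note (not part of the original file): nr_swap_pages and
 * total_swap_pages are page counts, so shifting by (PAGE_SHIFT - 10)
 * converts pages to kilobytes. Assuming 4KB pages, PAGE_SHIFT - 10 == 2,
 * so e.g. 1000 free swap pages are printed as 1000 << 2 == 4000kB.
 */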
66
67/*
68 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
69 * but sets SwapCache flag and private instead of mapping and index.
70 */
71static int __add_to_swap_cache(struct page *page, swp_entry_t entry)
72{
73 int error;
74
75 VM_BUG_ON(!PageLocked(page));
76 VM_BUG_ON(PageSwapCache(page));
77 VM_BUG_ON(!PageSwapBacked(page));
78
79 page_cache_get(page);
80 SetPageSwapCache(page);
81 set_page_private(page, entry.val);
82
83 spin_lock_irq(&swapper_space.tree_lock);
84 error = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
85 if (likely(!error)) {
86 total_swapcache_pages++;
87 __inc_zone_page_state(page, NR_FILE_PAGES);
88 INC_CACHE_INFO(add_total);
89 }
90 spin_unlock_irq(&swapper_space.tree_lock);
91
92 if (unlikely(error)) {
93 /*
94 * Only the context which has set the SWAP_HAS_CACHE flag
95 * would call add_to_swap_cache().
96 * So add_to_swap_cache() doesn't return -EEXIST.
97 */
98 VM_BUG_ON(error == -EEXIST);
99 set_page_private(page, 0UL);
100 ClearPageSwapCache(page);
101 page_cache_release(page);
102 }
103
104 return error;
105}
106
107
108int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
109{
110 int error;
111
112 error = radix_tree_preload(gfp_mask);
113 if (!error) {
114 error = __add_to_swap_cache(page, entry);
115 radix_tree_preload_end();
116 }
117 return error;
118}
119
120/*
121 * This must be called only on pages that have
122 * been verified to be in the swap cache.
123 */
124void __delete_from_swap_cache(struct page *page)
125{
126 VM_BUG_ON(!PageLocked(page));
127 VM_BUG_ON(!PageSwapCache(page));
128 VM_BUG_ON(PageWriteback(page));
129
130 radix_tree_delete(&swapper_space.page_tree, page_private(page));
131 set_page_private(page, 0);
132 ClearPageSwapCache(page);
133 total_swapcache_pages--;
134 __dec_zone_page_state(page, NR_FILE_PAGES);
135 INC_CACHE_INFO(del_total);
136}
137
138/**
139 * add_to_swap - allocate swap space for a page
140 * @page: page we want to move to swap
141 *
142 * Allocate swap space for the page and add the page to the
143 * swap cache. Caller needs to hold the page lock.
144 */
145int add_to_swap(struct page *page)
146{
147 swp_entry_t entry;
148 int err;
149
150 VM_BUG_ON(!PageLocked(page));
151 VM_BUG_ON(!PageUptodate(page));
152
153 entry = get_swap_page();
154 if (!entry.val)
155 return 0;
156
157 if (unlikely(PageTransHuge(page)))
158 if (unlikely(split_huge_page(page))) {
159 swapcache_free(entry, NULL);
160 return 0;
161 }
162
163 /*
164 * Radix-tree node allocations from PF_MEMALLOC contexts could
165 * completely exhaust the page allocator. __GFP_NOMEMALLOC
166 * stops emergency reserves from being allocated.
167 *
168 * TODO: this could cause a theoretical memory reclaim
169 * deadlock in the swap out path.
170 */
171 /*
172 * Add it to the swap cache and mark it dirty
173 */
174 err = add_to_swap_cache(page, entry,
175 __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
176
177 if (!err) { /* Success */
178 SetPageDirty(page);
179 return 1;
180 } else { /* -ENOMEM radix-tree allocation failure */
181 /*
182 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
183 * clear SWAP_HAS_CACHE flag.
184 */
185 swapcache_free(entry, NULL);
186 return 0;
187 }
188}
189
190/*
191 * This must be called only on pages that have
192 * been verified to be in the swap cache and locked.
193 * It will never put the page into the free list, as
194 * the caller has a reference on the page.
195 */
196void delete_from_swap_cache(struct page *page)
197{
198 swp_entry_t entry;
199
200 entry.val = page_private(page);
201
202 spin_lock_irq(&swapper_space.tree_lock);
203 __delete_from_swap_cache(page);
204 spin_unlock_irq(&swapper_space.tree_lock);
205
206 swapcache_free(entry, page);
207 page_cache_release(page);
208}
209
210/*
211 * If we are the only user, then try to free up the swap cache.
212 *
213 * It's ok to check for PageSwapCache without the page lock
214 * here because we are going to recheck inside
215 * try_to_free_swap() _with_ the lock.
216 * - Marcelo
217 */
218static inline void free_swap_cache(struct page *page)
219{
220 if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
221 try_to_free_swap(page);
222 unlock_page(page);
223 }
224}
225
226/*
227 * Perform a free_page(), also freeing any swap cache associated with
228 * this page if it is the last user of the page.
229 */
230void free_page_and_swap_cache(struct page *page)
231{
232 free_swap_cache(page);
233 page_cache_release(page);
234}
235
236/*
237 * Passed an array of pages, drop them all from swapcache and then release
238 * them. They are removed from the LRU and freed if this is their last use.
239 */
240void free_pages_and_swap_cache(struct page **pages, int nr)
241{
242 struct page **pagep = pages;
243
244 lru_add_drain();
245 while (nr) {
246 int todo = min(nr, PAGEVEC_SIZE);
247 int i;
248
249 for (i = 0; i < todo; i++)
250 free_swap_cache(pagep[i]);
251 release_pages(pagep, todo, 0);
252 pagep += todo;
253 nr -= todo;
254 }
255}
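
/*
 * Illustrative example (not part of the original file), assuming the
 * usual PAGEVEC_SIZE of 14: a call with nr == 35 drops swap cache and
 * releases the pages in chunks of 14, 14 and 7, after draining the
 * per-CPU LRU pagevecs once up front with lru_add_drain().
 */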
256
257/*
258 * Lookup a swap entry in the swap cache. A found page will be returned
259 * unlocked and with its refcount incremented - we rely on the kernel
260 * lock to keep page table operations atomic even if we drop the page
261 * lock before returning.
262 */
263struct page *lookup_swap_cache(swp_entry_t entry)
264{
265 struct page *page;
266
267 page = find_get_page(&swapper_space, entry.val);
268
269 if (page)
270 INC_CACHE_INFO(find_success);
271
272 INC_CACHE_INFO(find_total);
273 return page;
274}
275
276/*
277 * Locate a page of swap in physical memory, reserving swap cache space
278 * and reading the disk if it is not already cached.
279 * A failure return means that either the page allocation failed or that
280 * the swap entry is no longer in use.
281 */
282struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
283 struct vm_area_struct *vma, unsigned long addr)
284{
285 struct page *found_page, *new_page = NULL;
286 int err;
287
288 do {
289 /*
290 * First check the swap cache. Since this is normally
291 * called after lookup_swap_cache() failed, re-calling
292 * that would confuse statistics.
293 */
294 found_page = find_get_page(&swapper_space, entry.val);
295 if (found_page)
296 break;
297
298 /*
299 * Get a new page to read into from swap.
300 */
301 if (!new_page) {
302 new_page = alloc_page_vma(gfp_mask, vma, addr);
303 if (!new_page)
304 break; /* Out of memory */
305 }
306
307 /*
308 * call radix_tree_preload() while we can wait.
309 */
310 err = radix_tree_preload(gfp_mask & GFP_KERNEL);
311 if (err)
312 break;
313
314 /*
315 * Swap entry may have been freed since our caller observed it.
316 */
317 err = swapcache_prepare(entry);
318 if (err == -EEXIST) { /* seems racy */
319 radix_tree_preload_end();
320 continue;
321 }
322 if (err) { /* swp entry is obsolete ? */
323 radix_tree_preload_end();
324 break;
325 }
326
327 /* May fail (-ENOMEM) if radix-tree node allocation failed. */
328 __set_page_locked(new_page);
329 SetPageSwapBacked(new_page);
330 err = __add_to_swap_cache(new_page, entry);
331 if (likely(!err)) {
332 radix_tree_preload_end();
333 /*
334 * Initiate read into locked page and return.
335 */
336 lru_cache_add_anon(new_page);
337 swap_readpage(new_page);
338 return new_page;
339 }
340 radix_tree_preload_end();
341 ClearPageSwapBacked(new_page);
342 __clear_page_locked(new_page);
343 /*
344 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
345 * clear SWAP_HAS_CACHE flag.
346 */
347 swapcache_free(entry, NULL);
348 } while (err != -ENOMEM);
349
350 if (new_page)
351 page_cache_release(new_page);
352 return found_page;
353}
354
355/**
356 * swapin_readahead - swap in pages in hope we need them soon
357 * @entry: swap entry of this memory
358 * @gfp_mask: memory allocation flags
359 * @vma: user vma this address belongs to
360 * @addr: target address for mempolicy
361 *
362 * Returns the struct page for entry and addr, after queueing swapin.
363 *
364 * Primitive swap readahead code. We simply read an aligned block of
365 * (1 << page_cluster) entries in the swap area. This method is chosen
366 * because it doesn't cost us any seek time. We also make sure to queue
367 * the 'original' request together with the readahead ones...
368 *
369 * This has been extended to use the NUMA policies from the mm triggering
370 * the readahead.
371 *
372 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
373 */
374struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
375 struct vm_area_struct *vma, unsigned long addr)
376{
377 int nr_pages;
378 struct page *page;
379 unsigned long offset;
380 unsigned long end_offset;
381
382 /*
383 * Get starting offset for readaround, and number of pages to read.
384 * Adjust starting address by readbehind (for NUMA interleave case)?
385 * No, it's very unlikely that swap layout would follow vma layout,
386 * more likely that neighbouring swap pages came from the same node:
387 * so use the same "addr" to choose the same node for each swap read.
388 */
389 nr_pages = valid_swaphandles(entry, &offset);
390 for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
391 /* Ok, do the async read-ahead now */
392 page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
393 gfp_mask, vma, addr);
394 if (!page)
395 break;
396 page_cache_release(page);
397 }
398 lru_add_drain(); /* Push any new pages onto the LRU now */
399 return read_swap_cache_async(entry, gfp_mask, vma, addr);
400}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/mm/swap_state.c
4 *
5 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
6 * Swap reorganised 29.12.95, Stephen Tweedie
7 *
8 * Rewritten to use page cache, (C) 1998 Stephen Tweedie
9 */
10#include <linux/mm.h>
11#include <linux/gfp.h>
12#include <linux/kernel_stat.h>
13#include <linux/mempolicy.h>
14#include <linux/swap.h>
15#include <linux/swapops.h>
16#include <linux/init.h>
17#include <linux/pagemap.h>
18#include <linux/backing-dev.h>
19#include <linux/blkdev.h>
20#include <linux/migrate.h>
21#include <linux/vmalloc.h>
22#include <linux/swap_slots.h>
23#include <linux/huge_mm.h>
24#include <linux/shmem_fs.h>
25#include "internal.h"
26#include "swap.h"
27
28/*
29 * swapper_space is a fiction, retained to simplify the path through
30 * vmscan's shrink_page_list.
31 */
32static const struct address_space_operations swap_aops = {
33 .writepage = swap_writepage,
34 .dirty_folio = noop_dirty_folio,
35#ifdef CONFIG_MIGRATION
36 .migrate_folio = migrate_folio,
37#endif
38};
39
40struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
41static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
42static bool enable_vma_readahead __read_mostly = true;
43
44#define SWAP_RA_WIN_SHIFT (PAGE_SHIFT / 2)
45#define SWAP_RA_HITS_MASK ((1UL << SWAP_RA_WIN_SHIFT) - 1)
46#define SWAP_RA_HITS_MAX SWAP_RA_HITS_MASK
47#define SWAP_RA_WIN_MASK (~PAGE_MASK & ~SWAP_RA_HITS_MASK)
48
49#define SWAP_RA_HITS(v) ((v) & SWAP_RA_HITS_MASK)
50#define SWAP_RA_WIN(v) (((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
51#define SWAP_RA_ADDR(v) ((v) & PAGE_MASK)
52
53#define SWAP_RA_VAL(addr, win, hits) \
54 (((addr) & PAGE_MASK) | \
55 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) | \
56 ((hits) & SWAP_RA_HITS_MASK))
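
/*
 * Illustrative sketch (not part of the original file): how the helpers
 * above pack the per-VMA readahead state into one unsigned long. With
 * the common 4KB page size, SWAP_RA_WIN_SHIFT is 6, so the low 6 bits
 * carry the hit count, the next 6 bits the window, and the page-aligned
 * fault address fills the rest. The function name is hypothetical.
 */
static inline void swap_ra_val_example(void)
{
	unsigned long v = SWAP_RA_VAL(0x7f0000010000UL, 8, 3);

	VM_WARN_ON_ONCE(SWAP_RA_ADDR(v) != 0x7f0000010000UL);
	VM_WARN_ON_ONCE(SWAP_RA_WIN(v) != 8);
	VM_WARN_ON_ONCE(SWAP_RA_HITS(v) != 3);
}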
57
58/* Initial readahead hits is 4 to start up with a small window */
59#define GET_SWAP_RA_VAL(vma) \
60 (atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
61
62static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
63
64void show_swap_cache_info(void)
65{
66 printk("%lu pages in swap cache\n", total_swapcache_pages());
67 printk("Free swap = %ldkB\n", K(get_nr_swap_pages()));
68 printk("Total swap = %lukB\n", K(total_swap_pages));
69}
70
71void *get_shadow_from_swap_cache(swp_entry_t entry)
72{
73 struct address_space *address_space = swap_address_space(entry);
74 pgoff_t idx = swp_offset(entry);
75 struct page *page;
76
77 page = xa_load(&address_space->i_pages, idx);
78 if (xa_is_value(page))
79 return page;
80 return NULL;
81}
82
83/*
84 * add_to_swap_cache resembles filemap_add_folio on swapper_space,
85 * but sets SwapCache flag and private instead of mapping and index.
86 */
87int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
88 gfp_t gfp, void **shadowp)
89{
90 struct address_space *address_space = swap_address_space(entry);
91 pgoff_t idx = swp_offset(entry);
92 XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
93 unsigned long i, nr = folio_nr_pages(folio);
94 void *old;
95
96 xas_set_update(&xas, workingset_update_node);
97
98 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
99 VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
100 VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
101
102 folio_ref_add(folio, nr);
103 folio_set_swapcache(folio);
104 folio->swap = entry;
105
106 do {
107 xas_lock_irq(&xas);
108 xas_create_range(&xas);
109 if (xas_error(&xas))
110 goto unlock;
111 for (i = 0; i < nr; i++) {
112 VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
113 if (shadowp) {
114 old = xas_load(&xas);
115 if (xa_is_value(old))
116 *shadowp = old;
117 }
118 xas_store(&xas, folio);
119 xas_next(&xas);
120 }
121 address_space->nrpages += nr;
122 __node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
123 __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
124unlock:
125 xas_unlock_irq(&xas);
126 } while (xas_nomem(&xas, gfp));
127
128 if (!xas_error(&xas))
129 return 0;
130
131 folio_clear_swapcache(folio);
132 folio_ref_sub(folio, nr);
133 return xas_error(&xas);
134}
135
136/*
137 * This must be called only on folios that have
138 * been verified to be in the swap cache.
139 */
140void __delete_from_swap_cache(struct folio *folio,
141 swp_entry_t entry, void *shadow)
142{
143 struct address_space *address_space = swap_address_space(entry);
144 int i;
145 long nr = folio_nr_pages(folio);
146 pgoff_t idx = swp_offset(entry);
147 XA_STATE(xas, &address_space->i_pages, idx);
148
149 xas_set_update(&xas, workingset_update_node);
150
151 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
152 VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
153 VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
154
155 for (i = 0; i < nr; i++) {
156 void *entry = xas_store(&xas, shadow);
157 VM_BUG_ON_PAGE(entry != folio, entry);
158 xas_next(&xas);
159 }
160 folio->swap.val = 0;
161 folio_clear_swapcache(folio);
162 address_space->nrpages -= nr;
163 __node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
164 __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
165}
166
167/**
168 * add_to_swap - allocate swap space for a folio
169 * @folio: folio we want to move to swap
170 *
171 * Allocate swap space for the folio and add the folio to the
172 * swap cache.
173 *
174 * Context: Caller needs to hold the folio lock.
175 * Return: Whether the folio was added to the swap cache.
176 */
177bool add_to_swap(struct folio *folio)
178{
179 swp_entry_t entry;
180 int err;
181
182 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
183 VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);
184
185 entry = folio_alloc_swap(folio);
186 if (!entry.val)
187 return false;
188
189 /*
190 * XArray node allocations from PF_MEMALLOC contexts could
191 * completely exhaust the page allocator. __GFP_NOMEMALLOC
192 * stops emergency reserves from being allocated.
193 *
194 * TODO: this could cause a theoretical memory reclaim
195 * deadlock in the swap out path.
196 */
197 /*
198 * Add it to the swap cache.
199 */
200 err = add_to_swap_cache(folio, entry,
201 __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
202 if (err)
203 /*
204 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
205 * clear SWAP_HAS_CACHE flag.
206 */
207 goto fail;
208 /*
209 * Normally the folio will be dirtied in unmap because its
210 * pte should be dirty. A special case is a MADV_FREE page. The
211 * page's pte could have the dirty bit cleared but the folio's
212 * SwapBacked flag is still set, because clearing the dirty bit
213 * and the SwapBacked flag is not protected by a lock. For such a folio,
214 * unmap will not set the dirty bit for it, so folio reclaim will
215 * not write the folio out. This can cause data corruption when
216 * the folio is swapped in later. Always setting the dirty flag
217 * for the folio solves the problem.
218 */
219 folio_mark_dirty(folio);
220
221 return true;
222
223fail:
224 put_swap_folio(folio, entry);
225 return false;
226}
227
228/*
229 * This must be called only on folios that have
230 * been verified to be in the swap cache and locked.
231 * It will never put the folio into the free list, as
232 * the caller has a reference on the folio.
233 */
234void delete_from_swap_cache(struct folio *folio)
235{
236 swp_entry_t entry = folio->swap;
237 struct address_space *address_space = swap_address_space(entry);
238
239 xa_lock_irq(&address_space->i_pages);
240 __delete_from_swap_cache(folio, entry, NULL);
241 xa_unlock_irq(&address_space->i_pages);
242
243 put_swap_folio(folio, entry);
244 folio_ref_sub(folio, folio_nr_pages(folio));
245}
246
247void clear_shadow_from_swap_cache(int type, unsigned long begin,
248 unsigned long end)
249{
250 unsigned long curr = begin;
251 void *old;
252
253 for (;;) {
254 swp_entry_t entry = swp_entry(type, curr);
255 struct address_space *address_space = swap_address_space(entry);
256 XA_STATE(xas, &address_space->i_pages, curr);
257
258 xas_set_update(&xas, workingset_update_node);
259
260 xa_lock_irq(&address_space->i_pages);
261 xas_for_each(&xas, old, end) {
262 if (!xa_is_value(old))
263 continue;
264 xas_store(&xas, NULL);
265 }
266 xa_unlock_irq(&address_space->i_pages);
267
268 /* move on to the next swap address space until we pass end */
269 curr >>= SWAP_ADDRESS_SPACE_SHIFT;
270 curr++;
271 curr <<= SWAP_ADDRESS_SPACE_SHIFT;
272 if (curr > end)
273 break;
274 }
275}
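
/*
 * Illustrative example (not part of the original file): swap offsets are
 * split across several address spaces, one per SWAP_ADDRESS_SPACE_PAGES
 * slots. Assuming SWAP_ADDRESS_SPACE_SHIFT == 14 (16384 slots per
 * space), clearing shadows for offsets 16000..40000 walks three chunks,
 * 16000..16383, 16384..32767 and 32768..40000, each under its own
 * xarray lock.
 */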
276
277/*
278 * If we are the only user, then try to free up the swap cache.
279 *
280 * It's ok to check the swapcache flag without the folio lock
281 * here because we are going to recheck again inside
282 * folio_free_swap() _with_ the lock.
283 * - Marcelo
284 */
285void free_swap_cache(struct page *page)
286{
287 struct folio *folio = page_folio(page);
288
289 if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
290 folio_trylock(folio)) {
291 folio_free_swap(folio);
292 folio_unlock(folio);
293 }
294}
295
296/*
297 * Perform a free_page(), also freeing any swap cache associated with
298 * this page if it is the last user of the page.
299 */
300void free_page_and_swap_cache(struct page *page)
301{
302 free_swap_cache(page);
303 if (!is_huge_zero_page(page))
304 put_page(page);
305}
306
307/*
308 * Passed an array of pages, drop them all from swapcache and then release
309 * them. They are removed from the LRU and freed if this is their last use.
310 */
311void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
312{
313 lru_add_drain();
314 for (int i = 0; i < nr; i++)
315 free_swap_cache(encoded_page_ptr(pages[i]));
316 release_pages(pages, nr);
317}
318
319static inline bool swap_use_vma_readahead(void)
320{
321 return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
322}
323
324/*
325 * Lookup a swap entry in the swap cache. A found folio will be returned
326 * unlocked and with its refcount incremented - we rely on the kernel
327 * lock to keep page table operations atomic even if we drop the folio
328 * lock before returning.
329 *
330 * Caller must lock the swap device or hold a reference to keep it valid.
331 */
332struct folio *swap_cache_get_folio(swp_entry_t entry,
333 struct vm_area_struct *vma, unsigned long addr)
334{
335 struct folio *folio;
336
337 folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
338 if (!IS_ERR(folio)) {
339 bool vma_ra = swap_use_vma_readahead();
340 bool readahead;
341
342 /*
343 * At the moment, we don't support PG_readahead for anon THP
344 * so let's bail out rather than confusing the readahead stat.
345 */
346 if (unlikely(folio_test_large(folio)))
347 return folio;
348
349 readahead = folio_test_clear_readahead(folio);
350 if (vma && vma_ra) {
351 unsigned long ra_val;
352 int win, hits;
353
354 ra_val = GET_SWAP_RA_VAL(vma);
355 win = SWAP_RA_WIN(ra_val);
356 hits = SWAP_RA_HITS(ra_val);
357 if (readahead)
358 hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
359 atomic_long_set(&vma->swap_readahead_info,
360 SWAP_RA_VAL(addr, win, hits));
361 }
362
363 if (readahead) {
364 count_vm_event(SWAP_RA_HIT);
365 if (!vma || !vma_ra)
366 atomic_inc(&swapin_readahead_hits);
367 }
368 } else {
369 folio = NULL;
370 }
371
372 return folio;
373}
374
375/**
376 * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
377 * @mapping: The address_space to search.
378 * @index: The page cache index.
379 *
380 * This differs from filemap_get_folio() in that it will also look for the
381 * folio in the swap cache.
382 *
383 * Return: The found folio or an ERR_PTR() if it is not found.
384 */
385struct folio *filemap_get_incore_folio(struct address_space *mapping,
386 pgoff_t index)
387{
388 swp_entry_t swp;
389 struct swap_info_struct *si;
390 struct folio *folio = filemap_get_entry(mapping, index);
391
392 if (!folio)
393 return ERR_PTR(-ENOENT);
394 if (!xa_is_value(folio))
395 return folio;
396 if (!shmem_mapping(mapping))
397 return ERR_PTR(-ENOENT);
398
399 swp = radix_to_swp_entry(folio);
400 /* There might be swapin error entries in shmem mapping. */
401 if (non_swap_entry(swp))
402 return ERR_PTR(-ENOENT);
403 /* Prevent swapoff from happening to us */
404 si = get_swap_device(swp);
405 if (!si)
406 return ERR_PTR(-ENOENT);
407 index = swp_offset(swp);
408 folio = filemap_get_folio(swap_address_space(swp), index);
409 put_swap_device(si);
410 return folio;
411}
412
413struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
414 struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
415 bool skip_if_exists)
416{
417 struct swap_info_struct *si;
418 struct folio *folio;
419 void *shadow = NULL;
420
421 *new_page_allocated = false;
422 si = get_swap_device(entry);
423 if (!si)
424 return NULL;
425
426 for (;;) {
427 int err;
428 /*
429 * First check the swap cache. Since this is normally
430 * called after swap_cache_get_folio() failed, re-calling
431 * that would confuse statistics.
432 */
433 folio = filemap_get_folio(swap_address_space(entry),
434 swp_offset(entry));
435 if (!IS_ERR(folio))
436 goto got_folio;
437
438 /*
439 * Just skip readahead for an unused swap slot.
440 * During swap_off, when swap_slot_cache is disabled,
441 * we have to handle the race between putting a
442 * swap entry in the swap cache and marking the swap slot
443 * as SWAP_HAS_CACHE. That's done in a later part of this code,
444 * or else swap_off will be aborted if we return NULL.
445 */
446 if (!swap_swapcount(si, entry) && swap_slot_cache_enabled)
447 goto fail_put_swap;
448
449 /*
450 * Get a new folio to read into from swap. Allocate it now,
451 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
452 * cause any racers to loop around until we add it to cache.
453 */
454 folio = (struct folio *)alloc_pages_mpol(gfp_mask, 0,
455 mpol, ilx, numa_node_id());
456 if (!folio)
457 goto fail_put_swap;
458
459 /*
460 * Swap entry may have been freed since our caller observed it.
461 */
462 err = swapcache_prepare(entry);
463 if (!err)
464 break;
465
466 folio_put(folio);
467 if (err != -EEXIST)
468 goto fail_put_swap;
469
470 /*
471 * Protect against a recursive call to __read_swap_cache_async()
472 * on the same entry waiting forever here because SWAP_HAS_CACHE
473 * is set but the folio is not in the swap cache yet. This can
474 * happen today if mem_cgroup_swapin_charge_folio() below
475 * triggers reclaim through zswap, which may call
476 * __read_swap_cache_async() in the writeback path.
477 */
478 if (skip_if_exists)
479 goto fail_put_swap;
480
481 /*
482 * We might race against __delete_from_swap_cache(), and
483 * stumble across a swap_map entry whose SWAP_HAS_CACHE
484 * has not yet been cleared. Or race against another
485 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
486 * in swap_map, but not yet added its folio to swap cache.
487 */
488 schedule_timeout_uninterruptible(1);
489 }
490
491 /*
492 * The swap entry is ours to swap in. Prepare the new folio.
493 */
494
495 __folio_set_locked(folio);
496 __folio_set_swapbacked(folio);
497
498 if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
499 goto fail_unlock;
500
501 /* May fail (-ENOMEM) if XArray node allocation failed. */
502 if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
503 goto fail_unlock;
504
505 mem_cgroup_swapin_uncharge_swap(entry);
506
507 if (shadow)
508 workingset_refault(folio, shadow);
509
510 /* Caller will initiate read into locked folio */
511 folio_add_lru(folio);
512 *new_page_allocated = true;
513got_folio:
514 put_swap_device(si);
515 return folio;
516
517fail_unlock:
518 put_swap_folio(folio, entry);
519 folio_unlock(folio);
520 folio_put(folio);
521fail_put_swap:
522 put_swap_device(si);
523 return NULL;
524}
525
526/*
527 * Locate a page of swap in physical memory, reserving swap cache space
528 * and reading the disk if it is not already cached.
529 * A failure return means that either the page allocation failed or that
530 * the swap entry is no longer in use.
531 *
532 * get/put_swap_device() aren't needed to call this function, because
533 * __read_swap_cache_async() calls them and swap_read_folio() holds the
534 * swap cache folio lock.
535 */
536struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
537 struct vm_area_struct *vma, unsigned long addr,
538 struct swap_iocb **plug)
539{
540 bool page_allocated;
541 struct mempolicy *mpol;
542 pgoff_t ilx;
543 struct folio *folio;
544
545 mpol = get_vma_policy(vma, addr, 0, &ilx);
546 folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
547 &page_allocated, false);
548 mpol_cond_put(mpol);
549
550 if (page_allocated)
551 swap_read_folio(folio, false, plug);
552 return folio;
553}
554
555static unsigned int __swapin_nr_pages(unsigned long prev_offset,
556 unsigned long offset,
557 int hits,
558 int max_pages,
559 int prev_win)
560{
561 unsigned int pages, last_ra;
562
563 /*
564 * This heuristic has been found to work well on both sequential and
565 * random loads, swapping to hard disk or to SSD: please don't ask
566 * what the "+ 2" means, it just happens to work well, that's all.
567 */
568 pages = hits + 2;
569 if (pages == 2) {
570 /*
571 * We can have no readahead hits to judge by: but must not get
572 * stuck here forever, so check for an adjacent offset instead
573 * (and don't even bother to check whether swap type is same).
574 */
575 if (offset != prev_offset + 1 && offset != prev_offset - 1)
576 pages = 1;
577 } else {
578 unsigned int roundup = 4;
579 while (roundup < pages)
580 roundup <<= 1;
581 pages = roundup;
582 }
583
584 if (pages > max_pages)
585 pages = max_pages;
586
587 /* Don't shrink readahead too fast */
588 last_ra = prev_win / 2;
589 if (pages < last_ra)
590 pages = last_ra;
591
592 return pages;
593}
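
/*
 * Illustrative sketch (not part of the original file) exercising the
 * heuristic above; the function name is hypothetical. With 5 recent
 * hits and a previous window of 4, the request becomes 5 + 2 = 7 pages,
 * rounded up to the power of two 8. With no hits and an offset that is
 * not adjacent to the previous one, the window collapses to one page.
 */
static void __maybe_unused swapin_nr_pages_example(void)
{
	/* 5 hits, previous window 4, cap 32: 7 rounded up to 8 pages */
	VM_WARN_ON_ONCE(__swapin_nr_pages(100, 200, 5, 32, 4) != 8);
	/* no hits, offset 200 not adjacent to 100: shrink back to 1 page */
	VM_WARN_ON_ONCE(__swapin_nr_pages(100, 200, 0, 32, 0) != 1);
}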
594
595static unsigned long swapin_nr_pages(unsigned long offset)
596{
597 static unsigned long prev_offset;
598 unsigned int hits, pages, max_pages;
599 static atomic_t last_readahead_pages;
600
601 max_pages = 1 << READ_ONCE(page_cluster);
602 if (max_pages <= 1)
603 return 1;
604
605 hits = atomic_xchg(&swapin_readahead_hits, 0);
606 pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
607 max_pages,
608 atomic_read(&last_readahead_pages));
609 if (!hits)
610 WRITE_ONCE(prev_offset, offset);
611 atomic_set(&last_readahead_pages, pages);
612
613 return pages;
614}
615
616/**
617 * swap_cluster_readahead - swap in pages in hope we need them soon
618 * @entry: swap entry of this memory
619 * @gfp_mask: memory allocation flags
620 * @mpol: NUMA memory allocation policy to be applied
621 * @ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
622 *
623 * Returns the struct folio for entry and addr, after queueing swapin.
624 *
625 * Primitive swap readahead code. We simply read an aligned block of
626 * (1 << page_cluster) entries in the swap area. This method is chosen
627 * because it doesn't cost us any seek time. We also make sure to queue
628 * the 'original' request together with the readahead ones...
629 *
630 * Note: it is intentional that the same NUMA policy and interleave index
631 * are used for every page of the readahead: neighbouring pages on swap
632 * are fairly likely to have been swapped out from the same node.
633 */
634struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
635 struct mempolicy *mpol, pgoff_t ilx)
636{
637 struct folio *folio;
638 unsigned long entry_offset = swp_offset(entry);
639 unsigned long offset = entry_offset;
640 unsigned long start_offset, end_offset;
641 unsigned long mask;
642 struct swap_info_struct *si = swp_swap_info(entry);
643 struct blk_plug plug;
644 struct swap_iocb *splug = NULL;
645 bool page_allocated;
646
647 mask = swapin_nr_pages(offset) - 1;
648 if (!mask)
649 goto skip;
650
651 /* Read a page_cluster sized and aligned cluster around offset. */
652 start_offset = offset & ~mask;
653 end_offset = offset | mask;
654 if (!start_offset) /* First page is swap header. */
655 start_offset++;
656 if (end_offset >= si->max)
657 end_offset = si->max - 1;
658
659 blk_start_plug(&plug);
660 for (offset = start_offset; offset <= end_offset ; offset++) {
661 /* Ok, do the async read-ahead now */
662 folio = __read_swap_cache_async(
663 swp_entry(swp_type(entry), offset),
664 gfp_mask, mpol, ilx, &page_allocated, false);
665 if (!folio)
666 continue;
667 if (page_allocated) {
668 swap_read_folio(folio, false, &splug);
669 if (offset != entry_offset) {
670 folio_set_readahead(folio);
671 count_vm_event(SWAP_RA);
672 }
673 }
674 folio_put(folio);
675 }
676 blk_finish_plug(&plug);
677 swap_read_unplug(splug);
678 lru_add_drain(); /* Push any new pages onto the LRU now */
679skip:
680 /* The page was likely read above, so no need for plugging here */
681 folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
682 &page_allocated, false);
683 if (unlikely(page_allocated)) {
684 zswap_folio_swapin(folio);
685 swap_read_folio(folio, false, NULL);
686 }
687 return folio;
688}
689
690int init_swap_address_space(unsigned int type, unsigned long nr_pages)
691{
692 struct address_space *spaces, *space;
693 unsigned int i, nr;
694
695 nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
696 spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
697 if (!spaces)
698 return -ENOMEM;
699 for (i = 0; i < nr; i++) {
700 space = spaces + i;
701 xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
702 atomic_set(&space->i_mmap_writable, 0);
703 space->a_ops = &swap_aops;
704 /* swap cache doesn't use writeback related tags */
705 mapping_set_no_writeback_tags(space);
706 }
707 nr_swapper_spaces[type] = nr;
708 swapper_spaces[type] = spaces;
709
710 return 0;
711}
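
/*
 * Illustrative arithmetic (not part of the original file): assuming 4KB
 * pages and SWAP_ADDRESS_SPACE_PAGES == 16384, a 1GB swap device holds
 * 262144 slots, so it gets DIV_ROUND_UP(262144, 16384) == 16 separate
 * address spaces, spreading i_pages lock contention across the device.
 */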
712
713void exit_swap_address_space(unsigned int type)
714{
715 int i;
716 struct address_space *spaces = swapper_spaces[type];
717
718 for (i = 0; i < nr_swapper_spaces[type]; i++)
719 VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
720 kvfree(spaces);
721 nr_swapper_spaces[type] = 0;
722 swapper_spaces[type] = NULL;
723}
724
725#define SWAP_RA_ORDER_CEILING 5
726
727struct vma_swap_readahead {
728 unsigned short win;
729 unsigned short offset;
730 unsigned short nr_pte;
731};
732
733static void swap_ra_info(struct vm_fault *vmf,
734 struct vma_swap_readahead *ra_info)
735{
736 struct vm_area_struct *vma = vmf->vma;
737 unsigned long ra_val;
738 unsigned long faddr, pfn, fpfn, lpfn, rpfn;
739 unsigned long start, end;
740 unsigned int max_win, hits, prev_win, win;
741
742 max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
743 SWAP_RA_ORDER_CEILING);
744 if (max_win == 1) {
745 ra_info->win = 1;
746 return;
747 }
748
749 faddr = vmf->address;
750 fpfn = PFN_DOWN(faddr);
751 ra_val = GET_SWAP_RA_VAL(vma);
752 pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
753 prev_win = SWAP_RA_WIN(ra_val);
754 hits = SWAP_RA_HITS(ra_val);
755 ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
756 max_win, prev_win);
757 atomic_long_set(&vma->swap_readahead_info,
758 SWAP_RA_VAL(faddr, win, 0));
759 if (win == 1)
760 return;
761
762 if (fpfn == pfn + 1) {
763 lpfn = fpfn;
764 rpfn = fpfn + win;
765 } else if (pfn == fpfn + 1) {
766 lpfn = fpfn - win + 1;
767 rpfn = fpfn + 1;
768 } else {
769 unsigned int left = (win - 1) / 2;
770
771 lpfn = fpfn - left;
772 rpfn = fpfn + win - left;
773 }
774 start = max3(lpfn, PFN_DOWN(vma->vm_start),
775 PFN_DOWN(faddr & PMD_MASK));
776 end = min3(rpfn, PFN_DOWN(vma->vm_end),
777 PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
778
779 ra_info->nr_pte = end - start;
780 ra_info->offset = fpfn - start;
781}
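
/*
 * Illustrative example (not part of the original file): with a window of
 * 4, a fault at pfn 0x105 right after a fault at 0x104 gives the forward
 * range [0x105, 0x109); a fault at 0x103 after 0x104 gives the backward
 * range [0x100, 0x104); any other pattern centres the window on the
 * faulting pfn. The range is then clipped to the VMA and to the faulting
 * PMD before nr_pte and offset are filled in.
 */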
782
783/**
784 * swap_vma_readahead - swap in pages in hope we need them soon
785 * @targ_entry: swap entry of the targeted memory
786 * @gfp_mask: memory allocation flags
787 * @mpol: NUMA memory allocation policy to be applied
788 * @targ_ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
789 * @vmf: fault information
790 *
791 * Returns the struct folio for entry and addr, after queueing swapin.
792 *
793 * Primitive swap readahead code. We simply read in a few pages whose
794 * virtual addresses are around the fault address in the same vma.
795 *
796 * Caller must hold read mmap_lock if vmf->vma is not NULL.
797 *
798 */
799static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
800 struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)
801{
802 struct blk_plug plug;
803 struct swap_iocb *splug = NULL;
804 struct folio *folio;
805 pte_t *pte = NULL, pentry;
806 unsigned long addr;
807 swp_entry_t entry;
808 pgoff_t ilx;
809 unsigned int i;
810 bool page_allocated;
811 struct vma_swap_readahead ra_info = {
812 .win = 1,
813 };
814
815 swap_ra_info(vmf, &ra_info);
816 if (ra_info.win == 1)
817 goto skip;
818
819 addr = vmf->address - (ra_info.offset * PAGE_SIZE);
820 ilx = targ_ilx - ra_info.offset;
821
822 blk_start_plug(&plug);
823 for (i = 0; i < ra_info.nr_pte; i++, ilx++, addr += PAGE_SIZE) {
824 if (!pte++) {
825 pte = pte_offset_map(vmf->pmd, addr);
826 if (!pte)
827 break;
828 }
829 pentry = ptep_get_lockless(pte);
830 if (!is_swap_pte(pentry))
831 continue;
832 entry = pte_to_swp_entry(pentry);
833 if (unlikely(non_swap_entry(entry)))
834 continue;
835 pte_unmap(pte);
836 pte = NULL;
837 folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
838 &page_allocated, false);
839 if (!folio)
840 continue;
841 if (page_allocated) {
842 swap_read_folio(folio, false, &splug);
843 if (i != ra_info.offset) {
844 folio_set_readahead(folio);
845 count_vm_event(SWAP_RA);
846 }
847 }
848 folio_put(folio);
849 }
850 if (pte)
851 pte_unmap(pte);
852 blk_finish_plug(&plug);
853 swap_read_unplug(splug);
854 lru_add_drain();
855skip:
856 /* The folio was likely read above, so no need for plugging here */
857 folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
858 &page_allocated, false);
859 if (unlikely(page_allocated)) {
860 zswap_folio_swapin(folio);
861 swap_read_folio(folio, false, NULL);
862 }
863 return folio;
864}
865
866/**
867 * swapin_readahead - swap in pages in hope we need them soon
868 * @entry: swap entry of this memory
869 * @gfp_mask: memory allocation flags
870 * @vmf: fault information
871 *
872 * Returns the struct page for entry and addr, after queueing swapin.
873 *
874 * It's the main entry point for swap readahead. Depending on the configuration,
875 * it will read ahead blocks using cluster-based (ie, physical disk based)
876 * or vma-based (ie, virtual addresses around the faulting address) readahead.
877 */
878struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
879 struct vm_fault *vmf)
880{
881 struct mempolicy *mpol;
882 pgoff_t ilx;
883 struct folio *folio;
884
885 mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
886 folio = swap_use_vma_readahead() ?
887 swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
888 swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
889 mpol_cond_put(mpol);
890
891 if (!folio)
892 return NULL;
893 return folio_file_page(folio, swp_offset(entry));
894}
895
896#ifdef CONFIG_SYSFS
897static ssize_t vma_ra_enabled_show(struct kobject *kobj,
898 struct kobj_attribute *attr, char *buf)
899{
900 return sysfs_emit(buf, "%s\n",
901 enable_vma_readahead ? "true" : "false");
902}
903static ssize_t vma_ra_enabled_store(struct kobject *kobj,
904 struct kobj_attribute *attr,
905 const char *buf, size_t count)
906{
907 ssize_t ret;
908
909 ret = kstrtobool(buf, &enable_vma_readahead);
910 if (ret)
911 return ret;
912
913 return count;
914}
915static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);
916
917static struct attribute *swap_attrs[] = {
918 &vma_ra_enabled_attr.attr,
919 NULL,
920};
921
922static const struct attribute_group swap_attr_group = {
923 .attrs = swap_attrs,
924};
925
926static int __init swap_init_sysfs(void)
927{
928 int err;
929 struct kobject *swap_kobj;
930
931 swap_kobj = kobject_create_and_add("swap", mm_kobj);
932 if (!swap_kobj) {
933 pr_err("failed to create swap kobject\n");
934 return -ENOMEM;
935 }
936 err = sysfs_create_group(swap_kobj, &swap_attr_group);
937 if (err) {
938 pr_err("failed to register swap group\n");
939 goto delete_obj;
940 }
941 return 0;
942
943delete_obj:
944 kobject_put(swap_kobj);
945 return err;
946}
947subsys_initcall(swap_init_sysfs);
948#endif