mm/swap_state.c (v5.14.15)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  linux/mm/swap_state.c
  4 *
  5 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  6 *  Swap reorganised 29.12.95, Stephen Tweedie
  7 *
  8 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
  9 */
 10#include <linux/mm.h>
 11#include <linux/gfp.h>
 12#include <linux/kernel_stat.h>
 13#include <linux/swap.h>
 14#include <linux/swapops.h>
 15#include <linux/init.h>
 16#include <linux/pagemap.h>
 17#include <linux/backing-dev.h>
 18#include <linux/blkdev.h>
 19#include <linux/pagevec.h>
 20#include <linux/migrate.h>
 21#include <linux/vmalloc.h>
 22#include <linux/swap_slots.h>
 23#include <linux/huge_mm.h>
 24#include <linux/shmem_fs.h>
 25#include "internal.h"
 26
 27/*
 28 * swapper_space is a fiction, retained to simplify the path through
 29 * vmscan's shrink_page_list.
 30 */
 31static const struct address_space_operations swap_aops = {
 32	.writepage	= swap_writepage,
 33	.set_page_dirty	= swap_set_page_dirty,
 34#ifdef CONFIG_MIGRATION
 35	.migratepage	= migrate_page,
 36#endif
 37};
 38
 39struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
 40static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
 41static bool enable_vma_readahead __read_mostly = true;
 42
 43#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
 44#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
 45#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
 46#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)
 47
 48#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
 49#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
 50#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)
 51
 52#define SWAP_RA_VAL(addr, win, hits)				\
 53	(((addr) & PAGE_MASK) |					\
 54	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
 55	 ((hits) & SWAP_RA_HITS_MASK))
 56
 57/* Initial readahead hits is 4 to start up with a small window */
 58#define GET_SWAP_RA_VAL(vma)					\
 59	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
 60
 61#define INC_CACHE_INFO(x)	data_race(swap_cache_info.x++)
 62#define ADD_CACHE_INFO(x, nr)	data_race(swap_cache_info.x += (nr))
 63
 64static struct {
 65	unsigned long add_total;
 66	unsigned long del_total;
 67	unsigned long find_success;
 68	unsigned long find_total;
 69} swap_cache_info;
 70
 71static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
 72
 73void show_swap_cache_info(void)
 74{
 75	printk("%lu pages in swap cache\n", total_swapcache_pages());
 76	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
 77		swap_cache_info.add_total, swap_cache_info.del_total,
 78		swap_cache_info.find_success, swap_cache_info.find_total);
 79	printk("Free swap  = %ldkB\n",
 80		get_nr_swap_pages() << (PAGE_SHIFT - 10));
 81	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
 82}
 83
 84void *get_shadow_from_swap_cache(swp_entry_t entry)
 85{
 86	struct address_space *address_space = swap_address_space(entry);
 87	pgoff_t idx = swp_offset(entry);
 88	struct page *page;
 89
 90	page = xa_load(&address_space->i_pages, idx);
 91	if (xa_is_value(page))
 92		return page;
 93	return NULL;
 94}
 95
 96/*
 97 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 98 * but sets SwapCache flag and private instead of mapping and index.
 99 */
100int add_to_swap_cache(struct page *page, swp_entry_t entry,
101			gfp_t gfp, void **shadowp)
102{
103	struct address_space *address_space = swap_address_space(entry);
104	pgoff_t idx = swp_offset(entry);
105	XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
106	unsigned long i, nr = thp_nr_pages(page);
107	void *old;
108
109	VM_BUG_ON_PAGE(!PageLocked(page), page);
110	VM_BUG_ON_PAGE(PageSwapCache(page), page);
111	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
112
113	page_ref_add(page, nr);
114	SetPageSwapCache(page);
115
116	do {
117		xas_lock_irq(&xas);
118		xas_create_range(&xas);
119		if (xas_error(&xas))
120			goto unlock;
121		for (i = 0; i < nr; i++) {
122			VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
123			old = xas_load(&xas);
124			if (xa_is_value(old)) {
125				if (shadowp)
126					*shadowp = old;
127			}
128			set_page_private(page + i, entry.val + i);
129			xas_store(&xas, page);
130			xas_next(&xas);
131		}
132		address_space->nrpages += nr;
133		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
134		__mod_lruvec_page_state(page, NR_SWAPCACHE, nr);
135		ADD_CACHE_INFO(add_total, nr);
136unlock:
137		xas_unlock_irq(&xas);
138	} while (xas_nomem(&xas, gfp));
139
140	if (!xas_error(&xas))
141		return 0;
142
143	ClearPageSwapCache(page);
144	page_ref_sub(page, nr);
145	return xas_error(&xas);
146}
147
148/*
149 * This must be called only on pages that have
150 * been verified to be in the swap cache.
151 */
152void __delete_from_swap_cache(struct page *page,
153			swp_entry_t entry, void *shadow)
154{
155	struct address_space *address_space = swap_address_space(entry);
156	int i, nr = thp_nr_pages(page);
157	pgoff_t idx = swp_offset(entry);
158	XA_STATE(xas, &address_space->i_pages, idx);
159
160	VM_BUG_ON_PAGE(!PageLocked(page), page);
161	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
162	VM_BUG_ON_PAGE(PageWriteback(page), page);
163
164	for (i = 0; i < nr; i++) {
165		void *entry = xas_store(&xas, shadow);
166		VM_BUG_ON_PAGE(entry != page, entry);
167		set_page_private(page + i, 0);
168		xas_next(&xas);
169	}
170	ClearPageSwapCache(page);
171	address_space->nrpages -= nr;
172	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
173	__mod_lruvec_page_state(page, NR_SWAPCACHE, -nr);
174	ADD_CACHE_INFO(del_total, nr);
175}
176
177/**
178 * add_to_swap - allocate swap space for a page
179 * @page: page we want to move to swap
180 *
181 * Allocate swap space for the page and add the page to the
182 * swap cache.  Caller needs to hold the page lock. 
183 */
184int add_to_swap(struct page *page)
185{
186	swp_entry_t entry;
187	int err;
188
189	VM_BUG_ON_PAGE(!PageLocked(page), page);
190	VM_BUG_ON_PAGE(!PageUptodate(page), page);
191
192	entry = get_swap_page(page);
193	if (!entry.val)
194		return 0;
195
196	/*
197	 * XArray node allocations from PF_MEMALLOC contexts could
198	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
199	 * stops emergency reserves from being allocated.
200	 *
201	 * TODO: this could cause a theoretical memory reclaim
202	 * deadlock in the swap out path.
203	 */
204	/*
205	 * Add it to the swap cache.
206	 */
207	err = add_to_swap_cache(page, entry,
208			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
209	if (err)
210		/*
211		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
212		 * clear SWAP_HAS_CACHE flag.
213		 */
214		goto fail;
215	/*
216	 * Normally the page will be dirtied in unmap because its pte should be
217	 * dirty. A special case is MADV_FREE page. The page's pte could have
218	 * dirty bit cleared but the page's SwapBacked bit is still set because
219	 * clearing the dirty bit and SwapBacked bit has no lock protected. For
220	 * such page, unmap will not set dirty bit for it, so page reclaim will
221	 * not write the page out. This can cause data corruption when the page
222	 * is swap in later. Always setting the dirty bit for the page solves
223	 * the problem.
224	 */
225	set_page_dirty(page);
226
227	return 1;
228
229fail:
230	put_swap_page(page, entry);
231	return 0;
232}
233
234/*
235 * This must be called only on pages that have
236 * been verified to be in the swap cache and locked.
237 * It will never put the page into the free list,
238 * the caller has a reference on the page.
239 */
240void delete_from_swap_cache(struct page *page)
241{
242	swp_entry_t entry = { .val = page_private(page) };
243	struct address_space *address_space = swap_address_space(entry);
244
245	xa_lock_irq(&address_space->i_pages);
246	__delete_from_swap_cache(page, entry, NULL);
247	xa_unlock_irq(&address_space->i_pages);
248
249	put_swap_page(page, entry);
250	page_ref_sub(page, thp_nr_pages(page));
251}
252
253void clear_shadow_from_swap_cache(int type, unsigned long begin,
254				unsigned long end)
255{
256	unsigned long curr = begin;
257	void *old;
258
259	for (;;) {
260		swp_entry_t entry = swp_entry(type, curr);
261		struct address_space *address_space = swap_address_space(entry);
262		XA_STATE(xas, &address_space->i_pages, curr);
263
264		xa_lock_irq(&address_space->i_pages);
265		xas_for_each(&xas, old, end) {
266			if (!xa_is_value(old))
267				continue;
268			xas_store(&xas, NULL);
269		}
270		xa_unlock_irq(&address_space->i_pages);
271
272		/* search the next swapcache until we meet end */
273		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
274		curr++;
275		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
276		if (curr > end)
277			break;
278	}
279}
280
281/* 
282 * If we are the only user, then try to free up the swap cache. 
283 * 
284 * Its ok to check for PageSwapCache without the page lock
285 * here because we are going to recheck again inside
286 * try_to_free_swap() _with_ the lock.
287 * 					- Marcelo
288 */
289void free_swap_cache(struct page *page)
290{
291	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
292		try_to_free_swap(page);
293		unlock_page(page);
294	}
295}
296
297/* 
298 * Perform a free_page(), also freeing any swap cache associated with
299 * this page if it is the last user of the page.
300 */
301void free_page_and_swap_cache(struct page *page)
302{
303	free_swap_cache(page);
304	if (!is_huge_zero_page(page))
305		put_page(page);
306}
307
308/*
309 * Passed an array of pages, drop them all from swapcache and then release
310 * them.  They are removed from the LRU and freed if this is their last use.
311 */
312void free_pages_and_swap_cache(struct page **pages, int nr)
313{
314	struct page **pagep = pages;
315	int i;
316
317	lru_add_drain();
318	for (i = 0; i < nr; i++)
319		free_swap_cache(pagep[i]);
320	release_pages(pagep, nr);
321}
322
323static inline bool swap_use_vma_readahead(void)
324{
325	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
326}
327
328/*
329 * Lookup a swap entry in the swap cache. A found page will be returned
330 * unlocked and with its refcount incremented - we rely on the kernel
331 * lock getting page table operations atomic even if we drop the page
332 * lock before returning.
333 */
334struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
335			       unsigned long addr)
336{
337	struct page *page;
338	struct swap_info_struct *si;
339
340	si = get_swap_device(entry);
341	if (!si)
342		return NULL;
343	page = find_get_page(swap_address_space(entry), swp_offset(entry));
344	put_swap_device(si);
345
346	INC_CACHE_INFO(find_total);
347	if (page) {
348		bool vma_ra = swap_use_vma_readahead();
349		bool readahead;
350
351		INC_CACHE_INFO(find_success);
352		/*
353		 * At the moment, we don't support PG_readahead for anon THP
354		 * so let's bail out rather than confusing the readahead stat.
355		 */
356		if (unlikely(PageTransCompound(page)))
357			return page;
358
359		readahead = TestClearPageReadahead(page);
360		if (vma && vma_ra) {
361			unsigned long ra_val;
362			int win, hits;
363
364			ra_val = GET_SWAP_RA_VAL(vma);
365			win = SWAP_RA_WIN(ra_val);
366			hits = SWAP_RA_HITS(ra_val);
367			if (readahead)
368				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
369			atomic_long_set(&vma->swap_readahead_info,
370					SWAP_RA_VAL(addr, win, hits));
371		}
372
373		if (readahead) {
374			count_vm_event(SWAP_RA_HIT);
375			if (!vma || !vma_ra)
376				atomic_inc(&swapin_readahead_hits);
377		}
378	}
379
380	return page;
381}
382
383/**
384 * find_get_incore_page - Find and get a page from the page or swap caches.
385 * @mapping: The address_space to search.
386 * @index: The page cache index.
387 *
388 * This differs from find_get_page() in that it will also look for the
389 * page in the swap cache.
390 *
391 * Return: The found page or %NULL.
392 */
393struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
394{
395	swp_entry_t swp;
396	struct swap_info_struct *si;
397	struct page *page = pagecache_get_page(mapping, index,
398						FGP_ENTRY | FGP_HEAD, 0);
399
400	if (!page)
401		return page;
402	if (!xa_is_value(page))
403		return find_subpage(page, index);
404	if (!shmem_mapping(mapping))
405		return NULL;
406
407	swp = radix_to_swp_entry(page);
408	/* Prevent swapoff from happening to us */
409	si = get_swap_device(swp);
410	if (!si)
411		return NULL;
412	page = find_get_page(swap_address_space(swp), swp_offset(swp));
413	put_swap_device(si);
414	return page;
415}
416
417struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
418			struct vm_area_struct *vma, unsigned long addr,
419			bool *new_page_allocated)
420{
421	struct swap_info_struct *si;
422	struct page *page;
423	void *shadow = NULL;
424
425	*new_page_allocated = false;
426
427	for (;;) {
428		int err;
429		/*
430		 * First check the swap cache.  Since this is normally
431		 * called after lookup_swap_cache() failed, re-calling
432		 * that would confuse statistics.
433		 */
434		si = get_swap_device(entry);
435		if (!si)
436			return NULL;
437		page = find_get_page(swap_address_space(entry),
438				     swp_offset(entry));
439		put_swap_device(si);
440		if (page)
441			return page;
442
443		/*
444		 * Just skip read ahead for unused swap slot.
445		 * During swap_off when swap_slot_cache is disabled,
446		 * we have to handle the race between putting
447		 * swap entry in swap cache and marking swap slot
448		 * as SWAP_HAS_CACHE.  That's done in later part of code or
449		 * else swap_off will be aborted if we return NULL.
450		 */
451		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
452			return NULL;
453
454		/*
455		 * Get a new page to read into from swap.  Allocate it now,
456		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
457		 * cause any racers to loop around until we add it to cache.
458		 */
459		page = alloc_page_vma(gfp_mask, vma, addr);
460		if (!page)
461			return NULL;
462
463		/*
464		 * Swap entry may have been freed since our caller observed it.
465		 */
466		err = swapcache_prepare(entry);
467		if (!err)
468			break;
469
470		put_page(page);
471		if (err != -EEXIST)
472			return NULL;
473
474		/*
475		 * We might race against __delete_from_swap_cache(), and
476		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
477		 * has not yet been cleared.  Or race against another
478		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
479		 * in swap_map, but not yet added its page to swap cache.
480		 */
481		cond_resched();
482	}
483
484	/*
485	 * The swap entry is ours to swap in. Prepare the new page.
486	 */
487
488	__SetPageLocked(page);
489	__SetPageSwapBacked(page);
490
491	if (mem_cgroup_swapin_charge_page(page, NULL, gfp_mask, entry))
492		goto fail_unlock;
493
494	/* May fail (-ENOMEM) if XArray node allocation failed. */
495	if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
496		goto fail_unlock;
497
498	mem_cgroup_swapin_uncharge_swap(entry);
499
500	if (shadow)
501		workingset_refault(page, shadow);
502
503	/* Caller will initiate read into locked page */
504	lru_cache_add(page);
505	*new_page_allocated = true;
506	return page;
507
508fail_unlock:
509	put_swap_page(page, entry);
510	unlock_page(page);
511	put_page(page);
512	return NULL;
513}
514
515/*
516 * Locate a page of swap in physical memory, reserving swap cache space
517 * and reading the disk if it is not already cached.
518 * A failure return means that either the page allocation failed or that
519 * the swap entry is no longer in use.
520 */
521struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
522		struct vm_area_struct *vma, unsigned long addr, bool do_poll)
523{
524	bool page_was_allocated;
525	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
526			vma, addr, &page_was_allocated);
527
528	if (page_was_allocated)
529		swap_readpage(retpage, do_poll);
530
531	return retpage;
532}
533
534static unsigned int __swapin_nr_pages(unsigned long prev_offset,
535				      unsigned long offset,
536				      int hits,
537				      int max_pages,
538				      int prev_win)
539{
540	unsigned int pages, last_ra;
541
542	/*
543	 * This heuristic has been found to work well on both sequential and
544	 * random loads, swapping to hard disk or to SSD: please don't ask
545	 * what the "+ 2" means, it just happens to work well, that's all.
546	 */
547	pages = hits + 2;
548	if (pages == 2) {
549		/*
550		 * We can have no readahead hits to judge by: but must not get
551		 * stuck here forever, so check for an adjacent offset instead
552		 * (and don't even bother to check whether swap type is same).
553		 */
554		if (offset != prev_offset + 1 && offset != prev_offset - 1)
555			pages = 1;
556	} else {
557		unsigned int roundup = 4;
558		while (roundup < pages)
559			roundup <<= 1;
560		pages = roundup;
561	}
562
563	if (pages > max_pages)
564		pages = max_pages;
565
566	/* Don't shrink readahead too fast */
567	last_ra = prev_win / 2;
568	if (pages < last_ra)
569		pages = last_ra;
570
571	return pages;
572}
573
574static unsigned long swapin_nr_pages(unsigned long offset)
575{
576	static unsigned long prev_offset;
577	unsigned int hits, pages, max_pages;
578	static atomic_t last_readahead_pages;
579
580	max_pages = 1 << READ_ONCE(page_cluster);
581	if (max_pages <= 1)
582		return 1;
583
584	hits = atomic_xchg(&swapin_readahead_hits, 0);
585	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
586				  max_pages,
587				  atomic_read(&last_readahead_pages));
588	if (!hits)
589		WRITE_ONCE(prev_offset, offset);
590	atomic_set(&last_readahead_pages, pages);
591
592	return pages;
593}
594
595/**
596 * swap_cluster_readahead - swap in pages in hope we need them soon
597 * @entry: swap entry of this memory
598 * @gfp_mask: memory allocation flags
599 * @vmf: fault information
600 *
601 * Returns the struct page for entry and addr, after queueing swapin.
602 *
603 * Primitive swap readahead code. We simply read an aligned block of
604 * (1 << page_cluster) entries in the swap area. This method is chosen
605 * because it doesn't cost us any seek time.  We also make sure to queue
606 * the 'original' request together with the readahead ones...
607 *
608 * This has been extended to use the NUMA policies from the mm triggering
609 * the readahead.
610 *
611 * Caller must hold read mmap_lock if vmf->vma is not NULL.
612 */
613struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
614				struct vm_fault *vmf)
615{
616	struct page *page;
617	unsigned long entry_offset = swp_offset(entry);
618	unsigned long offset = entry_offset;
619	unsigned long start_offset, end_offset;
620	unsigned long mask;
621	struct swap_info_struct *si = swp_swap_info(entry);
622	struct blk_plug plug;
623	bool do_poll = true, page_allocated;
624	struct vm_area_struct *vma = vmf->vma;
625	unsigned long addr = vmf->address;
626
627	mask = swapin_nr_pages(offset) - 1;
628	if (!mask)
629		goto skip;
630
631	do_poll = false;
632	/* Read a page_cluster sized and aligned cluster around offset. */
633	start_offset = offset & ~mask;
634	end_offset = offset | mask;
635	if (!start_offset)	/* First page is swap header. */
636		start_offset++;
637	if (end_offset >= si->max)
638		end_offset = si->max - 1;
639
640	blk_start_plug(&plug);
641	for (offset = start_offset; offset <= end_offset ; offset++) {
642		/* Ok, do the async read-ahead now */
643		page = __read_swap_cache_async(
644			swp_entry(swp_type(entry), offset),
645			gfp_mask, vma, addr, &page_allocated);
646		if (!page)
647			continue;
648		if (page_allocated) {
649			swap_readpage(page, false);
650			if (offset != entry_offset) {
651				SetPageReadahead(page);
652				count_vm_event(SWAP_RA);
653			}
654		}
655		put_page(page);
656	}
657	blk_finish_plug(&plug);
658
659	lru_add_drain();	/* Push any new pages onto the LRU now */
660skip:
661	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
662}
663
664int init_swap_address_space(unsigned int type, unsigned long nr_pages)
665{
666	struct address_space *spaces, *space;
667	unsigned int i, nr;
668
669	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
670	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
671	if (!spaces)
672		return -ENOMEM;
673	for (i = 0; i < nr; i++) {
674		space = spaces + i;
675		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
676		atomic_set(&space->i_mmap_writable, 0);
677		space->a_ops = &swap_aops;
678		/* swap cache doesn't use writeback related tags */
679		mapping_set_no_writeback_tags(space);
680	}
681	nr_swapper_spaces[type] = nr;
682	swapper_spaces[type] = spaces;
683
684	return 0;
685}
686
687void exit_swap_address_space(unsigned int type)
688{
689	int i;
690	struct address_space *spaces = swapper_spaces[type];
691
692	for (i = 0; i < nr_swapper_spaces[type]; i++)
693		VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
694	kvfree(spaces);
695	nr_swapper_spaces[type] = 0;
696	swapper_spaces[type] = NULL;
697}
698
699static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
700				     unsigned long faddr,
701				     unsigned long lpfn,
702				     unsigned long rpfn,
703				     unsigned long *start,
704				     unsigned long *end)
705{
706	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
707		      PFN_DOWN(faddr & PMD_MASK));
708	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
709		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
710}
711
712static void swap_ra_info(struct vm_fault *vmf,
713			struct vma_swap_readahead *ra_info)
714{
715	struct vm_area_struct *vma = vmf->vma;
716	unsigned long ra_val;
717	unsigned long faddr, pfn, fpfn;
718	unsigned long start, end;
719	pte_t *pte, *orig_pte;
720	unsigned int max_win, hits, prev_win, win, left;
721#ifndef CONFIG_64BIT
722	pte_t *tpte;
723#endif
724
725	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
726			     SWAP_RA_ORDER_CEILING);
727	if (max_win == 1) {
728		ra_info->win = 1;
729		return;
730	}
731
732	faddr = vmf->address;
733	orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
734
735	fpfn = PFN_DOWN(faddr);
736	ra_val = GET_SWAP_RA_VAL(vma);
737	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
738	prev_win = SWAP_RA_WIN(ra_val);
739	hits = SWAP_RA_HITS(ra_val);
740	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
741					       max_win, prev_win);
742	atomic_long_set(&vma->swap_readahead_info,
743			SWAP_RA_VAL(faddr, win, 0));
744
745	if (win == 1) {
746		pte_unmap(orig_pte);
747		return;
748	}
749
750	/* Copy the PTEs because the page table may be unmapped */
751	if (fpfn == pfn + 1)
752		swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
753	else if (pfn == fpfn + 1)
754		swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
755				  &start, &end);
756	else {
757		left = (win - 1) / 2;
758		swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
759				  &start, &end);
760	}
761	ra_info->nr_pte = end - start;
762	ra_info->offset = fpfn - start;
763	pte -= ra_info->offset;
764#ifdef CONFIG_64BIT
765	ra_info->ptes = pte;
766#else
767	tpte = ra_info->ptes;
768	for (pfn = start; pfn != end; pfn++)
769		*tpte++ = *pte++;
770#endif
771	pte_unmap(orig_pte);
772}
773
774/**
775 * swap_vma_readahead - swap in pages in hope we need them soon
776 * @fentry: swap entry of this memory
777 * @gfp_mask: memory allocation flags
778 * @vmf: fault information
779 *
780 * Returns the struct page for entry and addr, after queueing swapin.
781 *
782 * Primitive swap readahead code. We simply read in a few pages whose
783 * virtual addresses are around the fault address in the same vma.
784 *
785 * Caller must hold read mmap_lock if vmf->vma is not NULL.
786 *
787 */
788static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
789				       struct vm_fault *vmf)
790{
791	struct blk_plug plug;
792	struct vm_area_struct *vma = vmf->vma;
793	struct page *page;
794	pte_t *pte, pentry;
795	swp_entry_t entry;
796	unsigned int i;
797	bool page_allocated;
798	struct vma_swap_readahead ra_info = {
799		.win = 1,
800	};
801
802	swap_ra_info(vmf, &ra_info);
803	if (ra_info.win == 1)
804		goto skip;
805
806	blk_start_plug(&plug);
807	for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
808	     i++, pte++) {
809		pentry = *pte;
810		if (pte_none(pentry))
811			continue;
812		if (pte_present(pentry))
813			continue;
814		entry = pte_to_swp_entry(pentry);
815		if (unlikely(non_swap_entry(entry)))
816			continue;
817		page = __read_swap_cache_async(entry, gfp_mask, vma,
818					       vmf->address, &page_allocated);
819		if (!page)
820			continue;
821		if (page_allocated) {
822			swap_readpage(page, false);
823			if (i != ra_info.offset) {
824				SetPageReadahead(page);
825				count_vm_event(SWAP_RA);
826			}
827		}
828		put_page(page);
829	}
830	blk_finish_plug(&plug);
831	lru_add_drain();
832skip:
833	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
834				     ra_info.win == 1);
835}
836
837/**
838 * swapin_readahead - swap in pages in hope we need them soon
839 * @entry: swap entry of this memory
840 * @gfp_mask: memory allocation flags
841 * @vmf: fault information
842 *
843 * Returns the struct page for entry and addr, after queueing swapin.
844 *
845 * It's a main entry function for swap readahead. By the configuration,
846 * it will read ahead blocks by cluster-based(ie, physical disk based)
847 * or vma-based(ie, virtual address based on faulty address) readahead.
848 */
849struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
850				struct vm_fault *vmf)
851{
852	return swap_use_vma_readahead() ?
853			swap_vma_readahead(entry, gfp_mask, vmf) :
854			swap_cluster_readahead(entry, gfp_mask, vmf);
855}
856
857#ifdef CONFIG_SYSFS
858static ssize_t vma_ra_enabled_show(struct kobject *kobj,
859				     struct kobj_attribute *attr, char *buf)
860{
861	return sysfs_emit(buf, "%s\n",
862			  enable_vma_readahead ? "true" : "false");
863}
864static ssize_t vma_ra_enabled_store(struct kobject *kobj,
865				      struct kobj_attribute *attr,
866				      const char *buf, size_t count)
867{
868	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
869		enable_vma_readahead = true;
870	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
871		enable_vma_readahead = false;
872	else
873		return -EINVAL;
874
875	return count;
876}
877static struct kobj_attribute vma_ra_enabled_attr =
878	__ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
879	       vma_ra_enabled_store);
880
881static struct attribute *swap_attrs[] = {
882	&vma_ra_enabled_attr.attr,
883	NULL,
884};
885
886static const struct attribute_group swap_attr_group = {
887	.attrs = swap_attrs,
888};
889
890static int __init swap_init_sysfs(void)
891{
892	int err;
893	struct kobject *swap_kobj;
894
895	swap_kobj = kobject_create_and_add("swap", mm_kobj);
896	if (!swap_kobj) {
897		pr_err("failed to create swap kobject\n");
898		return -ENOMEM;
899	}
900	err = sysfs_create_group(swap_kobj, &swap_attr_group);
901	if (err) {
902		pr_err("failed to register swap group\n");
903		goto delete_obj;
904	}
905	return 0;
906
907delete_obj:
908	kobject_put(swap_kobj);
909	return err;
910}
911subsys_initcall(swap_init_sysfs);
912#endif
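
The per-VMA readahead state consulted above by lookup_swap_cache() and swap_ra_info() is a single word, vma->swap_readahead_info, packed by the SWAP_RA_VAL()/SWAP_RA_ADDR()/SWAP_RA_WIN()/SWAP_RA_HITS() macros near the top of the file, and the next window size comes from the __swapin_nr_pages() heuristic; both are unchanged in the v6.8 listing that follows. The stand-alone user-space sketch below simply mirrors those macros and that function so the packing and the window growth are easy to see. PAGE_SHIFT = 12 (4 KiB pages) and max_pages = 8 (i.e. page_cluster = 3) are assumptions made only for this demo.

/*
 * Stand-alone, user-space mirror of the swap readahead state handling in
 * mm/swap_state.c: the SWAP_RA_VAL()/SWAP_RA_*() packing of
 * vma->swap_readahead_info and the __swapin_nr_pages() window heuristic.
 * PAGE_SHIFT = 12 and max_pages = 8 are demo-only assumptions.
 */
#include <stdio.h>

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE - 1))

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

/* Same body as __swapin_nr_pages() in the listing above. */
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits, int max_pages, int prev_win)
{
	unsigned int pages, last_ra;

	pages = hits + 2;
	if (pages == 2) {
		/* No hits: only keep reading ahead for an adjacent offset. */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		/* Round the window up to a power of two (at least 4). */
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}
	if (pages > max_pages)
		pages = max_pages;
	/* Don't shrink readahead too fast. */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;
	return pages;
}

int main(void)
{
	/* Pack a fault address, a window of 8 pages and 3 hits, then unpack. */
	unsigned long val = SWAP_RA_VAL(0x7f1234567000UL, 8UL, 3UL);
	int hits, win = 1;

	printf("packed=%#lx addr=%#lx win=%lu hits=%lu\n", val,
	       SWAP_RA_ADDR(val), SWAP_RA_WIN(val), SWAP_RA_HITS(val));

	/* Window growth for successive hit counts at a non-adjacent offset. */
	for (hits = 0; hits <= 6; hits++) {
		win = __swapin_nr_pages(1000, 2000, hits, 8, win);
		printf("hits=%d -> window=%d\n", hits, win);
	}
	return 0;
}

With these demo values the window goes 1, 4, 4, 8, 8, ...: it is the max_pages clamp (1 << page_cluster, further capped by SWAP_RA_ORDER_CEILING on the VMA path), not the hit counter, that bounds it in practice.
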
mm/swap_state.c (v6.8)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  linux/mm/swap_state.c
  4 *
  5 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  6 *  Swap reorganised 29.12.95, Stephen Tweedie
  7 *
  8 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
  9 */
 10#include <linux/mm.h>
 11#include <linux/gfp.h>
 12#include <linux/kernel_stat.h>
 13#include <linux/mempolicy.h>
 14#include <linux/swap.h>
 15#include <linux/swapops.h>
 16#include <linux/init.h>
 17#include <linux/pagemap.h>
 18#include <linux/backing-dev.h>
 19#include <linux/blkdev.h>
 20#include <linux/migrate.h>
 21#include <linux/vmalloc.h>
 22#include <linux/swap_slots.h>
 23#include <linux/huge_mm.h>
 24#include <linux/shmem_fs.h>
 25#include "internal.h"
 26#include "swap.h"
 27
 28/*
 29 * swapper_space is a fiction, retained to simplify the path through
 30 * vmscan's shrink_page_list.
 31 */
 32static const struct address_space_operations swap_aops = {
 33	.writepage	= swap_writepage,
 34	.dirty_folio	= noop_dirty_folio,
 35#ifdef CONFIG_MIGRATION
 36	.migrate_folio	= migrate_folio,
 37#endif
 38};
 39
 40struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
 41static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
 42static bool enable_vma_readahead __read_mostly = true;
 43
 44#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
 45#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
 46#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
 47#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)
 48
 49#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
 50#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
 51#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)
 52
 53#define SWAP_RA_VAL(addr, win, hits)				\
 54	(((addr) & PAGE_MASK) |					\
 55	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
 56	 ((hits) & SWAP_RA_HITS_MASK))
 57
 58/* Initial readahead hits is 4 to start up with a small window */
 59#define GET_SWAP_RA_VAL(vma)					\
 60	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
 61
 62static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
 63
 64void show_swap_cache_info(void)
 65{
 66	printk("%lu pages in swap cache\n", total_swapcache_pages());
 67	printk("Free swap  = %ldkB\n", K(get_nr_swap_pages()));
 68	printk("Total swap = %lukB\n", K(total_swap_pages));
 69}
 70
 71void *get_shadow_from_swap_cache(swp_entry_t entry)
 72{
 73	struct address_space *address_space = swap_address_space(entry);
 74	pgoff_t idx = swp_offset(entry);
 75	struct page *page;
 76
 77	page = xa_load(&address_space->i_pages, idx);
 78	if (xa_is_value(page))
 79		return page;
 80	return NULL;
 81}
 82
 83/*
 84 * add_to_swap_cache resembles filemap_add_folio on swapper_space,
 85 * but sets SwapCache flag and private instead of mapping and index.
 86 */
 87int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
 88			gfp_t gfp, void **shadowp)
 89{
 90	struct address_space *address_space = swap_address_space(entry);
 91	pgoff_t idx = swp_offset(entry);
 92	XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
 93	unsigned long i, nr = folio_nr_pages(folio);
 94	void *old;
 95
 96	xas_set_update(&xas, workingset_update_node);
 97
 98	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 99	VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
100	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
101
102	folio_ref_add(folio, nr);
103	folio_set_swapcache(folio);
104	folio->swap = entry;
105
106	do {
107		xas_lock_irq(&xas);
108		xas_create_range(&xas);
109		if (xas_error(&xas))
110			goto unlock;
111		for (i = 0; i < nr; i++) {
112			VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
113			if (shadowp) {
114				old = xas_load(&xas);
115				if (xa_is_value(old))
116					*shadowp = old;
117			}
118			xas_store(&xas, folio);
119			xas_next(&xas);
120		}
121		address_space->nrpages += nr;
122		__node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
123		__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
124unlock:
125		xas_unlock_irq(&xas);
126	} while (xas_nomem(&xas, gfp));
127
128	if (!xas_error(&xas))
129		return 0;
130
131	folio_clear_swapcache(folio);
132	folio_ref_sub(folio, nr);
133	return xas_error(&xas);
134}
135
136/*
137 * This must be called only on folios that have
138 * been verified to be in the swap cache.
139 */
140void __delete_from_swap_cache(struct folio *folio,
141			swp_entry_t entry, void *shadow)
142{
143	struct address_space *address_space = swap_address_space(entry);
144	int i;
145	long nr = folio_nr_pages(folio);
146	pgoff_t idx = swp_offset(entry);
147	XA_STATE(xas, &address_space->i_pages, idx);
148
149	xas_set_update(&xas, workingset_update_node);
150
151	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
152	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
153	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
154
155	for (i = 0; i < nr; i++) {
156		void *entry = xas_store(&xas, shadow);
157		VM_BUG_ON_PAGE(entry != folio, entry);
158		xas_next(&xas);
159	}
160	folio->swap.val = 0;
161	folio_clear_swapcache(folio);
162	address_space->nrpages -= nr;
163	__node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
164	__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
165}
166
167/**
168 * add_to_swap - allocate swap space for a folio
169 * @folio: folio we want to move to swap
170 *
171 * Allocate swap space for the folio and add the folio to the
172 * swap cache.
173 *
174 * Context: Caller needs to hold the folio lock.
175 * Return: Whether the folio was added to the swap cache.
176 */
177bool add_to_swap(struct folio *folio)
178{
179	swp_entry_t entry;
180	int err;
181
182	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
183	VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);
184
185	entry = folio_alloc_swap(folio);
186	if (!entry.val)
187		return false;
188
189	/*
190	 * XArray node allocations from PF_MEMALLOC contexts could
191	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
192	 * stops emergency reserves from being allocated.
193	 *
194	 * TODO: this could cause a theoretical memory reclaim
195	 * deadlock in the swap out path.
196	 */
197	/*
198	 * Add it to the swap cache.
199	 */
200	err = add_to_swap_cache(folio, entry,
201			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
202	if (err)
203		/*
204		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
205		 * clear SWAP_HAS_CACHE flag.
206		 */
207		goto fail;
208	/*
209	 * Normally the folio will be dirtied in unmap because its
210	 * pte should be dirty. A special case is MADV_FREE page. The
211	 * page's pte could have dirty bit cleared but the folio's
212	 * SwapBacked flag is still set because clearing the dirty bit
213	 * and SwapBacked flag has no lock protected. For such folio,
214	 * unmap will not set dirty bit for it, so folio reclaim will
215	 * not write the folio out. This can cause data corruption when
216	 * the folio is swapped in later. Always setting the dirty flag
217	 * for the folio solves the problem.
218	 */
219	folio_mark_dirty(folio);
220
221	return true;
222
223fail:
224	put_swap_folio(folio, entry);
225	return false;
226}
227
228/*
229 * This must be called only on folios that have
230 * been verified to be in the swap cache and locked.
231 * It will never put the folio into the free list,
232 * the caller has a reference on the folio.
233 */
234void delete_from_swap_cache(struct folio *folio)
235{
236	swp_entry_t entry = folio->swap;
237	struct address_space *address_space = swap_address_space(entry);
238
239	xa_lock_irq(&address_space->i_pages);
240	__delete_from_swap_cache(folio, entry, NULL);
241	xa_unlock_irq(&address_space->i_pages);
242
243	put_swap_folio(folio, entry);
244	folio_ref_sub(folio, folio_nr_pages(folio));
245}
246
247void clear_shadow_from_swap_cache(int type, unsigned long begin,
248				unsigned long end)
249{
250	unsigned long curr = begin;
251	void *old;
252
253	for (;;) {
254		swp_entry_t entry = swp_entry(type, curr);
255		struct address_space *address_space = swap_address_space(entry);
256		XA_STATE(xas, &address_space->i_pages, curr);
257
258		xas_set_update(&xas, workingset_update_node);
259
260		xa_lock_irq(&address_space->i_pages);
261		xas_for_each(&xas, old, end) {
262			if (!xa_is_value(old))
263				continue;
264			xas_store(&xas, NULL);
265		}
266		xa_unlock_irq(&address_space->i_pages);
267
268		/* search the next swapcache until we meet end */
269		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
270		curr++;
271		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
272		if (curr > end)
273			break;
274	}
275}
276
277/*
278 * If we are the only user, then try to free up the swap cache.
279 *
280 * Its ok to check the swapcache flag without the folio lock
281 * here because we are going to recheck again inside
282 * folio_free_swap() _with_ the lock.
283 * 					- Marcelo
284 */
285void free_swap_cache(struct page *page)
286{
287	struct folio *folio = page_folio(page);
288
289	if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
290	    folio_trylock(folio)) {
291		folio_free_swap(folio);
292		folio_unlock(folio);
293	}
294}
295
296/*
297 * Perform a free_page(), also freeing any swap cache associated with
298 * this page if it is the last user of the page.
299 */
300void free_page_and_swap_cache(struct page *page)
301{
302	free_swap_cache(page);
303	if (!is_huge_zero_page(page))
304		put_page(page);
305}
306
307/*
308 * Passed an array of pages, drop them all from swapcache and then release
309 * them.  They are removed from the LRU and freed if this is their last use.
310 */
311void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
312{
313	lru_add_drain();
314	for (int i = 0; i < nr; i++)
315		free_swap_cache(encoded_page_ptr(pages[i]));
316	release_pages(pages, nr);
317}
318
319static inline bool swap_use_vma_readahead(void)
320{
321	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
322}
323
324/*
325 * Lookup a swap entry in the swap cache. A found folio will be returned
326 * unlocked and with its refcount incremented - we rely on the kernel
327 * lock getting page table operations atomic even if we drop the folio
328 * lock before returning.
329 *
330 * Caller must lock the swap device or hold a reference to keep it valid.
331 */
332struct folio *swap_cache_get_folio(swp_entry_t entry,
333		struct vm_area_struct *vma, unsigned long addr)
334{
335	struct folio *folio;
336
337	folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
338	if (!IS_ERR(folio)) {
339		bool vma_ra = swap_use_vma_readahead();
340		bool readahead;
341
342		/*
343		 * At the moment, we don't support PG_readahead for anon THP
344		 * so let's bail out rather than confusing the readahead stat.
345		 */
346		if (unlikely(folio_test_large(folio)))
347			return folio;
348
349		readahead = folio_test_clear_readahead(folio);
350		if (vma && vma_ra) {
351			unsigned long ra_val;
352			int win, hits;
353
354			ra_val = GET_SWAP_RA_VAL(vma);
355			win = SWAP_RA_WIN(ra_val);
356			hits = SWAP_RA_HITS(ra_val);
357			if (readahead)
358				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
359			atomic_long_set(&vma->swap_readahead_info,
360					SWAP_RA_VAL(addr, win, hits));
361		}
362
363		if (readahead) {
364			count_vm_event(SWAP_RA_HIT);
365			if (!vma || !vma_ra)
366				atomic_inc(&swapin_readahead_hits);
367		}
368	} else {
369		folio = NULL;
370	}
371
372	return folio;
373}
374
375/**
376 * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
377 * @mapping: The address_space to search.
378 * @index: The page cache index.
379 *
380 * This differs from filemap_get_folio() in that it will also look for the
381 * folio in the swap cache.
382 *
383 * Return: The found folio or %NULL.
384 */
385struct folio *filemap_get_incore_folio(struct address_space *mapping,
386		pgoff_t index)
387{
388	swp_entry_t swp;
389	struct swap_info_struct *si;
390	struct folio *folio = filemap_get_entry(mapping, index);
391
392	if (!folio)
393		return ERR_PTR(-ENOENT);
394	if (!xa_is_value(folio))
395		return folio;
396	if (!shmem_mapping(mapping))
397		return ERR_PTR(-ENOENT);
398
399	swp = radix_to_swp_entry(folio);
400	/* There might be swapin error entries in shmem mapping. */
401	if (non_swap_entry(swp))
402		return ERR_PTR(-ENOENT);
403	/* Prevent swapoff from happening to us */
404	si = get_swap_device(swp);
405	if (!si)
406		return ERR_PTR(-ENOENT);
407	index = swp_offset(swp);
408	folio = filemap_get_folio(swap_address_space(swp), index);
409	put_swap_device(si);
410	return folio;
411}
412
413struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
414		struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
415		bool skip_if_exists)
416{
417	struct swap_info_struct *si;
418	struct folio *folio;
419	void *shadow = NULL;
420
421	*new_page_allocated = false;
422	si = get_swap_device(entry);
423	if (!si)
424		return NULL;
425
426	for (;;) {
427		int err;
428		/*
429		 * First check the swap cache.  Since this is normally
430		 * called after swap_cache_get_folio() failed, re-calling
431		 * that would confuse statistics.
432		 */
433		folio = filemap_get_folio(swap_address_space(entry),
434						swp_offset(entry));
435		if (!IS_ERR(folio))
436			goto got_folio;
437
438		/*
439		 * Just skip read ahead for unused swap slot.
440		 * During swap_off when swap_slot_cache is disabled,
441		 * we have to handle the race between putting
442		 * swap entry in swap cache and marking swap slot
443		 * as SWAP_HAS_CACHE.  That's done in later part of code or
444		 * else swap_off will be aborted if we return NULL.
445		 */
446		if (!swap_swapcount(si, entry) && swap_slot_cache_enabled)
447			goto fail_put_swap;
448
449		/*
450		 * Get a new folio to read into from swap.  Allocate it now,
451		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
452		 * cause any racers to loop around until we add it to cache.
453		 */
454		folio = (struct folio *)alloc_pages_mpol(gfp_mask, 0,
455						mpol, ilx, numa_node_id());
456		if (!folio)
457                        goto fail_put_swap;
458
459		/*
460		 * Swap entry may have been freed since our caller observed it.
461		 */
462		err = swapcache_prepare(entry);
463		if (!err)
464			break;
465
466		folio_put(folio);
467		if (err != -EEXIST)
468			goto fail_put_swap;
469
470		/*
471		 * Protect against a recursive call to __read_swap_cache_async()
472		 * on the same entry waiting forever here because SWAP_HAS_CACHE
473		 * is set but the folio is not the swap cache yet. This can
474		 * happen today if mem_cgroup_swapin_charge_folio() below
475		 * triggers reclaim through zswap, which may call
476		 * __read_swap_cache_async() in the writeback path.
477		 */
478		if (skip_if_exists)
479			goto fail_put_swap;
480
481		/*
482		 * We might race against __delete_from_swap_cache(), and
483		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
484		 * has not yet been cleared.  Or race against another
485		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
486		 * in swap_map, but not yet added its folio to swap cache.
487		 */
488		schedule_timeout_uninterruptible(1);
489	}
490
491	/*
492	 * The swap entry is ours to swap in. Prepare the new folio.
493	 */
494
495	__folio_set_locked(folio);
496	__folio_set_swapbacked(folio);
497
498	if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
499		goto fail_unlock;
500
501	/* May fail (-ENOMEM) if XArray node allocation failed. */
502	if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
503		goto fail_unlock;
504
505	mem_cgroup_swapin_uncharge_swap(entry);
506
507	if (shadow)
508		workingset_refault(folio, shadow);
509
510	/* Caller will initiate read into locked folio */
511	folio_add_lru(folio);
512	*new_page_allocated = true;
513got_folio:
514	put_swap_device(si);
515	return folio;
516
517fail_unlock:
518	put_swap_folio(folio, entry);
519	folio_unlock(folio);
520	folio_put(folio);
521fail_put_swap:
522	put_swap_device(si);
523	return NULL;
524}
525
526/*
527 * Locate a page of swap in physical memory, reserving swap cache space
528 * and reading the disk if it is not already cached.
529 * A failure return means that either the page allocation failed or that
530 * the swap entry is no longer in use.
531 *
532 * get/put_swap_device() aren't needed to call this function, because
533 * __read_swap_cache_async() call them and swap_read_folio() holds the
534 * swap cache folio lock.
535 */
536struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
537		struct vm_area_struct *vma, unsigned long addr,
538		struct swap_iocb **plug)
539{
540	bool page_allocated;
541	struct mempolicy *mpol;
542	pgoff_t ilx;
543	struct folio *folio;
544
545	mpol = get_vma_policy(vma, addr, 0, &ilx);
546	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
547					&page_allocated, false);
548	mpol_cond_put(mpol);
549
550	if (page_allocated)
551		swap_read_folio(folio, false, plug);
552	return folio;
553}
554
555static unsigned int __swapin_nr_pages(unsigned long prev_offset,
556				      unsigned long offset,
557				      int hits,
558				      int max_pages,
559				      int prev_win)
560{
561	unsigned int pages, last_ra;
562
563	/*
564	 * This heuristic has been found to work well on both sequential and
565	 * random loads, swapping to hard disk or to SSD: please don't ask
566	 * what the "+ 2" means, it just happens to work well, that's all.
567	 */
568	pages = hits + 2;
569	if (pages == 2) {
570		/*
571		 * We can have no readahead hits to judge by: but must not get
572		 * stuck here forever, so check for an adjacent offset instead
573		 * (and don't even bother to check whether swap type is same).
574		 */
575		if (offset != prev_offset + 1 && offset != prev_offset - 1)
576			pages = 1;
577	} else {
578		unsigned int roundup = 4;
579		while (roundup < pages)
580			roundup <<= 1;
581		pages = roundup;
582	}
583
584	if (pages > max_pages)
585		pages = max_pages;
586
587	/* Don't shrink readahead too fast */
588	last_ra = prev_win / 2;
589	if (pages < last_ra)
590		pages = last_ra;
591
592	return pages;
593}
594
595static unsigned long swapin_nr_pages(unsigned long offset)
596{
597	static unsigned long prev_offset;
598	unsigned int hits, pages, max_pages;
599	static atomic_t last_readahead_pages;
600
601	max_pages = 1 << READ_ONCE(page_cluster);
602	if (max_pages <= 1)
603		return 1;
604
605	hits = atomic_xchg(&swapin_readahead_hits, 0);
606	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
607				  max_pages,
608				  atomic_read(&last_readahead_pages));
609	if (!hits)
610		WRITE_ONCE(prev_offset, offset);
611	atomic_set(&last_readahead_pages, pages);
612
613	return pages;
614}
615
616/**
617 * swap_cluster_readahead - swap in pages in hope we need them soon
618 * @entry: swap entry of this memory
619 * @gfp_mask: memory allocation flags
620 * @mpol: NUMA memory allocation policy to be applied
621 * @ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
622 *
623 * Returns the struct folio for entry and addr, after queueing swapin.
624 *
625 * Primitive swap readahead code. We simply read an aligned block of
626 * (1 << page_cluster) entries in the swap area. This method is chosen
627 * because it doesn't cost us any seek time.  We also make sure to queue
628 * the 'original' request together with the readahead ones...
629 *
630 * Note: it is intentional that the same NUMA policy and interleave index
631 * are used for every page of the readahead: neighbouring pages on swap
632 * are fairly likely to have been swapped out from the same node.
633 */
634struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
635				    struct mempolicy *mpol, pgoff_t ilx)
636{
637	struct folio *folio;
638	unsigned long entry_offset = swp_offset(entry);
639	unsigned long offset = entry_offset;
640	unsigned long start_offset, end_offset;
641	unsigned long mask;
642	struct swap_info_struct *si = swp_swap_info(entry);
643	struct blk_plug plug;
644	struct swap_iocb *splug = NULL;
645	bool page_allocated;
646
647	mask = swapin_nr_pages(offset) - 1;
648	if (!mask)
649		goto skip;
650
651	/* Read a page_cluster sized and aligned cluster around offset. */
652	start_offset = offset & ~mask;
653	end_offset = offset | mask;
654	if (!start_offset)	/* First page is swap header. */
655		start_offset++;
656	if (end_offset >= si->max)
657		end_offset = si->max - 1;
658
659	blk_start_plug(&plug);
660	for (offset = start_offset; offset <= end_offset ; offset++) {
661		/* Ok, do the async read-ahead now */
662		folio = __read_swap_cache_async(
663				swp_entry(swp_type(entry), offset),
664				gfp_mask, mpol, ilx, &page_allocated, false);
665		if (!folio)
666			continue;
667		if (page_allocated) {
668			swap_read_folio(folio, false, &splug);
669			if (offset != entry_offset) {
670				folio_set_readahead(folio);
671				count_vm_event(SWAP_RA);
672			}
673		}
674		folio_put(folio);
675	}
676	blk_finish_plug(&plug);
677	swap_read_unplug(splug);
678	lru_add_drain();	/* Push any new pages onto the LRU now */
679skip:
680	/* The page was likely read above, so no need for plugging here */
681	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
682					&page_allocated, false);
683	if (unlikely(page_allocated)) {
684		zswap_folio_swapin(folio);
685		swap_read_folio(folio, false, NULL);
686	}
687	return folio;
688}
689
690int init_swap_address_space(unsigned int type, unsigned long nr_pages)
691{
692	struct address_space *spaces, *space;
693	unsigned int i, nr;
694
695	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
696	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
697	if (!spaces)
698		return -ENOMEM;
699	for (i = 0; i < nr; i++) {
700		space = spaces + i;
701		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
702		atomic_set(&space->i_mmap_writable, 0);
703		space->a_ops = &swap_aops;
704		/* swap cache doesn't use writeback related tags */
705		mapping_set_no_writeback_tags(space);
706	}
707	nr_swapper_spaces[type] = nr;
708	swapper_spaces[type] = spaces;
709
710	return 0;
711}
712
713void exit_swap_address_space(unsigned int type)
714{
715	int i;
716	struct address_space *spaces = swapper_spaces[type];
717
718	for (i = 0; i < nr_swapper_spaces[type]; i++)
719		VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
720	kvfree(spaces);
721	nr_swapper_spaces[type] = 0;
722	swapper_spaces[type] = NULL;
723}
724
725#define SWAP_RA_ORDER_CEILING	5
726
727struct vma_swap_readahead {
728	unsigned short win;
729	unsigned short offset;
730	unsigned short nr_pte;
731};
732
733static void swap_ra_info(struct vm_fault *vmf,
734			 struct vma_swap_readahead *ra_info)
735{
736	struct vm_area_struct *vma = vmf->vma;
737	unsigned long ra_val;
738	unsigned long faddr, pfn, fpfn, lpfn, rpfn;
739	unsigned long start, end;
740	unsigned int max_win, hits, prev_win, win;
741
742	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
743			     SWAP_RA_ORDER_CEILING);
744	if (max_win == 1) {
745		ra_info->win = 1;
746		return;
747	}
748
749	faddr = vmf->address;
750	fpfn = PFN_DOWN(faddr);
751	ra_val = GET_SWAP_RA_VAL(vma);
752	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
753	prev_win = SWAP_RA_WIN(ra_val);
754	hits = SWAP_RA_HITS(ra_val);
755	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
756					       max_win, prev_win);
757	atomic_long_set(&vma->swap_readahead_info,
758			SWAP_RA_VAL(faddr, win, 0));
759	if (win == 1)
760		return;
761
762	if (fpfn == pfn + 1) {
763		lpfn = fpfn;
764		rpfn = fpfn + win;
765	} else if (pfn == fpfn + 1) {
766		lpfn = fpfn - win + 1;
767		rpfn = fpfn + 1;
768	} else {
769		unsigned int left = (win - 1) / 2;
770
771		lpfn = fpfn - left;
772		rpfn = fpfn + win - left;
773	}
774	start = max3(lpfn, PFN_DOWN(vma->vm_start),
775		     PFN_DOWN(faddr & PMD_MASK));
776	end = min3(rpfn, PFN_DOWN(vma->vm_end),
777		   PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
778
779	ra_info->nr_pte = end - start;
780	ra_info->offset = fpfn - start;
781}
782
783/**
784 * swap_vma_readahead - swap in pages in hope we need them soon
785 * @targ_entry: swap entry of the targeted memory
786 * @gfp_mask: memory allocation flags
787 * @mpol: NUMA memory allocation policy to be applied
788 * @targ_ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
789 * @vmf: fault information
790 *
791 * Returns the struct folio for entry and addr, after queueing swapin.
792 *
793 * Primitive swap readahead code. We simply read in a few pages whose
794 * virtual addresses are around the fault address in the same vma.
795 *
796 * Caller must hold read mmap_lock if vmf->vma is not NULL.
797 *
798 */
799static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
800		struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)
801{
802	struct blk_plug plug;
803	struct swap_iocb *splug = NULL;
804	struct folio *folio;
805	pte_t *pte = NULL, pentry;
806	unsigned long addr;
807	swp_entry_t entry;
808	pgoff_t ilx;
809	unsigned int i;
810	bool page_allocated;
811	struct vma_swap_readahead ra_info = {
812		.win = 1,
813	};
814
815	swap_ra_info(vmf, &ra_info);
816	if (ra_info.win == 1)
817		goto skip;
818
819	addr = vmf->address - (ra_info.offset * PAGE_SIZE);
820	ilx = targ_ilx - ra_info.offset;
821
822	blk_start_plug(&plug);
823	for (i = 0; i < ra_info.nr_pte; i++, ilx++, addr += PAGE_SIZE) {
824		if (!pte++) {
825			pte = pte_offset_map(vmf->pmd, addr);
826			if (!pte)
827				break;
828		}
829		pentry = ptep_get_lockless(pte);
830		if (!is_swap_pte(pentry))
831			continue;
832		entry = pte_to_swp_entry(pentry);
833		if (unlikely(non_swap_entry(entry)))
834			continue;
835		pte_unmap(pte);
836		pte = NULL;
837		folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
838						&page_allocated, false);
839		if (!folio)
840			continue;
841		if (page_allocated) {
842			swap_read_folio(folio, false, &splug);
843			if (i != ra_info.offset) {
844				folio_set_readahead(folio);
845				count_vm_event(SWAP_RA);
846			}
847		}
848		folio_put(folio);
849	}
850	if (pte)
851		pte_unmap(pte);
852	blk_finish_plug(&plug);
853	swap_read_unplug(splug);
854	lru_add_drain();
855skip:
856	/* The folio was likely read above, so no need for plugging here */
857	folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
858					&page_allocated, false);
859	if (unlikely(page_allocated)) {
860		zswap_folio_swapin(folio);
861		swap_read_folio(folio, false, NULL);
862	}
863	return folio;
864}
865
866/**
867 * swapin_readahead - swap in pages in hope we need them soon
868 * @entry: swap entry of this memory
869 * @gfp_mask: memory allocation flags
870 * @vmf: fault information
871 *
872 * Returns the struct page for entry and addr, after queueing swapin.
873 *
874 * It's a main entry function for swap readahead. By the configuration,
875 * it will read ahead blocks by cluster-based(ie, physical disk based)
876 * or vma-based(ie, virtual address based on faulty address) readahead.
877 */
878struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
879				struct vm_fault *vmf)
880{
881	struct mempolicy *mpol;
882	pgoff_t ilx;
883	struct folio *folio;
884
885	mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
886	folio = swap_use_vma_readahead() ?
887		swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
888		swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
889	mpol_cond_put(mpol);
890
891	if (!folio)
892		return NULL;
893	return folio_file_page(folio, swp_offset(entry));
894}
895
896#ifdef CONFIG_SYSFS
897static ssize_t vma_ra_enabled_show(struct kobject *kobj,
898				     struct kobj_attribute *attr, char *buf)
899{
900	return sysfs_emit(buf, "%s\n",
901			  enable_vma_readahead ? "true" : "false");
902}
903static ssize_t vma_ra_enabled_store(struct kobject *kobj,
904				      struct kobj_attribute *attr,
905				      const char *buf, size_t count)
906{
907	ssize_t ret;
908
909	ret = kstrtobool(buf, &enable_vma_readahead);
910	if (ret)
911		return ret;
912
913	return count;
914}
915static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);
916
917static struct attribute *swap_attrs[] = {
918	&vma_ra_enabled_attr.attr,
919	NULL,
920};
921
922static const struct attribute_group swap_attr_group = {
923	.attrs = swap_attrs,
924};
925
926static int __init swap_init_sysfs(void)
927{
928	int err;
929	struct kobject *swap_kobj;
930
931	swap_kobj = kobject_create_and_add("swap", mm_kobj);
932	if (!swap_kobj) {
933		pr_err("failed to create swap kobject\n");
934		return -ENOMEM;
935	}
936	err = sysfs_create_group(swap_kobj, &swap_attr_group);
937	if (err) {
938		pr_err("failed to register swap group\n");
939		goto delete_obj;
940	}
941	return 0;
942
943delete_obj:
944	kobject_put(swap_kobj);
945	return err;
946}
947subsys_initcall(swap_init_sysfs);
948#endif
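
Under CONFIG_SYSFS, swap_init_sysfs() in both versions registers a "swap" kobject beneath mm_kobj, so the vma_ra_enabled knob that gates swap_use_vma_readahead() normally appears as /sys/kernel/mm/swap/vma_ra_enabled; that exact path is an assumption here, inferred from mm_kobj being exported at /sys/kernel/mm. A minimal user-space sketch for inspecting and flipping the knob, passing "0" or "1" since both versions' store handlers accept those values:

/*
 * Read and optionally set /sys/kernel/mm/swap/vma_ra_enabled (path assumed
 * from mm_kobj plus the "swap" kobject created in swap_init_sysfs()).
 * Writing requires root.  Usage: ./vma_ra [0|1]
 */
#include <stdio.h>

#define VMA_RA_KNOB "/sys/kernel/mm/swap/vma_ra_enabled"

int main(int argc, char **argv)
{
	char cur[16] = "";
	FILE *f = fopen(VMA_RA_KNOB, "r");

	if (!f) {
		perror(VMA_RA_KNOB);
		return 1;
	}
	if (fgets(cur, sizeof(cur), f))
		printf("vma_ra_enabled: %s", cur);	/* prints "true" or "false" */
	fclose(f);

	if (argc > 1) {
		f = fopen(VMA_RA_KNOB, "w");
		if (!f || fputs(argv[1], f) == EOF)
			perror("write " VMA_RA_KNOB);
		if (f)
			fclose(f);
	}
	return 0;
}

The v5.14.15 store handler also accepts the literal strings "true" and "false", while the v6.8 handler parses the input with kstrtobool(), so "0"/"1" is the form that works unchanged across both.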