v5.14.15
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  linux/mm/swap_state.c
  4 *
  5 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  6 *  Swap reorganised 29.12.95, Stephen Tweedie
  7 *
  8 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
  9 */
 10#include <linux/mm.h>
 11#include <linux/gfp.h>
 12#include <linux/kernel_stat.h>
 13#include <linux/swap.h>
 14#include <linux/swapops.h>
 15#include <linux/init.h>
 16#include <linux/pagemap.h>
 17#include <linux/backing-dev.h>
 18#include <linux/blkdev.h>
 19#include <linux/pagevec.h>
 20#include <linux/migrate.h>
 21#include <linux/vmalloc.h>
 22#include <linux/swap_slots.h>
 23#include <linux/huge_mm.h>
 24#include <linux/shmem_fs.h>
 25#include "internal.h"
 26
 27/*
 28 * swapper_space is a fiction, retained to simplify the path through
 29 * vmscan's shrink_page_list.
 30 */
 31static const struct address_space_operations swap_aops = {
 32	.writepage	= swap_writepage,
 33	.set_page_dirty	= swap_set_page_dirty,
 34#ifdef CONFIG_MIGRATION
 35	.migratepage	= migrate_page,
 36#endif
 37};
 38
 39struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
 40static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
 41static bool enable_vma_readahead __read_mostly = true;
 42
 43#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
 44#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
 45#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
 46#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)
 47
 48#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
 49#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
 50#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)
 51
 52#define SWAP_RA_VAL(addr, win, hits)				\
 53	(((addr) & PAGE_MASK) |					\
 54	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
 55	 ((hits) & SWAP_RA_HITS_MASK))
 56
 57/* Initial readahead hits is 4 to start up with a small window */
 58#define GET_SWAP_RA_VAL(vma)					\
 59	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
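/*
 * [Editor's note] Worked example of the packing above, assuming
 * PAGE_SHIFT == 12 (4 KiB pages), so SWAP_RA_WIN_SHIFT == 6,
 * SWAP_RA_HITS_MASK == 0x3f and SWAP_RA_WIN_MASK == 0xfc0; the fault
 * address, window size and hit count share one value in
 * vma->swap_readahead_info:
 *
 *	SWAP_RA_VAL(0x7f1234567000UL, 8, 3) == 0x7f1234567000 | (8 << 6) | 3
 *					    == 0x7f1234567203
 *	SWAP_RA_ADDR(0x7f1234567203) == 0x7f1234567000
 *	SWAP_RA_WIN(0x7f1234567203)  == 8
 *	SWAP_RA_HITS(0x7f1234567203) == 3
 */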
 60
 61#define INC_CACHE_INFO(x)	data_race(swap_cache_info.x++)
 62#define ADD_CACHE_INFO(x, nr)	data_race(swap_cache_info.x += (nr))
 63
 64static struct {
 65	unsigned long add_total;
 66	unsigned long del_total;
 67	unsigned long find_success;
 68	unsigned long find_total;
 69} swap_cache_info;
 70
 71static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
 72
 73void show_swap_cache_info(void)
 74{
 75	printk("%lu pages in swap cache\n", total_swapcache_pages());
 76	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
 77		swap_cache_info.add_total, swap_cache_info.del_total,
 78		swap_cache_info.find_success, swap_cache_info.find_total);
 79	printk("Free swap  = %ldkB\n",
 80		get_nr_swap_pages() << (PAGE_SHIFT - 10));
 81	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
 82}
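/*
 * [Editor's note] The "<< (PAGE_SHIFT - 10)" above converts a page count
 * to kilobytes: with 4 KiB pages (PAGE_SHIFT == 12) the shift is 2, so
 * e.g. 25000 free swap pages are reported as 100000 kB.
 */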
 83
 84void *get_shadow_from_swap_cache(swp_entry_t entry)
 85{
 86	struct address_space *address_space = swap_address_space(entry);
 87	pgoff_t idx = swp_offset(entry);
 88	struct page *page;
 89
 90	page = xa_load(&address_space->i_pages, idx);
 91	if (xa_is_value(page))
 92		return page;
 93	return NULL;
 94}
 95
 96/*
 97 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 98 * but sets SwapCache flag and private instead of mapping and index.
 99 */
100int add_to_swap_cache(struct page *page, swp_entry_t entry,
101			gfp_t gfp, void **shadowp)
102{
103	struct address_space *address_space = swap_address_space(entry);
104	pgoff_t idx = swp_offset(entry);
105	XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
106	unsigned long i, nr = thp_nr_pages(page);
107	void *old;
108
109	VM_BUG_ON_PAGE(!PageLocked(page), page);
110	VM_BUG_ON_PAGE(PageSwapCache(page), page);
111	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
112
113	page_ref_add(page, nr);
114	SetPageSwapCache(page);
115
116	do {
117		xas_lock_irq(&xas);
118		xas_create_range(&xas);
119		if (xas_error(&xas))
120			goto unlock;
121		for (i = 0; i < nr; i++) {
122			VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
123			old = xas_load(&xas);
124			if (xa_is_value(old)) {
125				if (shadowp)
126					*shadowp = old;
127			}
128			set_page_private(page + i, entry.val + i);
129			xas_store(&xas, page);
130			xas_next(&xas);
131		}
132		address_space->nrpages += nr;
133		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
134		__mod_lruvec_page_state(page, NR_SWAPCACHE, nr);
135		ADD_CACHE_INFO(add_total, nr);
136unlock:
137		xas_unlock_irq(&xas);
138	} while (xas_nomem(&xas, gfp));
139
140	if (!xas_error(&xas))
141		return 0;
142
143	ClearPageSwapCache(page);
144	page_ref_sub(page, nr);
145	return xas_error(&xas);
146}
147
148/*
149 * This must be called only on pages that have
150 * been verified to be in the swap cache.
151 */
152void __delete_from_swap_cache(struct page *page,
153			swp_entry_t entry, void *shadow)
154{
155	struct address_space *address_space = swap_address_space(entry);
156	int i, nr = thp_nr_pages(page);
157	pgoff_t idx = swp_offset(entry);
158	XA_STATE(xas, &address_space->i_pages, idx);
159
160	VM_BUG_ON_PAGE(!PageLocked(page), page);
161	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
162	VM_BUG_ON_PAGE(PageWriteback(page), page);
163
164	for (i = 0; i < nr; i++) {
165		void *entry = xas_store(&xas, shadow);
166		VM_BUG_ON_PAGE(entry != page, entry);
167		set_page_private(page + i, 0);
168		xas_next(&xas);
169	}
170	ClearPageSwapCache(page);
171	address_space->nrpages -= nr;
172	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
173	__mod_lruvec_page_state(page, NR_SWAPCACHE, -nr);
174	ADD_CACHE_INFO(del_total, nr);
175}
176
177/**
178 * add_to_swap - allocate swap space for a page
179 * @page: page we want to move to swap
180 *
181 * Allocate swap space for the page and add the page to the
182 * swap cache.  Caller needs to hold the page lock. 
183 */
184int add_to_swap(struct page *page)
185{
186	swp_entry_t entry;
187	int err;
188
189	VM_BUG_ON_PAGE(!PageLocked(page), page);
190	VM_BUG_ON_PAGE(!PageUptodate(page), page);
191
192	entry = get_swap_page(page);
193	if (!entry.val)
194		return 0;
195
196	/*
197	 * XArray node allocations from PF_MEMALLOC contexts could
198	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
199	 * stops emergency reserves from being allocated.
200	 *
201	 * TODO: this could cause a theoretical memory reclaim
202	 * deadlock in the swap out path.
203	 */
204	/*
205	 * Add it to the swap cache.
206	 */
207	err = add_to_swap_cache(page, entry,
208			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
209	if (err)
210		/*
211		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
212		 * clear SWAP_HAS_CACHE flag.
213		 */
214		goto fail;
215	/*
216	 * Normally the page will be dirtied in unmap because its pte should be
217	 * dirty. A special case is a MADV_FREE page. The page's pte could have
218	 * the dirty bit cleared but the page's SwapBacked bit is still set
219	 * because clearing the dirty bit and the SwapBacked bit is not protected
220	 * by a lock. For such a page, unmap will not set the dirty bit, so page
221	 * reclaim will not write the page out. This can cause data corruption
222	 * when the page is swapped in later. Always setting the dirty bit for
223	 * the page solves the problem.
224	 */
225	set_page_dirty(page);
226
227	return 1;
228
229fail:
230	put_swap_page(page, entry);
231	return 0;
232}
233
234/*
235 * This must be called only on pages that have
236 * been verified to be in the swap cache and locked.
237 * It will never put the page into the free list,
238 * the caller has a reference on the page.
239 */
240void delete_from_swap_cache(struct page *page)
241{
242	swp_entry_t entry = { .val = page_private(page) };
243	struct address_space *address_space = swap_address_space(entry);
244
245	xa_lock_irq(&address_space->i_pages);
246	__delete_from_swap_cache(page, entry, NULL);
247	xa_unlock_irq(&address_space->i_pages);
248
249	put_swap_page(page, entry);
250	page_ref_sub(page, thp_nr_pages(page));
251}
252
253void clear_shadow_from_swap_cache(int type, unsigned long begin,
254				unsigned long end)
255{
256	unsigned long curr = begin;
257	void *old;
258
259	for (;;) {
260		swp_entry_t entry = swp_entry(type, curr);
261		struct address_space *address_space = swap_address_space(entry);
262		XA_STATE(xas, &address_space->i_pages, curr);
263
264		xa_lock_irq(&address_space->i_pages);
265		xas_for_each(&xas, old, end) {
266			if (!xa_is_value(old))
267				continue;
268			xas_store(&xas, NULL);
269		}
270		xa_unlock_irq(&address_space->i_pages);
271
272		/* search the next swapcache until we meet end */
273		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
274		curr++;
275		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
276		if (curr > end)
277			break;
278	}
279}
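/*
 * [Editor's note] The shift/increment/shift sequence above rounds curr up
 * to the first offset of the next swap address space.  Assuming
 * SWAP_ADDRESS_SPACE_SHIFT is 14 (16384 entries per space), curr == 20000
 * becomes (20000 >> 14) == 1, then 2, then 2 << 14 == 32768, so the next
 * loop iteration clears shadows from the following 16384-entry XArray.
 */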
280
281/* 
282 * If we are the only user, then try to free up the swap cache. 
283 * 
284 * It's OK to check for PageSwapCache without the page lock
285 * here because we are going to recheck again inside
286 * try_to_free_swap() _with_ the lock.
287 * 					- Marcelo
288 */
289void free_swap_cache(struct page *page)
290{
291	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
292		try_to_free_swap(page);
293		unlock_page(page);
294	}
295}
296
297/* 
298 * Perform a free_page(), also freeing any swap cache associated with
299 * this page if it is the last user of the page.
300 */
301void free_page_and_swap_cache(struct page *page)
302{
303	free_swap_cache(page);
304	if (!is_huge_zero_page(page))
305		put_page(page);
306}
307
308/*
309 * Passed an array of pages, drop them all from swapcache and then release
310 * them.  They are removed from the LRU and freed if this is their last use.
311 */
312void free_pages_and_swap_cache(struct page **pages, int nr)
313{
314	struct page **pagep = pages;
315	int i;
316
317	lru_add_drain();
318	for (i = 0; i < nr; i++)
319		free_swap_cache(pagep[i]);
320	release_pages(pagep, nr);
321}
322
323static inline bool swap_use_vma_readahead(void)
324{
325	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
326}
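/*
 * [Editor's note] nr_rotate_swap counts active swap devices that sit on
 * rotational media, so VMA-based readahead is only used when it is enabled
 * and every active swap device is non-rotational; otherwise the physically
 * clustered readahead path further below is used instead.
 */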
327
328/*
329 * Lookup a swap entry in the swap cache. A found page will be returned
330 * unlocked and with its refcount incremented - we rely on the kernel
331 * lock getting page table operations atomic even if we drop the page
332 * lock before returning.
333 */
334struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
335			       unsigned long addr)
336{
337	struct page *page;
338	struct swap_info_struct *si;
339
340	si = get_swap_device(entry);
341	if (!si)
342		return NULL;
343	page = find_get_page(swap_address_space(entry), swp_offset(entry));
344	put_swap_device(si);
345
346	INC_CACHE_INFO(find_total);
347	if (page) {
348		bool vma_ra = swap_use_vma_readahead();
349		bool readahead;
350
351		INC_CACHE_INFO(find_success);
352		/*
353		 * At the moment, we don't support PG_readahead for anon THP
354		 * so let's bail out rather than confusing the readahead stat.
355		 */
356		if (unlikely(PageTransCompound(page)))
357			return page;
358
359		readahead = TestClearPageReadahead(page);
360		if (vma && vma_ra) {
361			unsigned long ra_val;
362			int win, hits;
363
364			ra_val = GET_SWAP_RA_VAL(vma);
365			win = SWAP_RA_WIN(ra_val);
366			hits = SWAP_RA_HITS(ra_val);
367			if (readahead)
368				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
369			atomic_long_set(&vma->swap_readahead_info,
370					SWAP_RA_VAL(addr, win, hits));
371		}
372
373		if (readahead) {
374			count_vm_event(SWAP_RA_HIT);
375			if (!vma || !vma_ra)
376				atomic_inc(&swapin_readahead_hits);
377		}
378	}
379
380	return page;
381}
382
383/**
384 * find_get_incore_page - Find and get a page from the page or swap caches.
385 * @mapping: The address_space to search.
386 * @index: The page cache index.
387 *
388 * This differs from find_get_page() in that it will also look for the
389 * page in the swap cache.
390 *
391 * Return: The found page or %NULL.
392 */
393struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
394{
395	swp_entry_t swp;
396	struct swap_info_struct *si;
397	struct page *page = pagecache_get_page(mapping, index,
398						FGP_ENTRY | FGP_HEAD, 0);
399
400	if (!page)
401		return page;
402	if (!xa_is_value(page))
403		return find_subpage(page, index);
404	if (!shmem_mapping(mapping))
405		return NULL;
406
407	swp = radix_to_swp_entry(page);
408	/* Prevent swapoff from happening to us */
409	si = get_swap_device(swp);
410	if (!si)
411		return NULL;
412	page = find_get_page(swap_address_space(swp), swp_offset(swp));
413	put_swap_device(si);
414	return page;
415}
416
417struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
418			struct vm_area_struct *vma, unsigned long addr,
419			bool *new_page_allocated)
420{
421	struct swap_info_struct *si;
422	struct page *page;
423	void *shadow = NULL;
424
425	*new_page_allocated = false;
426
427	for (;;) {
428		int err;
429		/*
430		 * First check the swap cache.  Since this is normally
431		 * called after lookup_swap_cache() failed, re-calling
432		 * that would confuse statistics.
433		 */
434		si = get_swap_device(entry);
435		if (!si)
436			return NULL;
437		page = find_get_page(swap_address_space(entry),
438				     swp_offset(entry));
439		put_swap_device(si);
440		if (page)
441			return page;
442
443		/*
444		 * Just skip read ahead for unused swap slot.
445		 * During swap_off when swap_slot_cache is disabled,
446		 * we have to handle the race between putting
447		 * swap entry in swap cache and marking swap slot
448		 * as SWAP_HAS_CACHE.  That's done in a later part of the code or
449		 * else swap_off will be aborted if we return NULL.
450		 */
451		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
452			return NULL;
453
454		/*
455		 * Get a new page to read into from swap.  Allocate it now,
456		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
457		 * cause any racers to loop around until we add it to cache.
458		 */
459		page = alloc_page_vma(gfp_mask, vma, addr);
460		if (!page)
461			return NULL;
462
463		/*
464		 * Swap entry may have been freed since our caller observed it.
465		 */
466		err = swapcache_prepare(entry);
467		if (!err)
468			break;
469
470		put_page(page);
471		if (err != -EEXIST)
472			return NULL;
473
474		/*
475		 * We might race against __delete_from_swap_cache(), and
476		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
477		 * has not yet been cleared.  Or race against another
478		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
479		 * in swap_map, but not yet added its page to swap cache.
480		 */
481		cond_resched();
482	}
483
484	/*
485	 * The swap entry is ours to swap in. Prepare the new page.
486	 */
487
488	__SetPageLocked(page);
489	__SetPageSwapBacked(page);
490
491	if (mem_cgroup_swapin_charge_page(page, NULL, gfp_mask, entry))
492		goto fail_unlock;
493
494	/* May fail (-ENOMEM) if XArray node allocation failed. */
495	if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
496		goto fail_unlock;
497
498	mem_cgroup_swapin_uncharge_swap(entry);
499
500	if (shadow)
501		workingset_refault(page, shadow);
502
503	/* Caller will initiate read into locked page */
504	lru_cache_add(page);
505	*new_page_allocated = true;
506	return page;
507
508fail_unlock:
509	put_swap_page(page, entry);
510	unlock_page(page);
511	put_page(page);
512	return NULL;
513}
514
515/*
516 * Locate a page of swap in physical memory, reserving swap cache space
517 * and reading the disk if it is not already cached.
518 * A failure return means that either the page allocation failed or that
519 * the swap entry is no longer in use.
520 */
521struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
522		struct vm_area_struct *vma, unsigned long addr, bool do_poll)
523{
524	bool page_was_allocated;
525	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
526			vma, addr, &page_was_allocated);
527
528	if (page_was_allocated)
529		swap_readpage(retpage, do_poll);
530
531	return retpage;
532}
533
534static unsigned int __swapin_nr_pages(unsigned long prev_offset,
535				      unsigned long offset,
536				      int hits,
537				      int max_pages,
538				      int prev_win)
539{
540	unsigned int pages, last_ra;
541
542	/*
543	 * This heuristic has been found to work well on both sequential and
544	 * random loads, swapping to hard disk or to SSD: please don't ask
545	 * what the "+ 2" means, it just happens to work well, that's all.
546	 */
547	pages = hits + 2;
548	if (pages == 2) {
549		/*
550		 * We can have no readahead hits to judge by: but must not get
551		 * stuck here forever, so check for an adjacent offset instead
552		 * (and don't even bother to check whether swap type is same).
553		 */
554		if (offset != prev_offset + 1 && offset != prev_offset - 1)
555			pages = 1;
556	} else {
557		unsigned int roundup = 4;
558		while (roundup < pages)
559			roundup <<= 1;
560		pages = roundup;
561	}
562
563	if (pages > max_pages)
564		pages = max_pages;
565
566	/* Don't shrink readahead too fast */
567	last_ra = prev_win / 2;
568	if (pages < last_ra)
569		pages = last_ra;
570
571	return pages;
572}
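/*
 * [Editor's note] Two illustrative evaluations of the heuristic above
 * (all values hypothetical):
 *
 *	hits == 5, max_pages == 8, prev_win == 4:
 *		pages = 5 + 2 = 7, rounded up to 8, capped at 8  -> 8
 *	hits == 0, max_pages == 8, prev_win == 8, offset not adjacent:
 *		pages = 2, dropped to 1 for the non-adjacent offset, then
 *		raised back to prev_win / 2 = 4 to avoid shrinking too fast
 */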
573
574static unsigned long swapin_nr_pages(unsigned long offset)
575{
576	static unsigned long prev_offset;
577	unsigned int hits, pages, max_pages;
578	static atomic_t last_readahead_pages;
579
580	max_pages = 1 << READ_ONCE(page_cluster);
581	if (max_pages <= 1)
582		return 1;
583
584	hits = atomic_xchg(&swapin_readahead_hits, 0);
585	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
586				  max_pages,
587				  atomic_read(&last_readahead_pages));
588	if (!hits)
589		WRITE_ONCE(prev_offset, offset);
590	atomic_set(&last_readahead_pages, pages);
591
592	return pages;
593}
594
595/**
596 * swap_cluster_readahead - swap in pages in hope we need them soon
597 * @entry: swap entry of this memory
598 * @gfp_mask: memory allocation flags
599 * @vmf: fault information
600 *
601 * Returns the struct page for entry and addr, after queueing swapin.
602 *
603 * Primitive swap readahead code. We simply read an aligned block of
604 * (1 << page_cluster) entries in the swap area. This method is chosen
605 * because it doesn't cost us any seek time.  We also make sure to queue
606 * the 'original' request together with the readahead ones...
607 *
608 * This has been extended to use the NUMA policies from the mm triggering
609 * the readahead.
610 *
611 * Caller must hold read mmap_lock if vmf->vma is not NULL.
612 */
613struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
614				struct vm_fault *vmf)
615{
616	struct page *page;
617	unsigned long entry_offset = swp_offset(entry);
618	unsigned long offset = entry_offset;
619	unsigned long start_offset, end_offset;
620	unsigned long mask;
621	struct swap_info_struct *si = swp_swap_info(entry);
622	struct blk_plug plug;
623	bool do_poll = true, page_allocated;
624	struct vm_area_struct *vma = vmf->vma;
625	unsigned long addr = vmf->address;
626
627	mask = swapin_nr_pages(offset) - 1;
628	if (!mask)
629		goto skip;
630
631	do_poll = false;
632	/* Read a page_cluster sized and aligned cluster around offset. */
633	start_offset = offset & ~mask;
634	end_offset = offset | mask;
635	if (!start_offset)	/* First page is swap header. */
636		start_offset++;
637	if (end_offset >= si->max)
638		end_offset = si->max - 1;
639
640	blk_start_plug(&plug);
641	for (offset = start_offset; offset <= end_offset ; offset++) {
642		/* Ok, do the async read-ahead now */
643		page = __read_swap_cache_async(
644			swp_entry(swp_type(entry), offset),
645			gfp_mask, vma, addr, &page_allocated);
646		if (!page)
647			continue;
648		if (page_allocated) {
649			swap_readpage(page, false);
650			if (offset != entry_offset) {
651				SetPageReadahead(page);
652				count_vm_event(SWAP_RA);
653			}
654		}
655		put_page(page);
656	}
657	blk_finish_plug(&plug);
658
659	lru_add_drain();	/* Push any new pages onto the LRU now */
660skip:
661	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
662}
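/*
 * [Editor's note] Example of the cluster window computed above, with
 * hypothetical values: if swapin_nr_pages() returns 8 then mask == 7, and
 * for a faulting swap offset of 291 the cluster spans offsets 288..295
 * (291 & ~7 .. 291 | 7).  Every offset is queued via
 * __read_swap_cache_async(); pages other than 291 are marked with
 * SetPageReadahead(), and the read_swap_cache_async() call at "skip:"
 * returns the page for the original entry.  A start_offset of 0 is bumped
 * to 1 because offset 0 holds the swap header.
 */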
663
664int init_swap_address_space(unsigned int type, unsigned long nr_pages)
665{
666	struct address_space *spaces, *space;
667	unsigned int i, nr;
668
669	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
670	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
671	if (!spaces)
672		return -ENOMEM;
673	for (i = 0; i < nr; i++) {
674		space = spaces + i;
675		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
676		atomic_set(&space->i_mmap_writable, 0);
677		space->a_ops = &swap_aops;
678		/* swap cache doesn't use writeback related tags */
679		mapping_set_no_writeback_tags(space);
680	}
681	nr_swapper_spaces[type] = nr;
682	swapper_spaces[type] = spaces;
683
684	return 0;
685}
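/*
 * [Editor's note] Sizing example: assuming SWAP_ADDRESS_SPACE_PAGES is
 * 16384 (SWAP_ADDRESS_SPACE_SHIFT == 14), each address_space covers 64 MiB
 * of swap with 4 KiB pages, so a 1 GiB swap area (262144 pages) gets
 * nr = DIV_ROUND_UP(262144, 16384) = 16 XArrays in swapper_spaces[type].
 */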
686
687void exit_swap_address_space(unsigned int type)
688{
689	int i;
690	struct address_space *spaces = swapper_spaces[type];
691
692	for (i = 0; i < nr_swapper_spaces[type]; i++)
693		VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
694	kvfree(spaces);
695	nr_swapper_spaces[type] = 0;
696	swapper_spaces[type] = NULL;
697}
698
699static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
700				     unsigned long faddr,
701				     unsigned long lpfn,
702				     unsigned long rpfn,
703				     unsigned long *start,
704				     unsigned long *end)
705{
706	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
707		      PFN_DOWN(faddr & PMD_MASK));
708	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
709		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
710}
711
712static void swap_ra_info(struct vm_fault *vmf,
713			struct vma_swap_readahead *ra_info)
714{
715	struct vm_area_struct *vma = vmf->vma;
716	unsigned long ra_val;
717	unsigned long faddr, pfn, fpfn;
718	unsigned long start, end;
719	pte_t *pte, *orig_pte;
720	unsigned int max_win, hits, prev_win, win, left;
721#ifndef CONFIG_64BIT
722	pte_t *tpte;
723#endif
724
725	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
726			     SWAP_RA_ORDER_CEILING);
727	if (max_win == 1) {
728		ra_info->win = 1;
729		return;
730	}
731
732	faddr = vmf->address;
733	orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
734
735	fpfn = PFN_DOWN(faddr);
736	ra_val = GET_SWAP_RA_VAL(vma);
737	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
738	prev_win = SWAP_RA_WIN(ra_val);
739	hits = SWAP_RA_HITS(ra_val);
740	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
741					       max_win, prev_win);
742	atomic_long_set(&vma->swap_readahead_info,
743			SWAP_RA_VAL(faddr, win, 0));
744
745	if (win == 1) {
746		pte_unmap(orig_pte);
747		return;
748	}
749
750	/* Copy the PTEs because the page table may be unmapped */
751	if (fpfn == pfn + 1)
752		swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
753	else if (pfn == fpfn + 1)
754		swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
755				  &start, &end);
756	else {
757		left = (win - 1) / 2;
758		swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
759				  &start, &end);
760	}
761	ra_info->nr_pte = end - start;
762	ra_info->offset = fpfn - start;
763	pte -= ra_info->offset;
764#ifdef CONFIG_64BIT
765	ra_info->ptes = pte;
766#else
767	tpte = ra_info->ptes;
768	for (pfn = start; pfn != end; pfn++)
769		*tpte++ = *pte++;
770#endif
771	pte_unmap(orig_pte);
772}
773
774/**
775 * swap_vma_readahead - swap in pages in hope we need them soon
776 * @fentry: swap entry of this memory
777 * @gfp_mask: memory allocation flags
778 * @vmf: fault information
779 *
780 * Returns the struct page for entry and addr, after queueing swapin.
781 *
782 * Primitive swap readahead code. We simply read in a few pages whose
783 * virtual addresses are around the fault address in the same vma.
784 *
785 * Caller must hold read mmap_lock if vmf->vma is not NULL.
786 *
787 */
788static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
789				       struct vm_fault *vmf)
790{
791	struct blk_plug plug;
792	struct vm_area_struct *vma = vmf->vma;
793	struct page *page;
794	pte_t *pte, pentry;
795	swp_entry_t entry;
796	unsigned int i;
797	bool page_allocated;
798	struct vma_swap_readahead ra_info = {
799		.win = 1,
800	};
801
802	swap_ra_info(vmf, &ra_info);
803	if (ra_info.win == 1)
804		goto skip;
805
806	blk_start_plug(&plug);
807	for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
808	     i++, pte++) {
809		pentry = *pte;
810		if (pte_none(pentry))
811			continue;
812		if (pte_present(pentry))
813			continue;
814		entry = pte_to_swp_entry(pentry);
815		if (unlikely(non_swap_entry(entry)))
816			continue;
817		page = __read_swap_cache_async(entry, gfp_mask, vma,
818					       vmf->address, &page_allocated);
819		if (!page)
820			continue;
821		if (page_allocated) {
822			swap_readpage(page, false);
823			if (i != ra_info.offset) {
824				SetPageReadahead(page);
825				count_vm_event(SWAP_RA);
826			}
827		}
828		put_page(page);
829	}
830	blk_finish_plug(&plug);
831	lru_add_drain();
832skip:
833	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
834				     ra_info.win == 1);
835}
836
837/**
838 * swapin_readahead - swap in pages in hope we need them soon
839 * @entry: swap entry of this memory
840 * @gfp_mask: memory allocation flags
841 * @vmf: fault information
842 *
843 * Returns the struct page for entry and addr, after queueing swapin.
844 *
845 * It's the main entry function for swap readahead. Depending on the
846 * configuration, it reads ahead using either cluster-based (i.e. physical
847 * disk based) or vma-based (i.e. based on the faulting virtual address) readahead.
848 */
849struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
850				struct vm_fault *vmf)
851{
852	return swap_use_vma_readahead() ?
853			swap_vma_readahead(entry, gfp_mask, vmf) :
854			swap_cluster_readahead(entry, gfp_mask, vmf);
855}
856
857#ifdef CONFIG_SYSFS
858static ssize_t vma_ra_enabled_show(struct kobject *kobj,
859				     struct kobj_attribute *attr, char *buf)
860{
861	return sysfs_emit(buf, "%s\n",
862			  enable_vma_readahead ? "true" : "false");
863}
864static ssize_t vma_ra_enabled_store(struct kobject *kobj,
865				      struct kobj_attribute *attr,
866				      const char *buf, size_t count)
867{
868	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
869		enable_vma_readahead = true;
870	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
871		enable_vma_readahead = false;
872	else
873		return -EINVAL;
874
875	return count;
876}
877static struct kobj_attribute vma_ra_enabled_attr =
878	__ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
879	       vma_ra_enabled_store);
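/*
 * [Editor's note] With CONFIG_SYSFS this attribute is typically exposed as
 * /sys/kernel/mm/swap/vma_ra_enabled (the "swap" kobject is created under
 * mm_kobj in swap_init_sysfs() below); writing "true"/"1" or "false"/"0"
 * toggles VMA-based readahead at run time.
 */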
880
881static struct attribute *swap_attrs[] = {
882	&vma_ra_enabled_attr.attr,
883	NULL,
884};
885
886static const struct attribute_group swap_attr_group = {
887	.attrs = swap_attrs,
888};
889
890static int __init swap_init_sysfs(void)
891{
892	int err;
893	struct kobject *swap_kobj;
894
895	swap_kobj = kobject_create_and_add("swap", mm_kobj);
896	if (!swap_kobj) {
897		pr_err("failed to create swap kobject\n");
898		return -ENOMEM;
899	}
900	err = sysfs_create_group(swap_kobj, &swap_attr_group);
901	if (err) {
902		pr_err("failed to register swap group\n");
903		goto delete_obj;
904	}
905	return 0;
906
907delete_obj:
908	kobject_put(swap_kobj);
909	return err;
910}
911subsys_initcall(swap_init_sysfs);
912#endif
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  linux/mm/swap_state.c
  4 *
  5 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  6 *  Swap reorganised 29.12.95, Stephen Tweedie
  7 *
  8 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
  9 */
 10#include <linux/mm.h>
 11#include <linux/gfp.h>
 12#include <linux/kernel_stat.h>
 13#include <linux/mempolicy.h>
 14#include <linux/swap.h>
 15#include <linux/swapops.h>
 16#include <linux/init.h>
 17#include <linux/pagemap.h>
 18#include <linux/pagevec.h>
 19#include <linux/backing-dev.h>
 20#include <linux/blkdev.h>
 21#include <linux/migrate.h>
 22#include <linux/vmalloc.h>
 23#include <linux/swap_slots.h>
 24#include <linux/huge_mm.h>
 25#include <linux/shmem_fs.h>
 26#include "internal.h"
 27#include "swap.h"
 28
 29/*
 30 * swapper_space is a fiction, retained to simplify the path through
 31 * vmscan's shrink_folio_list.
 32 */
 33static const struct address_space_operations swap_aops = {
 34	.writepage	= swap_writepage,
 35	.dirty_folio	= noop_dirty_folio,
 36#ifdef CONFIG_MIGRATION
 37	.migrate_folio	= migrate_folio,
 38#endif
 39};
 40
 41struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
 42static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
 43static bool enable_vma_readahead __read_mostly = true;
 44
 45#define SWAP_RA_ORDER_CEILING	5
 46
 47#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
 48#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
 49#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
 50#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)
 51
 52#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
 53#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
 54#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)
 55
 56#define SWAP_RA_VAL(addr, win, hits)				\
 57	(((addr) & PAGE_MASK) |					\
 58	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
 59	 ((hits) & SWAP_RA_HITS_MASK))
 60
 61/* Initial readahead hits is 4 to start up with a small window */
 62#define GET_SWAP_RA_VAL(vma)					\
 63	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
 64
 65static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
 66
 67void show_swap_cache_info(void)
 68{
 69	printk("%lu pages in swap cache\n", total_swapcache_pages());
 70	printk("Free swap  = %ldkB\n", K(get_nr_swap_pages()));
 71	printk("Total swap = %lukB\n", K(total_swap_pages));
 72}
 73
 74void *get_shadow_from_swap_cache(swp_entry_t entry)
 75{
 76	struct address_space *address_space = swap_address_space(entry);
 77	pgoff_t idx = swap_cache_index(entry);
 78	void *shadow;
 79
 80	shadow = xa_load(&address_space->i_pages, idx);
 81	if (xa_is_value(shadow))
 82		return shadow;
 83	return NULL;
 84}
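/*
 * [Editor's note] Unlike the v5.14.15 listing above, which indexed each
 * swap address_space XArray with the raw swp_offset(), this version uses
 * swap_cache_index(); judging by the SWAP_ADDRESS_SPACE_MASK arithmetic in
 * clear_shadow_from_swap_cache() below, it reduces the offset to an index
 * within the entry's own address-space chunk, shrinking each XArray's
 * index range.
 */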
 85
 86/*
 87 * add_to_swap_cache resembles filemap_add_folio on swapper_space,
 88 * but sets SwapCache flag and private instead of mapping and index.
 89 */
 90int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
 91			gfp_t gfp, void **shadowp)
 92{
 93	struct address_space *address_space = swap_address_space(entry);
 94	pgoff_t idx = swap_cache_index(entry);
 95	XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
 96	unsigned long i, nr = folio_nr_pages(folio);
 97	void *old;
 98
 99	xas_set_update(&xas, workingset_update_node);
100
101	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
102	VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
103	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
104
105	folio_ref_add(folio, nr);
106	folio_set_swapcache(folio);
107	folio->swap = entry;
108
109	do {
110		xas_lock_irq(&xas);
111		xas_create_range(&xas);
112		if (xas_error(&xas))
113			goto unlock;
114		for (i = 0; i < nr; i++) {
115			VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
116			if (shadowp) {
117				old = xas_load(&xas);
118				if (xa_is_value(old))
119					*shadowp = old;
120			}
121			xas_store(&xas, folio);
122			xas_next(&xas);
123		}
124		address_space->nrpages += nr;
125		__node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
126		__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
127unlock:
128		xas_unlock_irq(&xas);
129	} while (xas_nomem(&xas, gfp));
130
131	if (!xas_error(&xas))
132		return 0;
133
134	folio_clear_swapcache(folio);
135	folio_ref_sub(folio, nr);
136	return xas_error(&xas);
137}
138
139/*
140 * This must be called only on folios that have
141 * been verified to be in the swap cache.
142 */
143void __delete_from_swap_cache(struct folio *folio,
144			swp_entry_t entry, void *shadow)
145{
146	struct address_space *address_space = swap_address_space(entry);
147	int i;
148	long nr = folio_nr_pages(folio);
149	pgoff_t idx = swap_cache_index(entry);
150	XA_STATE(xas, &address_space->i_pages, idx);
151
152	xas_set_update(&xas, workingset_update_node);
153
154	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
155	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
156	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
157
158	for (i = 0; i < nr; i++) {
159		void *entry = xas_store(&xas, shadow);
160		VM_BUG_ON_PAGE(entry != folio, entry);
161		xas_next(&xas);
162	}
163	folio->swap.val = 0;
164	folio_clear_swapcache(folio);
165	address_space->nrpages -= nr;
166	__node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
167	__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
168}
169
170/**
171 * add_to_swap - allocate swap space for a folio
172 * @folio: folio we want to move to swap
173 *
174 * Allocate swap space for the folio and add the folio to the
175 * swap cache.
176 *
177 * Context: Caller needs to hold the folio lock.
178 * Return: Whether the folio was added to the swap cache.
179 */
180bool add_to_swap(struct folio *folio)
181{
182	swp_entry_t entry;
183	int err;
184
185	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
186	VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);
187
188	entry = folio_alloc_swap(folio);
189	if (!entry.val)
190		return false;
191
192	/*
193	 * XArray node allocations from PF_MEMALLOC contexts could
194	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
195	 * stops emergency reserves from being allocated.
196	 *
197	 * TODO: this could cause a theoretical memory reclaim
198	 * deadlock in the swap out path.
199	 */
200	/*
201	 * Add it to the swap cache.
202	 */
203	err = add_to_swap_cache(folio, entry,
204			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
205	if (err)
206		/*
207		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
208		 * clear SWAP_HAS_CACHE flag.
209		 */
210		goto fail;
211	/*
212	 * Normally the folio will be dirtied in unmap because its
213	 * pte should be dirty. A special case is a MADV_FREE page. The
214	 * page's pte could have the dirty bit cleared but the folio's
215	 * SwapBacked flag is still set because clearing the dirty bit
216	 * and the SwapBacked flag is not protected by a lock. For such a
217	 * folio, unmap will not set the dirty bit for it, so folio reclaim will
218	 * not write the folio out. This can cause data corruption when
219	 * the folio is swapped in later. Always setting the dirty flag
220	 * for the folio solves the problem.
221	 */
222	folio_mark_dirty(folio);
223
224	return true;
225
226fail:
227	put_swap_folio(folio, entry);
228	return false;
229}
230
231/*
232 * This must be called only on folios that have
233 * been verified to be in the swap cache and locked.
234 * It will never put the folio into the free list,
235 * the caller has a reference on the folio.
236 */
237void delete_from_swap_cache(struct folio *folio)
238{
239	swp_entry_t entry = folio->swap;
240	struct address_space *address_space = swap_address_space(entry);
241
242	xa_lock_irq(&address_space->i_pages);
243	__delete_from_swap_cache(folio, entry, NULL);
244	xa_unlock_irq(&address_space->i_pages);
245
246	put_swap_folio(folio, entry);
247	folio_ref_sub(folio, folio_nr_pages(folio));
248}
249
250void clear_shadow_from_swap_cache(int type, unsigned long begin,
251				unsigned long end)
252{
253	unsigned long curr = begin;
254	void *old;
255
256	for (;;) {
257		swp_entry_t entry = swp_entry(type, curr);
258		unsigned long index = curr & SWAP_ADDRESS_SPACE_MASK;
259		struct address_space *address_space = swap_address_space(entry);
260		XA_STATE(xas, &address_space->i_pages, index);
261
262		xas_set_update(&xas, workingset_update_node);
263
264		xa_lock_irq(&address_space->i_pages);
265		xas_for_each(&xas, old, min(index + (end - curr), SWAP_ADDRESS_SPACE_PAGES)) {
266			if (!xa_is_value(old))
267				continue;
268			xas_store(&xas, NULL);
269		}
270		xa_unlock_irq(&address_space->i_pages);
271
272		/* search the next swapcache until we meet end */
273		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
274		curr++;
275		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
276		if (curr > end)
277			break;
278	}
279}
280
281/*
282 * If we are the only user, then try to free up the swap cache.
283 *
284 * It's OK to check the swapcache flag without the folio lock
285 * here because we are going to recheck again inside
286 * folio_free_swap() _with_ the lock.
287 * 					- Marcelo
288 */
289void free_swap_cache(struct folio *folio)
290{
291	if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
292	    folio_trylock(folio)) {
293		folio_free_swap(folio);
294		folio_unlock(folio);
295	}
296}
297
298/*
299 * Perform a free_page(), also freeing any swap cache associated with
300 * this page if it is the last user of the page.
301 */
302void free_page_and_swap_cache(struct page *page)
303{
304	struct folio *folio = page_folio(page);
305
306	free_swap_cache(folio);
307	if (!is_huge_zero_folio(folio))
308		folio_put(folio);
309}
310
311/*
312 * Passed an array of pages, drop them all from swapcache and then release
313 * them.  They are removed from the LRU and freed if this is their last use.
314 */
315void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
316{
317	struct folio_batch folios;
318	unsigned int refs[PAGEVEC_SIZE];
319
320	lru_add_drain();
321	folio_batch_init(&folios);
322	for (int i = 0; i < nr; i++) {
323		struct folio *folio = page_folio(encoded_page_ptr(pages[i]));
324
325		free_swap_cache(folio);
326		refs[folios.nr] = 1;
327		if (unlikely(encoded_page_flags(pages[i]) &
328			     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
329			refs[folios.nr] = encoded_nr_pages(pages[++i]);
330
331		if (folio_batch_add(&folios, folio) == 0)
332			folios_put_refs(&folios, refs);
333	}
334	if (folios.nr)
335		folios_put_refs(&folios, refs);
336}
337
338static inline bool swap_use_vma_readahead(void)
339{
340	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
341}
342
343/*
344 * Lookup a swap entry in the swap cache. A found folio will be returned
345 * unlocked and with its refcount incremented - we rely on the kernel
346 * lock getting page table operations atomic even if we drop the folio
347 * lock before returning.
348 *
349 * Caller must lock the swap device or hold a reference to keep it valid.
350 */
351struct folio *swap_cache_get_folio(swp_entry_t entry,
352		struct vm_area_struct *vma, unsigned long addr)
353{
354	struct folio *folio;
355
356	folio = filemap_get_folio(swap_address_space(entry), swap_cache_index(entry));
357	if (!IS_ERR(folio)) {
358		bool vma_ra = swap_use_vma_readahead();
359		bool readahead;
360
361		/*
362		 * At the moment, we don't support PG_readahead for anon THP
363		 * so let's bail out rather than confusing the readahead stat.
364		 */
365		if (unlikely(folio_test_large(folio)))
366			return folio;
367
368		readahead = folio_test_clear_readahead(folio);
369		if (vma && vma_ra) {
370			unsigned long ra_val;
371			int win, hits;
372
373			ra_val = GET_SWAP_RA_VAL(vma);
374			win = SWAP_RA_WIN(ra_val);
375			hits = SWAP_RA_HITS(ra_val);
376			if (readahead)
377				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
378			atomic_long_set(&vma->swap_readahead_info,
379					SWAP_RA_VAL(addr, win, hits));
380		}
381
382		if (readahead) {
383			count_vm_event(SWAP_RA_HIT);
384			if (!vma || !vma_ra)
385				atomic_inc(&swapin_readahead_hits);
386		}
387	} else {
388		folio = NULL;
389	}
390
391	return folio;
392}
393
394/**
395 * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
396 * @mapping: The address_space to search.
397 * @index: The page cache index.
398 *
399 * This differs from filemap_get_folio() in that it will also look for the
400 * folio in the swap cache.
401 *
402 * Return: The found folio or %NULL.
403 */
404struct folio *filemap_get_incore_folio(struct address_space *mapping,
405		pgoff_t index)
406{
407	swp_entry_t swp;
408	struct swap_info_struct *si;
409	struct folio *folio = filemap_get_entry(mapping, index);
410
411	if (!folio)
412		return ERR_PTR(-ENOENT);
413	if (!xa_is_value(folio))
414		return folio;
415	if (!shmem_mapping(mapping))
416		return ERR_PTR(-ENOENT);
417
418	swp = radix_to_swp_entry(folio);
419	/* There might be swapin error entries in shmem mapping. */
420	if (non_swap_entry(swp))
421		return ERR_PTR(-ENOENT);
422	/* Prevent swapoff from happening to us */
423	si = get_swap_device(swp);
424	if (!si)
425		return ERR_PTR(-ENOENT);
426	index = swap_cache_index(swp);
427	folio = filemap_get_folio(swap_address_space(swp), index);
428	put_swap_device(si);
429	return folio;
430}
431
432struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
433		struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
434		bool skip_if_exists)
435{
436	struct swap_info_struct *si;
437	struct folio *folio;
438	struct folio *new_folio = NULL;
439	struct folio *result = NULL;
440	void *shadow = NULL;
441
442	*new_page_allocated = false;
443	si = get_swap_device(entry);
444	if (!si)
445		return NULL;
446
447	for (;;) {
448		int err;
449		/*
450		 * First check the swap cache.  Since this is normally
451		 * called after swap_cache_get_folio() failed, re-calling
452		 * that would confuse statistics.
453		 */
454		folio = filemap_get_folio(swap_address_space(entry),
455					  swap_cache_index(entry));
456		if (!IS_ERR(folio))
457			goto got_folio;
458
459		/*
460		 * Just skip read ahead for unused swap slot.
461		 * During swap_off when swap_slot_cache is disabled,
462		 * we have to handle the race between putting
463		 * swap entry in swap cache and marking swap slot
464		 * as SWAP_HAS_CACHE.  That's done in a later part of the code or
465		 * else swap_off will be aborted if we return NULL.
466		 */
467		if (!swap_swapcount(si, entry) && swap_slot_cache_enabled)
468			goto put_and_return;
469
470		/*
471		 * Get a new folio to read into from swap.  Allocate it now if
472		 * new_folio does not exist yet, before marking swap_map SWAP_HAS_CACHE,
473		 * when -EEXIST will cause any racers to loop around until we
474		 * add it to cache.
475		 */
476		if (!new_folio) {
477			new_folio = folio_alloc_mpol(gfp_mask, 0, mpol, ilx, numa_node_id());
478			if (!new_folio)
479				goto put_and_return;
480		}
481
482		/*
483		 * Swap entry may have been freed since our caller observed it.
484		 */
485		err = swapcache_prepare(entry, 1);
486		if (!err)
487			break;
488		else if (err != -EEXIST)
489			goto put_and_return;
490
491		/*
492		 * Protect against a recursive call to __read_swap_cache_async()
493		 * on the same entry waiting forever here because SWAP_HAS_CACHE
494		 * is set but the folio is not the swap cache yet. This can
495		 * happen today if mem_cgroup_swapin_charge_folio() below
496		 * triggers reclaim through zswap, which may call
497		 * __read_swap_cache_async() in the writeback path.
498		 */
499		if (skip_if_exists)
500			goto put_and_return;
501
502		/*
503		 * We might race against __delete_from_swap_cache(), and
504		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
505		 * has not yet been cleared.  Or race against another
506		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
507		 * in swap_map, but not yet added its folio to swap cache.
508		 */
509		schedule_timeout_uninterruptible(1);
510	}
511
512	/*
513	 * The swap entry is ours to swap in. Prepare the new folio.
514	 */
515	__folio_set_locked(new_folio);
516	__folio_set_swapbacked(new_folio);
517
518	if (mem_cgroup_swapin_charge_folio(new_folio, NULL, gfp_mask, entry))
519		goto fail_unlock;
520
521	/* May fail (-ENOMEM) if XArray node allocation failed. */
522	if (add_to_swap_cache(new_folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
523		goto fail_unlock;
524
525	mem_cgroup_swapin_uncharge_swap(entry, 1);
526
527	if (shadow)
528		workingset_refault(new_folio, shadow);
529
530	/* Caller will initiate read into locked new_folio */
531	folio_add_lru(new_folio);
532	*new_page_allocated = true;
533	folio = new_folio;
534got_folio:
535	result = folio;
536	goto put_and_return;
537
538fail_unlock:
539	put_swap_folio(new_folio, entry);
540	folio_unlock(new_folio);
541put_and_return:
542	put_swap_device(si);
543	if (!(*new_page_allocated) && new_folio)
544		folio_put(new_folio);
545	return result;
546}
547
548/*
549 * Locate a page of swap in physical memory, reserving swap cache space
550 * and reading the disk if it is not already cached.
551 * A failure return means that either the page allocation failed or that
552 * the swap entry is no longer in use.
553 *
554 * get/put_swap_device() aren't needed to call this function, because
555 * __read_swap_cache_async() call them and swap_read_folio() holds the
556 * swap cache folio lock.
557 */
558struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
559		struct vm_area_struct *vma, unsigned long addr,
560		struct swap_iocb **plug)
561{
562	bool page_allocated;
563	struct mempolicy *mpol;
564	pgoff_t ilx;
565	struct folio *folio;
566
567	mpol = get_vma_policy(vma, addr, 0, &ilx);
568	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
569					&page_allocated, false);
570	mpol_cond_put(mpol);
571
572	if (page_allocated)
573		swap_read_folio(folio, plug);
574	return folio;
575}
576
577static unsigned int __swapin_nr_pages(unsigned long prev_offset,
578				      unsigned long offset,
579				      int hits,
580				      int max_pages,
581				      int prev_win)
582{
583	unsigned int pages, last_ra;
584
585	/*
586	 * This heuristic has been found to work well on both sequential and
587	 * random loads, swapping to hard disk or to SSD: please don't ask
588	 * what the "+ 2" means, it just happens to work well, that's all.
589	 */
590	pages = hits + 2;
591	if (pages == 2) {
592		/*
593		 * We can have no readahead hits to judge by: but must not get
594		 * stuck here forever, so check for an adjacent offset instead
595		 * (and don't even bother to check whether swap type is same).
596		 */
597		if (offset != prev_offset + 1 && offset != prev_offset - 1)
598			pages = 1;
599	} else {
600		unsigned int roundup = 4;
601		while (roundup < pages)
602			roundup <<= 1;
603		pages = roundup;
604	}
605
606	if (pages > max_pages)
607		pages = max_pages;
608
609	/* Don't shrink readahead too fast */
610	last_ra = prev_win / 2;
611	if (pages < last_ra)
612		pages = last_ra;
613
614	return pages;
615}
616
617static unsigned long swapin_nr_pages(unsigned long offset)
618{
619	static unsigned long prev_offset;
620	unsigned int hits, pages, max_pages;
621	static atomic_t last_readahead_pages;
622
623	max_pages = 1 << READ_ONCE(page_cluster);
624	if (max_pages <= 1)
625		return 1;
626
627	hits = atomic_xchg(&swapin_readahead_hits, 0);
628	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
629				  max_pages,
630				  atomic_read(&last_readahead_pages));
631	if (!hits)
632		WRITE_ONCE(prev_offset, offset);
633	atomic_set(&last_readahead_pages, pages);
634
635	return pages;
636}
637
638/**
639 * swap_cluster_readahead - swap in pages in hope we need them soon
640 * @entry: swap entry of this memory
641 * @gfp_mask: memory allocation flags
642 * @mpol: NUMA memory allocation policy to be applied
643 * @ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
644 *
645 * Returns the struct folio for entry and addr, after queueing swapin.
646 *
647 * Primitive swap readahead code. We simply read an aligned block of
648 * (1 << page_cluster) entries in the swap area. This method is chosen
649 * because it doesn't cost us any seek time.  We also make sure to queue
650 * the 'original' request together with the readahead ones...
651 *
652 * Note: it is intentional that the same NUMA policy and interleave index
653 * are used for every page of the readahead: neighbouring pages on swap
654 * are fairly likely to have been swapped out from the same node.
655 */
656struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
657				    struct mempolicy *mpol, pgoff_t ilx)
658{
659	struct folio *folio;
660	unsigned long entry_offset = swp_offset(entry);
661	unsigned long offset = entry_offset;
662	unsigned long start_offset, end_offset;
663	unsigned long mask;
664	struct swap_info_struct *si = swp_swap_info(entry);
665	struct blk_plug plug;
666	struct swap_iocb *splug = NULL;
667	bool page_allocated;
668
669	mask = swapin_nr_pages(offset) - 1;
670	if (!mask)
671		goto skip;
672
673	/* Read a page_cluster sized and aligned cluster around offset. */
674	start_offset = offset & ~mask;
675	end_offset = offset | mask;
676	if (!start_offset)	/* First page is swap header. */
677		start_offset++;
678	if (end_offset >= si->max)
679		end_offset = si->max - 1;
680
681	blk_start_plug(&plug);
682	for (offset = start_offset; offset <= end_offset ; offset++) {
683		/* Ok, do the async read-ahead now */
684		folio = __read_swap_cache_async(
685				swp_entry(swp_type(entry), offset),
686				gfp_mask, mpol, ilx, &page_allocated, false);
687		if (!folio)
688			continue;
689		if (page_allocated) {
690			swap_read_folio(folio, &splug);
691			if (offset != entry_offset) {
692				folio_set_readahead(folio);
693				count_vm_event(SWAP_RA);
694			}
695		}
696		folio_put(folio);
697	}
698	blk_finish_plug(&plug);
699	swap_read_unplug(splug);
700	lru_add_drain();	/* Push any new pages onto the LRU now */
701skip:
702	/* The page was likely read above, so no need for plugging here */
703	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
704					&page_allocated, false);
705	if (unlikely(page_allocated))
706		swap_read_folio(folio, NULL);
707	return folio;
708}
709
710int init_swap_address_space(unsigned int type, unsigned long nr_pages)
711{
712	struct address_space *spaces, *space;
713	unsigned int i, nr;
714
715	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
716	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
717	if (!spaces)
718		return -ENOMEM;
719	for (i = 0; i < nr; i++) {
720		space = spaces + i;
721		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
722		atomic_set(&space->i_mmap_writable, 0);
723		space->a_ops = &swap_aops;
724		/* swap cache doesn't use writeback related tags */
725		mapping_set_no_writeback_tags(space);
726	}
727	nr_swapper_spaces[type] = nr;
728	swapper_spaces[type] = spaces;
729
730	return 0;
731}
732
733void exit_swap_address_space(unsigned int type)
734{
735	int i;
736	struct address_space *spaces = swapper_spaces[type];
737
738	for (i = 0; i < nr_swapper_spaces[type]; i++)
739		VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
740	kvfree(spaces);
741	nr_swapper_spaces[type] = 0;
742	swapper_spaces[type] = NULL;
743}
744
745static int swap_vma_ra_win(struct vm_fault *vmf, unsigned long *start,
746			   unsigned long *end)
747{
748	struct vm_area_struct *vma = vmf->vma;
749	unsigned long ra_val;
750	unsigned long faddr, prev_faddr, left, right;
751	unsigned int max_win, hits, prev_win, win;
752
753	max_win = 1 << min(READ_ONCE(page_cluster), SWAP_RA_ORDER_CEILING);
754	if (max_win == 1)
755		return 1;
756
757	faddr = vmf->address;
758	ra_val = GET_SWAP_RA_VAL(vma);
759	prev_faddr = SWAP_RA_ADDR(ra_val);
760	prev_win = SWAP_RA_WIN(ra_val);
761	hits = SWAP_RA_HITS(ra_val);
762	win = __swapin_nr_pages(PFN_DOWN(prev_faddr), PFN_DOWN(faddr), hits,
763				max_win, prev_win);
764	atomic_long_set(&vma->swap_readahead_info, SWAP_RA_VAL(faddr, win, 0));
765	if (win == 1)
766		return 1;
767
768	if (faddr == prev_faddr + PAGE_SIZE)
769		left = faddr;
770	else if (prev_faddr == faddr + PAGE_SIZE)
771		left = faddr - (win << PAGE_SHIFT) + PAGE_SIZE;
772	else
773		left = faddr - (((win - 1) / 2) << PAGE_SHIFT);
774	right = left + (win << PAGE_SHIFT);
775	if ((long)left < 0)
776		left = 0;
777	*start = max3(left, vma->vm_start, faddr & PMD_MASK);
778	*end = min3(right, vma->vm_end, (faddr & PMD_MASK) + PMD_SIZE);
779
780	return win;
781}
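/*
 * [Editor's note] Window-placement example with hypothetical values: with
 * win == 8 and 4 KiB pages, a forward sequential fault
 * (faddr == prev_faddr + PAGE_SIZE) yields left == faddr and
 * right == faddr + 8 * PAGE_SIZE; a non-adjacent fault centres the window
 * slightly behind the fault, left == faddr - 3 pages and right == left +
 * 8 pages.  Both bounds are then clamped to the VMA and to the PMD-sized
 * region around faddr.
 */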
782
783/**
784 * swap_vma_readahead - swap in pages in hope we need them soon
785 * @targ_entry: swap entry of the targeted memory
786 * @gfp_mask: memory allocation flags
787 * @mpol: NUMA memory allocation policy to be applied
788 * @targ_ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
789 * @vmf: fault information
790 *
791 * Returns the struct folio for entry and addr, after queueing swapin.
792 *
793 * Primitive swap readahead code. We simply read in a few pages whose
794 * virtual addresses are around the fault address in the same vma.
795 *
796 * Caller must hold read mmap_lock if vmf->vma is not NULL.
797 *
798 */
799static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
800		struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)
801{
802	struct blk_plug plug;
803	struct swap_iocb *splug = NULL;
804	struct folio *folio;
805	pte_t *pte = NULL, pentry;
806	int win;
807	unsigned long start, end, addr;
808	swp_entry_t entry;
809	pgoff_t ilx;
810	bool page_allocated;
811
812	win = swap_vma_ra_win(vmf, &start, &end);
813	if (win == 1)
814		goto skip;
815
816	ilx = targ_ilx - PFN_DOWN(vmf->address - start);
817
818	blk_start_plug(&plug);
819	for (addr = start; addr < end; ilx++, addr += PAGE_SIZE) {
820		if (!pte++) {
821			pte = pte_offset_map(vmf->pmd, addr);
822			if (!pte)
823				break;
824		}
825		pentry = ptep_get_lockless(pte);
826		if (!is_swap_pte(pentry))
827			continue;
828		entry = pte_to_swp_entry(pentry);
829		if (unlikely(non_swap_entry(entry)))
830			continue;
831		pte_unmap(pte);
832		pte = NULL;
833		folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
834						&page_allocated, false);
835		if (!folio)
836			continue;
837		if (page_allocated) {
838			swap_read_folio(folio, &splug);
839			if (addr != vmf->address) {
840				folio_set_readahead(folio);
841				count_vm_event(SWAP_RA);
842			}
843		}
844		folio_put(folio);
845	}
846	if (pte)
847		pte_unmap(pte);
848	blk_finish_plug(&plug);
849	swap_read_unplug(splug);
850	lru_add_drain();
851skip:
852	/* The folio was likely read above, so no need for plugging here */
853	folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
854					&page_allocated, false);
855	if (unlikely(page_allocated))
856		swap_read_folio(folio, NULL);
857	return folio;
858}
859
860/**
861 * swapin_readahead - swap in pages in hope we need them soon
862 * @entry: swap entry of this memory
863 * @gfp_mask: memory allocation flags
864 * @vmf: fault information
865 *
866 * Returns the struct folio for entry and addr, after queueing swapin.
867 *
868 * It's the main entry function for swap readahead. Depending on the
869 * configuration, it reads ahead using either cluster-based (i.e. physical
870 * disk based) or vma-based (i.e. based on the faulting virtual address) readahead.
871 */
872struct folio *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
873				struct vm_fault *vmf)
874{
875	struct mempolicy *mpol;
876	pgoff_t ilx;
877	struct folio *folio;
878
879	mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
880	folio = swap_use_vma_readahead() ?
881		swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
882		swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
883	mpol_cond_put(mpol);
884
885	return folio;
886}
887
888#ifdef CONFIG_SYSFS
889static ssize_t vma_ra_enabled_show(struct kobject *kobj,
890				     struct kobj_attribute *attr, char *buf)
891{
892	return sysfs_emit(buf, "%s\n", str_true_false(enable_vma_readahead));
893}
894static ssize_t vma_ra_enabled_store(struct kobject *kobj,
895				      struct kobj_attribute *attr,
896				      const char *buf, size_t count)
897{
898	ssize_t ret;
899
900	ret = kstrtobool(buf, &enable_vma_readahead);
901	if (ret)
902		return ret;
903
904	return count;
905}
906static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);
907
908static struct attribute *swap_attrs[] = {
909	&vma_ra_enabled_attr.attr,
910	NULL,
911};
912
913static const struct attribute_group swap_attr_group = {
914	.attrs = swap_attrs,
915};
916
917static int __init swap_init_sysfs(void)
918{
919	int err;
920	struct kobject *swap_kobj;
921
922	swap_kobj = kobject_create_and_add("swap", mm_kobj);
923	if (!swap_kobj) {
924		pr_err("failed to create swap kobject\n");
925		return -ENOMEM;
926	}
927	err = sysfs_create_group(swap_kobj, &swap_attr_group);
928	if (err) {
929		pr_err("failed to register swap group\n");
930		goto delete_obj;
931	}
932	return 0;
933
934delete_obj:
935	kobject_put(swap_kobj);
936	return err;
937}
938subsys_initcall(swap_init_sysfs);
939#endif