v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  linux/mm/swap_state.c
  4 *
  5 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  6 *  Swap reorganised 29.12.95, Stephen Tweedie
  7 *
  8 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
  9 */
 10#include <linux/mm.h>
 11#include <linux/gfp.h>
 12#include <linux/kernel_stat.h>
 13#include <linux/swap.h>
 14#include <linux/swapops.h>
 15#include <linux/init.h>
 16#include <linux/pagemap.h>
 17#include <linux/backing-dev.h>
 18#include <linux/blkdev.h>
 19#include <linux/pagevec.h>
 20#include <linux/migrate.h>
 21#include <linux/vmalloc.h>
 22#include <linux/swap_slots.h>
 23#include <linux/huge_mm.h>
 24
 25#include <asm/pgtable.h>
 26
 27/*
 28 * swapper_space is a fiction, retained to simplify the path through
 29 * vmscan's shrink_page_list.
 30 */
 31static const struct address_space_operations swap_aops = {
 32	.writepage	= swap_writepage,
 33	.set_page_dirty	= swap_set_page_dirty,
 34#ifdef CONFIG_MIGRATION
 35	.migratepage	= migrate_page,
 36#endif
 37};
 38
 39struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
 40static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
 41static bool enable_vma_readahead __read_mostly = true;
 42
 43#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
 44#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
 45#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
 46#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)
 47
 48#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
 49#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
 50#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)
 51
 52#define SWAP_RA_VAL(addr, win, hits)				\
 53	(((addr) & PAGE_MASK) |					\
 54	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
 55	 ((hits) & SWAP_RA_HITS_MASK))
 56
 57/* Initial readahead hits is 4 to start up with a small window */
 58#define GET_SWAP_RA_VAL(vma)					\
 59	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
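
The SWAP_RA_* macros above pack three per-VMA readahead fields into one long: the page-aligned fault address in the high bits, the window size in bits PAGE_SHIFT/2..PAGE_SHIFT-1, and the hit count in the low PAGE_SHIFT/2 bits. A minimal userspace sketch of the same packing, assuming PAGE_SHIFT is 12 as on x86 and redefining the macros locally (nothing here is kernel API):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)			/* 6 */
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)	/* 0x03f */
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)	/* 0xfc0 */

#define SWAP_RA_HITS(v)	((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)	(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)	((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((unsigned long)(win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) | \
	 ((hits) & SWAP_RA_HITS_MASK))

int main(void)
{
	unsigned long v = SWAP_RA_VAL(0x7f1234567000UL, 8, 3);

	/* Round-trips to addr=0x7f1234567000, win=8, hits=3. */
	printf("addr=%#lx win=%lu hits=%lu\n",
	       SWAP_RA_ADDR(v), SWAP_RA_WIN(v), SWAP_RA_HITS(v));
	return 0;
}
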
 60
 61#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)
 62#define ADD_CACHE_INFO(x, nr)	do { swap_cache_info.x += (nr); } while (0)
 63
 64static struct {
 65	unsigned long add_total;
 66	unsigned long del_total;
 67	unsigned long find_success;
 68	unsigned long find_total;
 69} swap_cache_info;
 70
 71unsigned long total_swapcache_pages(void)
 72{
 73	unsigned int i, j, nr;
 74	unsigned long ret = 0;
 75	struct address_space *spaces;
 76
 77	rcu_read_lock();
 78	for (i = 0; i < MAX_SWAPFILES; i++) {
 79		/*
 80		 * The corresponding entries in nr_swapper_spaces and
 81		 * swapper_spaces will be reused only after at least
 82		 * one grace period.  So it is impossible for them
 83		 * to belong to different usages.
 84		 */
 85		nr = nr_swapper_spaces[i];
 86		spaces = rcu_dereference(swapper_spaces[i]);
 87		if (!nr || !spaces)
 88			continue;
 89		for (j = 0; j < nr; j++)
 90			ret += spaces[j].nrpages;
 91	}
 92	rcu_read_unlock();
 93	return ret;
 94}
 95
 96static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
 97
 98void show_swap_cache_info(void)
 99{
100	printk("%lu pages in swap cache\n", total_swapcache_pages());
101	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
102		swap_cache_info.add_total, swap_cache_info.del_total,
103		swap_cache_info.find_success, swap_cache_info.find_total);
104	printk("Free swap  = %ldkB\n",
105		get_nr_swap_pages() << (PAGE_SHIFT - 10));
106	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
107}
108
109/*
110 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
111 * but sets SwapCache flag and private instead of mapping and index.
112 */
113int __add_to_swap_cache(struct page *page, swp_entry_t entry)
114{
115	int error, i, nr = hpage_nr_pages(page);
116	struct address_space *address_space;
117	pgoff_t idx = swp_offset(entry);
118
119	VM_BUG_ON_PAGE(!PageLocked(page), page);
120	VM_BUG_ON_PAGE(PageSwapCache(page), page);
121	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
122
123	page_ref_add(page, nr);
124	SetPageSwapCache(page);
125
126	address_space = swap_address_space(entry);
127	xa_lock_irq(&address_space->i_pages);
128	for (i = 0; i < nr; i++) {
129		set_page_private(page + i, entry.val + i);
130		error = radix_tree_insert(&address_space->i_pages,
131					  idx + i, page + i);
132		if (unlikely(error))
133			break;
134	}
135	if (likely(!error)) {
136		address_space->nrpages += nr;
137		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
138		ADD_CACHE_INFO(add_total, nr);
139	} else {
140		/*
141		 * Only the context which have set SWAP_HAS_CACHE flag
142		 * would call add_to_swap_cache().
143		 * So add_to_swap_cache() doesn't return -EEXIST.
144		 */
145		VM_BUG_ON(error == -EEXIST);
146		set_page_private(page + i, 0UL);
147		while (i--) {
148			radix_tree_delete(&address_space->i_pages, idx + i);
149			set_page_private(page + i, 0UL);
150		}
151		ClearPageSwapCache(page);
152		page_ref_sub(page, nr);
153	}
154	xa_unlock_irq(&address_space->i_pages);
155
156	return error;
157}
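
The error path above follows a common all-or-nothing shape: insert nr consecutive entries under one lock, and on the first failure walk the index back down, deleting whatever was already inserted. A toy userspace sketch of that rollback pattern, with a plain array standing in for the radix tree (slot_insert and insert_range are made up for illustration):

#include <stdio.h>

#define NSLOTS	16

static void *slots[NSLOTS];	/* stand-in for the radix tree */

/* Fails, like radix_tree_insert(), if the slot is already occupied. */
static int slot_insert(unsigned long idx, void *item)
{
	if (idx >= NSLOTS || slots[idx])
		return -1;
	slots[idx] = item;
	return 0;
}

/*
 * All-or-nothing insertion of nr consecutive items, mirroring the
 * loop/rollback shape of __add_to_swap_cache() above: on the first
 * failure, walk i back down and delete everything inserted so far.
 */
static int insert_range(unsigned long idx, void **items, int nr)
{
	int i, error = 0;

	for (i = 0; i < nr; i++) {
		error = slot_insert(idx + i, items[i]);
		if (error)
			break;
	}
	if (error) {
		while (i--)
			slots[idx + i] = NULL;	/* radix_tree_delete() */
	}
	return error;
}

int main(void)
{
	char *pages[4] = { "p0", "p1", "p2", "p3" };

	slots[6] = "occupied";	/* force a mid-range collision */
	if (insert_range(4, (void **)pages, 4))
		printf("insert failed, rolled back: slot 4 is %s\n",
		       slots[4] ? "busy" : "empty");
	return 0;
}
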
158
159
160int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
161{
162	int error;
163
164	error = radix_tree_maybe_preload_order(gfp_mask, compound_order(page));
165	if (!error) {
166		error = __add_to_swap_cache(page, entry);
167		radix_tree_preload_end();
168	}
169	return error;
170}
171
172/*
173 * This must be called only on pages that have
174 * been verified to be in the swap cache.
175 */
176void __delete_from_swap_cache(struct page *page)
177{
178	struct address_space *address_space;
179	int i, nr = hpage_nr_pages(page);
180	swp_entry_t entry;
181	pgoff_t idx;
182
183	VM_BUG_ON_PAGE(!PageLocked(page), page);
184	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
185	VM_BUG_ON_PAGE(PageWriteback(page), page);
186
187	entry.val = page_private(page);
188	address_space = swap_address_space(entry);
189	idx = swp_offset(entry);
190	for (i = 0; i < nr; i++) {
191		radix_tree_delete(&address_space->i_pages, idx + i);
192		set_page_private(page + i, 0);
193	}
194	ClearPageSwapCache(page);
195	address_space->nrpages -= nr;
196	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
197	ADD_CACHE_INFO(del_total, nr);
198}
199
200/**
201 * add_to_swap - allocate swap space for a page
202 * @page: page we want to move to swap
203 *
204 * Allocate swap space for the page and add the page to the
205 * swap cache.  Caller needs to hold the page lock. 
206 */
207int add_to_swap(struct page *page)
208{
209	swp_entry_t entry;
210	int err;
211
212	VM_BUG_ON_PAGE(!PageLocked(page), page);
213	VM_BUG_ON_PAGE(!PageUptodate(page), page);
214
215	entry = get_swap_page(page);
216	if (!entry.val)
217		return 0;
218
219	if (mem_cgroup_try_charge_swap(page, entry))
220		goto fail;
221
222	/*
223	 * Radix-tree node allocations from PF_MEMALLOC contexts could
224	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
225	 * stops emergency reserves from being allocated.
226	 *
227	 * TODO: this could cause a theoretical memory reclaim
228	 * deadlock in the swap out path.
229	 */
230	/*
231	 * Add it to the swap cache.
232	 */
233	err = add_to_swap_cache(page, entry,
234			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
235	/* -ENOMEM radix-tree allocation failure */
236	if (err)
237		/*
238		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
239		 * clear SWAP_HAS_CACHE flag.
240		 */
241		goto fail;
242	/*
243	 * Normally the page will be dirtied in unmap because its pte should be
244	 * dirty. A special case is MADV_FREE page. The page's pte could have
245	 * dirty bit cleared but the page's SwapBacked bit is still set because
246	 * clearing the dirty bit and SwapBacked bit is not protected by a lock.
247	 * For such a page, unmap will not set the dirty bit, so page reclaim
248	 * will not write the page out. This can cause data corruption when the
249	 * page is swapped in later. Always setting the dirty bit for the page
250	 * solves the problem.
251	 */
252	set_page_dirty(page);
253
254	return 1;
255
256fail:
257	put_swap_page(page, entry);
258	return 0;
259}
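
The MADV_FREE case described in the comment above is driven from userspace via madvise(2); MADV_FREE is a real flag since Linux 4.5. A small illustrative snippet that creates exactly such lazily-freeable anonymous pages:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096 * 4;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	memset(p, 0xaa, len);		/* dirty the pages */

	/*
	 * MADV_FREE tells the kernel it may discard these pages lazily
	 * instead of writing them to swap; the pte dirty bit may be
	 * cleared while the page stays swap-backed, which is the race
	 * the comment above is about.
	 */
	if (madvise(p, len, MADV_FREE))
		perror("madvise");

	p[0] = 1;	/* writing again cancels the lazy free for that page */
	munmap(p, len);
	return 0;
}
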
260
261/*
262 * This must be called only on pages that have
263 * been verified to be in the swap cache and locked.
264 * It will never put the page into the free list,
265 * the caller has a reference on the page.
266 */
267void delete_from_swap_cache(struct page *page)
268{
269	swp_entry_t entry;
270	struct address_space *address_space;
271
272	entry.val = page_private(page);
273
274	address_space = swap_address_space(entry);
275	xa_lock_irq(&address_space->i_pages);
276	__delete_from_swap_cache(page);
277	xa_unlock_irq(&address_space->i_pages);
278
279	put_swap_page(page, entry);
280	page_ref_sub(page, hpage_nr_pages(page));
281}
282
283/* 
284 * If we are the only user, then try to free up the swap cache. 
285 * 
286 * It's ok to check for PageSwapCache without the page lock
287 * here because we are going to recheck again inside
288 * try_to_free_swap() _with_ the lock.
289 * 					- Marcelo
290 */
291static inline void free_swap_cache(struct page *page)
292{
293	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
294		try_to_free_swap(page);
295		unlock_page(page);
296	}
297}
298
299/* 
300 * Perform a free_page(), also freeing any swap cache associated with
301 * this page if it is the last user of the page.
302 */
303void free_page_and_swap_cache(struct page *page)
304{
305	free_swap_cache(page);
306	if (!is_huge_zero_page(page))
307		put_page(page);
308}
309
310/*
311 * Passed an array of pages, drop them all from swapcache and then release
312 * them.  They are removed from the LRU and freed if this is their last use.
313 */
314void free_pages_and_swap_cache(struct page **pages, int nr)
315{
316	struct page **pagep = pages;
317	int i;
318
319	lru_add_drain();
320	for (i = 0; i < nr; i++)
321		free_swap_cache(pagep[i]);
322	release_pages(pagep, nr);
323}
324
325static inline bool swap_use_vma_readahead(void)
326{
327	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
328}
329
330/*
331 * Lookup a swap entry in the swap cache. A found page will be returned
332 * unlocked and with its refcount incremented - we rely on the kernel
333 * lock getting page table operations atomic even if we drop the page
334 * lock before returning.
335 */
336struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
337			       unsigned long addr)
338{
339	struct page *page;
340
341	page = find_get_page(swap_address_space(entry), swp_offset(entry));
342
343	INC_CACHE_INFO(find_total);
344	if (page) {
345		bool vma_ra = swap_use_vma_readahead();
346		bool readahead;
347
348		INC_CACHE_INFO(find_success);
349		/*
350		 * At the moment, we don't support PG_readahead for anon THP
351		 * so let's bail out rather than confusing the readahead stat.
352		 */
353		if (unlikely(PageTransCompound(page)))
354			return page;
355
356		readahead = TestClearPageReadahead(page);
357		if (vma && vma_ra) {
358			unsigned long ra_val;
359			int win, hits;
360
361			ra_val = GET_SWAP_RA_VAL(vma);
362			win = SWAP_RA_WIN(ra_val);
363			hits = SWAP_RA_HITS(ra_val);
364			if (readahead)
365				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
366			atomic_long_set(&vma->swap_readahead_info,
367					SWAP_RA_VAL(addr, win, hits));
368		}
369
370		if (readahead) {
371			count_vm_event(SWAP_RA_HIT);
372			if (!vma || !vma_ra)
373				atomic_inc(&swapin_readahead_hits);
374		}
375	}
376
377	return page;
378}
379
380struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
381			struct vm_area_struct *vma, unsigned long addr,
382			bool *new_page_allocated)
383{
384	struct page *found_page, *new_page = NULL;
385	struct address_space *swapper_space = swap_address_space(entry);
386	int err;
387	*new_page_allocated = false;
388
389	do {
390		/*
391		 * First check the swap cache.  Since this is normally
392		 * called after lookup_swap_cache() failed, re-calling
393		 * that would confuse statistics.
394		 */
395		found_page = find_get_page(swapper_space, swp_offset(entry));
396		if (found_page)
397			break;
398
399		/*
400		 * Just skip read ahead for unused swap slot.
401		 * During swap_off when swap_slot_cache is disabled,
402		 * we have to handle the race between putting
403		 * swap entry in swap cache and marking swap slot
404		 * as SWAP_HAS_CACHE.  That's done in a later part of the
405		 * code, or else swap_off will be aborted if we return NULL.
406		 */
407		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
408			break;
409
410		/*
411		 * Get a new page to read into from swap.
412		 */
413		if (!new_page) {
414			new_page = alloc_page_vma(gfp_mask, vma, addr);
415			if (!new_page)
416				break;		/* Out of memory */
417		}
418
419		/*
420		 * call radix_tree_preload() while we can wait.
421		 */
422		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
423		if (err)
424			break;
425
426		/*
427		 * Swap entry may have been freed since our caller observed it.
428		 */
429		err = swapcache_prepare(entry);
430		if (err == -EEXIST) {
431			radix_tree_preload_end();
432			/*
433			 * We might race against get_swap_page() and stumble
434			 * across a SWAP_HAS_CACHE swap_map entry whose page
435			 * has not been brought into the swapcache yet.
436			 */
437			cond_resched();
438			continue;
439		}
440		if (err) {		/* swp entry is obsolete ? */
441			radix_tree_preload_end();
442			break;
443		}
444
445		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
446		__SetPageLocked(new_page);
447		__SetPageSwapBacked(new_page);
448		err = __add_to_swap_cache(new_page, entry);
449		if (likely(!err)) {
450			radix_tree_preload_end();
451			/*
452			 * Initiate read into locked page and return.
453			 */
454			lru_cache_add_anon(new_page);
455			*new_page_allocated = true;
456			return new_page;
457		}
458		radix_tree_preload_end();
459		__ClearPageLocked(new_page);
460		/*
461		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
462		 * clear SWAP_HAS_CACHE flag.
463		 */
464		put_swap_page(new_page, entry);
465	} while (err != -ENOMEM);
466
467	if (new_page)
468		put_page(new_page);
469	return found_page;
470}
471
472/*
473 * Locate a page of swap in physical memory, reserving swap cache space
474 * and reading the disk if it is not already cached.
475 * A failure return means that either the page allocation failed or that
476 * the swap entry is no longer in use.
477 */
478struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
479		struct vm_area_struct *vma, unsigned long addr, bool do_poll)
480{
481	bool page_was_allocated;
482	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
483			vma, addr, &page_was_allocated);
484
485	if (page_was_allocated)
486		swap_readpage(retpage, do_poll);
487
488	return retpage;
489}
490
491static unsigned int __swapin_nr_pages(unsigned long prev_offset,
492				      unsigned long offset,
493				      int hits,
494				      int max_pages,
495				      int prev_win)
496{
497	unsigned int pages, last_ra;
498
499	/*
500	 * This heuristic has been found to work well on both sequential and
501	 * random loads, swapping to hard disk or to SSD: please don't ask
502	 * what the "+ 2" means, it just happens to work well, that's all.
503	 */
504	pages = hits + 2;
505	if (pages == 2) {
506		/*
507		 * We can have no readahead hits to judge by: but must not get
508		 * stuck here forever, so check for an adjacent offset instead
509		 * (and don't even bother to check whether swap type is same).
510		 */
511		if (offset != prev_offset + 1 && offset != prev_offset - 1)
512			pages = 1;
513	} else {
514		unsigned int roundup = 4;
515		while (roundup < pages)
516			roundup <<= 1;
517		pages = roundup;
518	}
519
520	if (pages > max_pages)
521		pages = max_pages;
522
523	/* Don't shrink readahead too fast */
524	last_ra = prev_win / 2;
525	if (pages < last_ra)
526		pages = last_ra;
527
528	return pages;
529}
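
A few worked values for the heuristic above, via a userspace copy of the function (ra_window is a local rename; the arithmetic is the same as __swapin_nr_pages()):

#include <stdio.h>

static unsigned int ra_window(unsigned long prev, unsigned long off,
			      int hits, int max, int prev_win)
{
	unsigned int pages = hits + 2, last_ra;

	if (pages == 2) {
		if (off != prev + 1 && off != prev - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}
	if (pages > (unsigned int)max)
		pages = max;
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;
	return pages;
}

int main(void)
{
	printf("%u %u %u %u\n",
	       ra_window(10, 50, 0, 8, 0),	/* no hits, not adjacent: 1 */
	       ra_window(49, 50, 0, 8, 0),	/* no hits, adjacent: 2 */
	       ra_window(10, 50, 3, 8, 0),	/* 3 hits: 5, rounded up to 8 */
	       ra_window(10, 50, 0, 8, 8));	/* shrink floor: prev_win/2 = 4 */
	return 0;
}
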
530
531static unsigned long swapin_nr_pages(unsigned long offset)
532{
533	static unsigned long prev_offset;
534	unsigned int hits, pages, max_pages;
535	static atomic_t last_readahead_pages;
536
537	max_pages = 1 << READ_ONCE(page_cluster);
538	if (max_pages <= 1)
539		return 1;
540
541	hits = atomic_xchg(&swapin_readahead_hits, 0);
542	pages = __swapin_nr_pages(prev_offset, offset, hits, max_pages,
543				  atomic_read(&last_readahead_pages));
544	if (!hits)
545		prev_offset = offset;
546	atomic_set(&last_readahead_pages, pages);
547
548	return pages;
549}
550
551/**
552 * swap_cluster_readahead - swap in pages in hope we need them soon
553 * @entry: swap entry of this memory
554 * @gfp_mask: memory allocation flags
555 * @vmf: fault information
556 *
557 * Returns the struct page for entry and addr, after queueing swapin.
558 *
559 * Primitive swap readahead code. We simply read an aligned block of
560 * (1 << page_cluster) entries in the swap area. This method is chosen
561 * because it doesn't cost us any seek time.  We also make sure to queue
562 * the 'original' request together with the readahead ones...
563 *
564 * This has been extended to use the NUMA policies from the mm triggering
565 * the readahead.
566 *
567 * Caller must hold down_read on the vma->vm_mm if vmf->vma is not NULL.
568 */
569struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
570				struct vm_fault *vmf)
571{
572	struct page *page;
573	unsigned long entry_offset = swp_offset(entry);
574	unsigned long offset = entry_offset;
575	unsigned long start_offset, end_offset;
576	unsigned long mask;
577	struct swap_info_struct *si = swp_swap_info(entry);
578	struct blk_plug plug;
579	bool do_poll = true, page_allocated;
580	struct vm_area_struct *vma = vmf->vma;
581	unsigned long addr = vmf->address;
582
583	mask = swapin_nr_pages(offset) - 1;
584	if (!mask)
585		goto skip;
586
587	do_poll = false;
588	/* Read a page_cluster sized and aligned cluster around offset. */
589	start_offset = offset & ~mask;
590	end_offset = offset | mask;
591	if (!start_offset)	/* First page is swap header. */
592		start_offset++;
593	if (end_offset >= si->max)
594		end_offset = si->max - 1;
595
596	blk_start_plug(&plug);
597	for (offset = start_offset; offset <= end_offset ; offset++) {
598		/* Ok, do the async read-ahead now */
599		page = __read_swap_cache_async(
600			swp_entry(swp_type(entry), offset),
601			gfp_mask, vma, addr, &page_allocated);
602		if (!page)
603			continue;
604		if (page_allocated) {
605			swap_readpage(page, false);
606			if (offset != entry_offset) {
607				SetPageReadahead(page);
608				count_vm_event(SWAP_RA);
609			}
610		}
611		put_page(page);
612	}
613	blk_finish_plug(&plug);
614
615	lru_add_drain();	/* Push any new pages onto the LRU now */
616skip:
617	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
618}
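
The start/end arithmetic above reads the whole naturally-aligned cluster containing the faulting offset. A quick numeric illustration, assuming swapin_nr_pages() returned a window of 8:

#include <stdio.h>

int main(void)
{
	unsigned long offset = 725;
	unsigned long win = 8;			/* assumed swapin_nr_pages() result */
	unsigned long mask = win - 1;		/* 0x7 */

	/* Aligned cluster containing the faulting offset: [720, 727]. */
	printf("start=%lu end=%lu\n", offset & ~mask, offset | mask);
	return 0;
}
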
619
620int init_swap_address_space(unsigned int type, unsigned long nr_pages)
621{
622	struct address_space *spaces, *space;
623	unsigned int i, nr;
624
625	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
626	spaces = kvzalloc(sizeof(struct address_space) * nr, GFP_KERNEL);
627	if (!spaces)
628		return -ENOMEM;
629	for (i = 0; i < nr; i++) {
630		space = spaces + i;
631		INIT_RADIX_TREE(&space->i_pages, GFP_ATOMIC|__GFP_NOWARN);
632		atomic_set(&space->i_mmap_writable, 0);
633		space->a_ops = &swap_aops;
634		/* swap cache doesn't use writeback related tags */
635		mapping_set_no_writeback_tags(space);
636	}
637	nr_swapper_spaces[type] = nr;
638	rcu_assign_pointer(swapper_spaces[type], spaces);
639
640	return 0;
641}
642
643void exit_swap_address_space(unsigned int type)
644{
645	struct address_space *spaces;
646
647	spaces = swapper_spaces[type];
648	nr_swapper_spaces[type] = 0;
649	rcu_assign_pointer(swapper_spaces[type], NULL);
650	synchronize_rcu();
651	kvfree(spaces);
652}
653
654static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
655				     unsigned long faddr,
656				     unsigned long lpfn,
657				     unsigned long rpfn,
658				     unsigned long *start,
659				     unsigned long *end)
660{
661	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
662		      PFN_DOWN(faddr & PMD_MASK));
663	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
664		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
665}
666
667static void swap_ra_info(struct vm_fault *vmf,
668			struct vma_swap_readahead *ra_info)
669{
670	struct vm_area_struct *vma = vmf->vma;
671	unsigned long ra_val;
672	swp_entry_t entry;
673	unsigned long faddr, pfn, fpfn;
674	unsigned long start, end;
675	pte_t *pte, *orig_pte;
676	unsigned int max_win, hits, prev_win, win, left;
677#ifndef CONFIG_64BIT
678	pte_t *tpte;
679#endif
680
681	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
682			     SWAP_RA_ORDER_CEILING);
683	if (max_win == 1) {
684		ra_info->win = 1;
685		return;
686	}
687
688	faddr = vmf->address;
689	orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
690	entry = pte_to_swp_entry(*pte);
691	if ((unlikely(non_swap_entry(entry)))) {
692		pte_unmap(orig_pte);
693		return;
694	}
695
696	fpfn = PFN_DOWN(faddr);
697	ra_val = GET_SWAP_RA_VAL(vma);
698	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
699	prev_win = SWAP_RA_WIN(ra_val);
700	hits = SWAP_RA_HITS(ra_val);
701	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
702					       max_win, prev_win);
703	atomic_long_set(&vma->swap_readahead_info,
704			SWAP_RA_VAL(faddr, win, 0));
705
706	if (win == 1) {
707		pte_unmap(orig_pte);
708		return;
709	}
710
711	/* Copy the PTEs because the page table may be unmapped */
712	if (fpfn == pfn + 1)
713		swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
714	else if (pfn == fpfn + 1)
715		swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
716				  &start, &end);
717	else {
718		left = (win - 1) / 2;
719		swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
720				  &start, &end);
721	}
722	ra_info->nr_pte = end - start;
723	ra_info->offset = fpfn - start;
724	pte -= ra_info->offset;
725#ifdef CONFIG_64BIT
726	ra_info->ptes = pte;
727#else
728	tpte = ra_info->ptes;
729	for (pfn = start; pfn != end; pfn++)
730		*tpte++ = *pte++;
731#endif
732	pte_unmap(orig_pte);
733}
734
735static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
736				       struct vm_fault *vmf)
737{
738	struct blk_plug plug;
739	struct vm_area_struct *vma = vmf->vma;
740	struct page *page;
741	pte_t *pte, pentry;
742	swp_entry_t entry;
743	unsigned int i;
744	bool page_allocated;
745	struct vma_swap_readahead ra_info = {0,};
746
747	swap_ra_info(vmf, &ra_info);
748	if (ra_info.win == 1)
749		goto skip;
750
751	blk_start_plug(&plug);
752	for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
753	     i++, pte++) {
754		pentry = *pte;
755		if (pte_none(pentry))
756			continue;
757		if (pte_present(pentry))
758			continue;
759		entry = pte_to_swp_entry(pentry);
760		if (unlikely(non_swap_entry(entry)))
761			continue;
762		page = __read_swap_cache_async(entry, gfp_mask, vma,
763					       vmf->address, &page_allocated);
764		if (!page)
765			continue;
766		if (page_allocated) {
767			swap_readpage(page, false);
768			if (i != ra_info.offset) {
769				SetPageReadahead(page);
770				count_vm_event(SWAP_RA);
771			}
772		}
773		put_page(page);
774	}
775	blk_finish_plug(&plug);
776	lru_add_drain();
777skip:
778	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
779				     ra_info.win == 1);
780}
781
782/**
783 * swapin_readahead - swap in pages in hope we need them soon
784 * @entry: swap entry of this memory
785 * @gfp_mask: memory allocation flags
786 * @vmf: fault information
787 *
788 * Returns the struct page for entry and addr, after queueing swapin.
789 *
790 * It's the main entry function for swap readahead. Depending on the
791 * configuration, it reads ahead blocks either cluster-based (i.e.,
792 * physical disk based) or VMA-based (i.e., based on the faulting address).
793 */
794struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
795				struct vm_fault *vmf)
796{
797	return swap_use_vma_readahead() ?
798			swap_vma_readahead(entry, gfp_mask, vmf) :
799			swap_cluster_readahead(entry, gfp_mask, vmf);
800}
801
802#ifdef CONFIG_SYSFS
803static ssize_t vma_ra_enabled_show(struct kobject *kobj,
804				     struct kobj_attribute *attr, char *buf)
805{
806	return sprintf(buf, "%s\n", enable_vma_readahead ? "true" : "false");
807}
808static ssize_t vma_ra_enabled_store(struct kobject *kobj,
809				      struct kobj_attribute *attr,
810				      const char *buf, size_t count)
811{
812	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
813		enable_vma_readahead = true;
814	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
815		enable_vma_readahead = false;
816	else
817		return -EINVAL;
818
819	return count;
820}
821static struct kobj_attribute vma_ra_enabled_attr =
822	__ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
823	       vma_ra_enabled_store);
824
825static struct attribute *swap_attrs[] = {
826	&vma_ra_enabled_attr.attr,
827	NULL,
828};
829
830static struct attribute_group swap_attr_group = {
831	.attrs = swap_attrs,
832};
833
834static int __init swap_init_sysfs(void)
835{
836	int err;
837	struct kobject *swap_kobj;
838
839	swap_kobj = kobject_create_and_add("swap", mm_kobj);
840	if (!swap_kobj) {
841		pr_err("failed to create swap kobject\n");
842		return -ENOMEM;
843	}
844	err = sysfs_create_group(swap_kobj, &swap_attr_group);
845	if (err) {
846		pr_err("failed to register swap group\n");
847		goto delete_obj;
848	}
849	return 0;
850
851delete_obj:
852	kobject_put(swap_kobj);
853	return err;
854}
855subsys_initcall(swap_init_sysfs);
856#endif
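
VMA-based readahead can be toggled at runtime through the sysfs attribute defined above, which appears as /sys/kernel/mm/swap/vma_ra_enabled (the swap kobject is created under mm_kobj). A minimal userspace sketch of turning it off; "0" is written because it is accepted both by the strncmp parser in this version and by the kstrtobool parser used in newer kernels, and root is required:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Roughly equivalent to: echo 0 > /sys/kernel/mm/swap/vma_ra_enabled */
	int fd = open("/sys/kernel/mm/swap/vma_ra_enabled", O_WRONLY);

	if (fd < 0 || write(fd, "0", 1) != 1)
		perror("vma_ra_enabled");
	if (fd >= 0)
		close(fd);
	return 0;
}
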
v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  linux/mm/swap_state.c
  4 *
  5 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  6 *  Swap reorganised 29.12.95, Stephen Tweedie
  7 *
  8 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
  9 */
 10#include <linux/mm.h>
 11#include <linux/gfp.h>
 12#include <linux/kernel_stat.h>
 13#include <linux/mempolicy.h>
 14#include <linux/swap.h>
 15#include <linux/swapops.h>
 16#include <linux/init.h>
 17#include <linux/pagemap.h>
 18#include <linux/backing-dev.h>
 19#include <linux/blkdev.h>
 20#include <linux/migrate.h>
 21#include <linux/vmalloc.h>
 22#include <linux/swap_slots.h>
 23#include <linux/huge_mm.h>
 24#include <linux/shmem_fs.h>
 25#include "internal.h"
 26#include "swap.h"
 27
 28/*
 29 * swapper_space is a fiction, retained to simplify the path through
 30 * vmscan's shrink_page_list.
 31 */
 32static const struct address_space_operations swap_aops = {
 33	.writepage	= swap_writepage,
 34	.dirty_folio	= noop_dirty_folio,
 35#ifdef CONFIG_MIGRATION
 36	.migrate_folio	= migrate_folio,
 37#endif
 38};
 39
 40struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
 41static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
 42static bool enable_vma_readahead __read_mostly = true;
 43
 44#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
 45#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
 46#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
 47#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)
 48
 49#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
 50#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
 51#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)
 52
 53#define SWAP_RA_VAL(addr, win, hits)				\
 54	(((addr) & PAGE_MASK) |					\
 55	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
 56	 ((hits) & SWAP_RA_HITS_MASK))
 57
 58/* Initial readahead hits is 4 to start up with a small window */
 59#define GET_SWAP_RA_VAL(vma)					\
 60	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
 61
 62static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
 63
 64void show_swap_cache_info(void)
 65{
 66	printk("%lu pages in swap cache\n", total_swapcache_pages());
 67	printk("Free swap  = %ldkB\n", K(get_nr_swap_pages()));
 68	printk("Total swap = %lukB\n", K(total_swap_pages));
 69}
 70
 71void *get_shadow_from_swap_cache(swp_entry_t entry)
 72{
 73	struct address_space *address_space = swap_address_space(entry);
 74	pgoff_t idx = swp_offset(entry);
 75	struct page *page;
 76
 77	page = xa_load(&address_space->i_pages, idx);
 78	if (xa_is_value(page))
 79		return page;
 80	return NULL;
 81}
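
The xa_is_value() test above distinguishes shadow entries from folio pointers by bit 0: XArray value entries store a small integer shifted left by one with the low bit set, which can never collide with a word-aligned pointer. A userspace sketch of that convention (the _demo helpers are local stand-ins for xa_mk_value()/xa_is_value()/xa_to_value()):

#include <stdio.h>
#include <stdint.h>

/* Tag a small integer so it can't be mistaken for an aligned pointer. */
static void *xa_mk_value_demo(unsigned long v)
{
	return (void *)((v << 1) | 1);
}

static int xa_is_value_demo(const void *entry)
{
	return (uintptr_t)entry & 1;
}

static unsigned long xa_to_value_demo(const void *entry)
{
	return (uintptr_t)entry >> 1;
}

int main(void)
{
	int folio_stand_in;			/* aligned, so bit 0 is clear */
	void *shadow = xa_mk_value_demo(42);	/* e.g. workingset eviction info */

	printf("shadow? %d -> %lu\n", xa_is_value_demo(shadow),
	       xa_to_value_demo(shadow));
	printf("folio?  %d\n", xa_is_value_demo(&folio_stand_in));
	return 0;
}
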
 82
 83/*
 84 * add_to_swap_cache resembles filemap_add_folio on swapper_space,
 85 * but sets SwapCache flag and private instead of mapping and index.
 86 */
 87int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
 88			gfp_t gfp, void **shadowp)
 89{
 90	struct address_space *address_space = swap_address_space(entry);
 91	pgoff_t idx = swp_offset(entry);
 92	XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
 93	unsigned long i, nr = folio_nr_pages(folio);
 94	void *old;
 95
 96	xas_set_update(&xas, workingset_update_node);
 97
 98	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 99	VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
100	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
101
102	folio_ref_add(folio, nr);
103	folio_set_swapcache(folio);
104	folio->swap = entry;
105
106	do {
107		xas_lock_irq(&xas);
108		xas_create_range(&xas);
109		if (xas_error(&xas))
110			goto unlock;
111		for (i = 0; i < nr; i++) {
112			VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
113			if (shadowp) {
114				old = xas_load(&xas);
115				if (xa_is_value(old))
116					*shadowp = old;
117			}
118			xas_store(&xas, folio);
119			xas_next(&xas);
120		}
121		address_space->nrpages += nr;
122		__node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
123		__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
124unlock:
125		xas_unlock_irq(&xas);
126	} while (xas_nomem(&xas, gfp));
127
128	if (!xas_error(&xas))
129		return 0;
130
131	folio_clear_swapcache(folio);
132	folio_ref_sub(folio, nr);
133	return xas_error(&xas);
134}
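
The do/while above is the standard XArray store pattern: attempt the update under the lock, and if xas_error() reports -ENOMEM, xas_nomem(), called after the lock is dropped, allocates a node with the caller's gfp mask and returns true to request a retry. A rough userspace sketch of the same shape, assuming a made-up try_insert()/nomem_retry() pair in place of the xas_* calls:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *reserved_node;		/* preallocated tree node */

/* Pretend insertion that needs one node; fails if none is reserved. */
static int try_insert(void)
{
	if (!reserved_node)
		return -1;		/* like xas_error() == -ENOMEM */
	reserved_node = NULL;		/* consume the node */
	return 0;
}

/* Allocate outside the lock; report whether a retry makes sense. */
static int nomem_retry(void)
{
	reserved_node = malloc(64);
	return reserved_node != NULL;	/* like xas_nomem() */
}

int main(void)
{
	int err;

	do {
		pthread_mutex_lock(&lock);
		err = try_insert();		/* may hit -ENOMEM */
		pthread_mutex_unlock(&lock);
	} while (err && nomem_retry());		/* drop lock, allocate, retry */

	printf("insert %s\n", err ? "failed" : "succeeded");
	free(reserved_node);
	return 0;
}
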
135
136/*
137 * This must be called only on folios that have
138 * been verified to be in the swap cache.
139 */
140void __delete_from_swap_cache(struct folio *folio,
141			swp_entry_t entry, void *shadow)
142{
143	struct address_space *address_space = swap_address_space(entry);
144	int i;
145	long nr = folio_nr_pages(folio);
146	pgoff_t idx = swp_offset(entry);
147	XA_STATE(xas, &address_space->i_pages, idx);
148
149	xas_set_update(&xas, workingset_update_node);
150
151	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
152	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
153	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
154
155	for (i = 0; i < nr; i++) {
156		void *entry = xas_store(&xas, shadow);
157		VM_BUG_ON_PAGE(entry != folio, entry);
158		xas_next(&xas);
159	}
160	folio->swap.val = 0;
161	folio_clear_swapcache(folio);
162	address_space->nrpages -= nr;
163	__node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
164	__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
165}
166
167/**
168 * add_to_swap - allocate swap space for a folio
169 * @folio: folio we want to move to swap
170 *
171 * Allocate swap space for the folio and add the folio to the
172 * swap cache.
173 *
174 * Context: Caller needs to hold the folio lock.
175 * Return: Whether the folio was added to the swap cache.
176 */
177bool add_to_swap(struct folio *folio)
178{
179	swp_entry_t entry;
180	int err;
181
182	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
183	VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);
184
185	entry = folio_alloc_swap(folio);
186	if (!entry.val)
187		return false;
188
189	/*
190	 * XArray node allocations from PF_MEMALLOC contexts could
191	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
192	 * stops emergency reserves from being allocated.
193	 *
194	 * TODO: this could cause a theoretical memory reclaim
195	 * deadlock in the swap out path.
196	 */
197	/*
198	 * Add it to the swap cache.
199	 */
200	err = add_to_swap_cache(folio, entry,
201			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
202	if (err)
203		/*
204		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
205		 * clear SWAP_HAS_CACHE flag.
206		 */
207		goto fail;
208	/*
209	 * Normally the folio will be dirtied in unmap because its
210	 * pte should be dirty. A special case is MADV_FREE page. The
211	 * page's pte could have dirty bit cleared but the folio's
212	 * SwapBacked flag is still set because clearing the dirty bit
213	 * and SwapBacked flag is not protected by a lock. For such a
214	 * folio, unmap will not set the dirty bit, so folio reclaim
215	 * will not write the folio out. This can cause data corruption
216	 * when the folio is swapped in later. Always setting the dirty
217	 * flag for the folio solves the problem.
218	 */
219	folio_mark_dirty(folio);
220
221	return true;
222
223fail:
224	put_swap_folio(folio, entry);
225	return false;
226}
227
228/*
229 * This must be called only on folios that have
230 * been verified to be in the swap cache and locked.
231 * It will never put the folio into the free list,
232 * the caller has a reference on the folio.
233 */
234void delete_from_swap_cache(struct folio *folio)
235{
236	swp_entry_t entry = folio->swap;
237	struct address_space *address_space = swap_address_space(entry);
238
239	xa_lock_irq(&address_space->i_pages);
240	__delete_from_swap_cache(folio, entry, NULL);
241	xa_unlock_irq(&address_space->i_pages);
242
243	put_swap_folio(folio, entry);
244	folio_ref_sub(folio, folio_nr_pages(folio));
245}
246
247void clear_shadow_from_swap_cache(int type, unsigned long begin,
248				unsigned long end)
249{
250	unsigned long curr = begin;
251	void *old;
252
253	for (;;) {
254		swp_entry_t entry = swp_entry(type, curr);
255		struct address_space *address_space = swap_address_space(entry);
256		XA_STATE(xas, &address_space->i_pages, curr);
257
258		xas_set_update(&xas, workingset_update_node);
259
260		xa_lock_irq(&address_space->i_pages);
261		xas_for_each(&xas, old, end) {
262			if (!xa_is_value(old))
263				continue;
264			xas_store(&xas, NULL);
265		}
266		xa_unlock_irq(&address_space->i_pages);
267
268		/* search the next swapcache until we meet end */
269		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
270		curr++;
271		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
272		if (curr > end)
273			break;
274	}
275}
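
The shift/increment/shift at the bottom of the loop rounds curr up to the first offset of the next swap address space, since each address space covers 2^SWAP_ADDRESS_SPACE_SHIFT slots (the kernel defines the shift as 14). A small numeric check of that rounding:

#include <stdio.h>

#define SWAP_ADDRESS_SPACE_SHIFT	14	/* 16384 slots per space */

int main(void)
{
	unsigned long curr = 20000;

	/* Same rounding as above: jump to the next swap address space. */
	curr >>= SWAP_ADDRESS_SPACE_SHIFT;
	curr++;
	curr <<= SWAP_ADDRESS_SPACE_SHIFT;
	/* 20000 lives in space [16384, 32767], so the next starts at 32768. */
	printf("%lu\n", curr);
	return 0;
}
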
276
277/*
278 * If we are the only user, then try to free up the swap cache.
279 *
280 * It's ok to check the swapcache flag without the folio lock
281 * here because we are going to recheck again inside
282 * folio_free_swap() _with_ the lock.
283 * 					- Marcelo
284 */
285void free_swap_cache(struct page *page)
286{
287	struct folio *folio = page_folio(page);
288
289	if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
290	    folio_trylock(folio)) {
291		folio_free_swap(folio);
292		folio_unlock(folio);
293	}
294}
295
296/*
297 * Perform a free_page(), also freeing any swap cache associated with
298 * this page if it is the last user of the page.
299 */
300void free_page_and_swap_cache(struct page *page)
301{
302	free_swap_cache(page);
303	if (!is_huge_zero_page(page))
304		put_page(page);
305}
306
307/*
308 * Passed an array of pages, drop them all from swapcache and then release
309 * them.  They are removed from the LRU and freed if this is their last use.
310 */
311void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
312{
313	lru_add_drain();
314	for (int i = 0; i < nr; i++)
315		free_swap_cache(encoded_page_ptr(pages[i]));
316	release_pages(pages, nr);
317}
318
319static inline bool swap_use_vma_readahead(void)
320{
321	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
322}
323
324/*
325 * Lookup a swap entry in the swap cache. A found folio will be returned
326 * unlocked and with its refcount incremented - we rely on the kernel
327 * lock getting page table operations atomic even if we drop the folio
328 * lock before returning.
329 *
330 * Caller must lock the swap device or hold a reference to keep it valid.
331 */
332struct folio *swap_cache_get_folio(swp_entry_t entry,
333		struct vm_area_struct *vma, unsigned long addr)
334{
335	struct folio *folio;
336
337	folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
338	if (!IS_ERR(folio)) {
339		bool vma_ra = swap_use_vma_readahead();
340		bool readahead;
341
342		/*
343		 * At the moment, we don't support PG_readahead for anon THP
344		 * so let's bail out rather than confusing the readahead stat.
345		 */
346		if (unlikely(folio_test_large(folio)))
347			return folio;
348
349		readahead = folio_test_clear_readahead(folio);
350		if (vma && vma_ra) {
351			unsigned long ra_val;
352			int win, hits;
353
354			ra_val = GET_SWAP_RA_VAL(vma);
355			win = SWAP_RA_WIN(ra_val);
356			hits = SWAP_RA_HITS(ra_val);
357			if (readahead)
358				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
359			atomic_long_set(&vma->swap_readahead_info,
360					SWAP_RA_VAL(addr, win, hits));
361		}
362
363		if (readahead) {
364			count_vm_event(SWAP_RA_HIT);
365			if (!vma || !vma_ra)
366				atomic_inc(&swapin_readahead_hits);
367		}
368	} else {
369		folio = NULL;
370	}
371
372	return folio;
373}
374
375/**
376 * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
377 * @mapping: The address_space to search.
378 * @index: The page cache index.
379 *
380 * This differs from filemap_get_folio() in that it will also look for the
381 * folio in the swap cache.
382 *
383 * Return: The found folio or %NULL.
384 */
385struct folio *filemap_get_incore_folio(struct address_space *mapping,
386		pgoff_t index)
387{
388	swp_entry_t swp;
389	struct swap_info_struct *si;
390	struct folio *folio = filemap_get_entry(mapping, index);
391
392	if (!folio)
393		return ERR_PTR(-ENOENT);
394	if (!xa_is_value(folio))
395		return folio;
396	if (!shmem_mapping(mapping))
397		return ERR_PTR(-ENOENT);
398
399	swp = radix_to_swp_entry(folio);
400	/* There might be swapin error entries in shmem mapping. */
401	if (non_swap_entry(swp))
402		return ERR_PTR(-ENOENT);
403	/* Prevent swapoff from happening to us */
404	si = get_swap_device(swp);
405	if (!si)
406		return ERR_PTR(-ENOENT);
407	index = swp_offset(swp);
408	folio = filemap_get_folio(swap_address_space(swp), index);
409	put_swap_device(si);
410	return folio;
411}
412
413struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
414		struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
415		bool skip_if_exists)
416{
417	struct swap_info_struct *si;
418	struct folio *folio;
419	void *shadow = NULL;
420
421	*new_page_allocated = false;
422	si = get_swap_device(entry);
423	if (!si)
424		return NULL;
425
426	for (;;) {
427		int err;
428		/*
429		 * First check the swap cache.  Since this is normally
430		 * called after swap_cache_get_folio() failed, re-calling
431		 * that would confuse statistics.
432		 */
433		folio = filemap_get_folio(swap_address_space(entry),
434						swp_offset(entry));
435		if (!IS_ERR(folio))
436			goto got_folio;
437
438		/*
439		 * Just skip read ahead for unused swap slot.
440		 * During swap_off when swap_slot_cache is disabled,
441		 * we have to handle the race between putting
442		 * swap entry in swap cache and marking swap slot
443		 * as SWAP_HAS_CACHE.  That's done in a later part of the
444		 * code, or else swap_off will be aborted if we return NULL.
445		 */
446		if (!swap_swapcount(si, entry) && swap_slot_cache_enabled)
447			goto fail_put_swap;
448
449		/*
450		 * Get a new folio to read into from swap.  Allocate it now,
451		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
452		 * cause any racers to loop around until we add it to cache.
453		 */
454		folio = (struct folio *)alloc_pages_mpol(gfp_mask, 0,
455						mpol, ilx, numa_node_id());
456		if (!folio)
457			goto fail_put_swap;
458
459		/*
460		 * Swap entry may have been freed since our caller observed it.
461		 */
462		err = swapcache_prepare(entry);
463		if (!err)
464			break;
465
466		folio_put(folio);
467		if (err != -EEXIST)
468			goto fail_put_swap;
469
470		/*
471		 * Protect against a recursive call to __read_swap_cache_async()
472		 * on the same entry waiting forever here because SWAP_HAS_CACHE
473		 * is set but the folio is not the swap cache yet. This can
474		 * happen today if mem_cgroup_swapin_charge_folio() below
475		 * triggers reclaim through zswap, which may call
476		 * __read_swap_cache_async() in the writeback path.
477		 */
478		if (skip_if_exists)
479			goto fail_put_swap;
480
481		/*
482		 * We might race against __delete_from_swap_cache(), and
483		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
484		 * has not yet been cleared.  Or race against another
485		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
486		 * in swap_map, but not yet added its folio to swap cache.
487		 */
488		schedule_timeout_uninterruptible(1);
489	}
490
491	/*
492	 * The swap entry is ours to swap in. Prepare the new folio.
493	 */
494
495	__folio_set_locked(folio);
496	__folio_set_swapbacked(folio);
497
498	if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
499		goto fail_unlock;
500
501	/* May fail (-ENOMEM) if XArray node allocation failed. */
502	if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
503		goto fail_unlock;
504
505	mem_cgroup_swapin_uncharge_swap(entry);
506
507	if (shadow)
508		workingset_refault(folio, shadow);
509
510	/* Caller will initiate read into locked folio */
511	folio_add_lru(folio);
512	*new_page_allocated = true;
513got_folio:
514	put_swap_device(si);
515	return folio;
516
517fail_unlock:
518	put_swap_folio(folio, entry);
519	folio_unlock(folio);
520	folio_put(folio);
521fail_put_swap:
522	put_swap_device(si);
523	return NULL;
524}
525
526/*
527 * Locate a page of swap in physical memory, reserving swap cache space
528 * and reading the disk if it is not already cached.
529 * A failure return means that either the page allocation failed or that
530 * the swap entry is no longer in use.
531 *
532 * get/put_swap_device() aren't needed to call this function, because
533 * __read_swap_cache_async() call them and swap_read_folio() holds the
534 * swap cache folio lock.
535 */
536struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
537		struct vm_area_struct *vma, unsigned long addr,
538		struct swap_iocb **plug)
539{
540	bool page_allocated;
541	struct mempolicy *mpol;
542	pgoff_t ilx;
543	struct folio *folio;
544
545	mpol = get_vma_policy(vma, addr, 0, &ilx);
546	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
547					&page_allocated, false);
548	mpol_cond_put(mpol);
549
550	if (page_allocated)
551		swap_read_folio(folio, false, plug);
552	return folio;
553}
554
555static unsigned int __swapin_nr_pages(unsigned long prev_offset,
556				      unsigned long offset,
557				      int hits,
558				      int max_pages,
559				      int prev_win)
560{
561	unsigned int pages, last_ra;
562
563	/*
564	 * This heuristic has been found to work well on both sequential and
565	 * random loads, swapping to hard disk or to SSD: please don't ask
566	 * what the "+ 2" means, it just happens to work well, that's all.
567	 */
568	pages = hits + 2;
569	if (pages == 2) {
570		/*
571		 * We can have no readahead hits to judge by: but must not get
572		 * stuck here forever, so check for an adjacent offset instead
573		 * (and don't even bother to check whether swap type is same).
574		 */
575		if (offset != prev_offset + 1 && offset != prev_offset - 1)
576			pages = 1;
577	} else {
578		unsigned int roundup = 4;
579		while (roundup < pages)
580			roundup <<= 1;
581		pages = roundup;
582	}
583
584	if (pages > max_pages)
585		pages = max_pages;
586
587	/* Don't shrink readahead too fast */
588	last_ra = prev_win / 2;
589	if (pages < last_ra)
590		pages = last_ra;
591
592	return pages;
593}
594
595static unsigned long swapin_nr_pages(unsigned long offset)
596{
597	static unsigned long prev_offset;
598	unsigned int hits, pages, max_pages;
599	static atomic_t last_readahead_pages;
600
601	max_pages = 1 << READ_ONCE(page_cluster);
602	if (max_pages <= 1)
603		return 1;
604
605	hits = atomic_xchg(&swapin_readahead_hits, 0);
606	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
607				  max_pages,
608				  atomic_read(&last_readahead_pages));
609	if (!hits)
610		WRITE_ONCE(prev_offset, offset);
611	atomic_set(&last_readahead_pages, pages);
612
613	return pages;
614}
615
616/**
617 * swap_cluster_readahead - swap in pages in hope we need them soon
618 * @entry: swap entry of this memory
619 * @gfp_mask: memory allocation flags
620 * @mpol: NUMA memory allocation policy to be applied
621 * @ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
622 *
623 * Returns the struct folio for entry and addr, after queueing swapin.
624 *
625 * Primitive swap readahead code. We simply read an aligned block of
626 * (1 << page_cluster) entries in the swap area. This method is chosen
627 * because it doesn't cost us any seek time.  We also make sure to queue
628 * the 'original' request together with the readahead ones...
629 *
630 * Note: it is intentional that the same NUMA policy and interleave index
631 * are used for every page of the readahead: neighbouring pages on swap
632 * are fairly likely to have been swapped out from the same node.
633 */
634struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
635				    struct mempolicy *mpol, pgoff_t ilx)
636{
637	struct folio *folio;
638	unsigned long entry_offset = swp_offset(entry);
639	unsigned long offset = entry_offset;
640	unsigned long start_offset, end_offset;
641	unsigned long mask;
642	struct swap_info_struct *si = swp_swap_info(entry);
643	struct blk_plug plug;
644	struct swap_iocb *splug = NULL;
645	bool page_allocated;
646
647	mask = swapin_nr_pages(offset) - 1;
648	if (!mask)
649		goto skip;
650
651	/* Read a page_cluster sized and aligned cluster around offset. */
652	start_offset = offset & ~mask;
653	end_offset = offset | mask;
654	if (!start_offset)	/* First page is swap header. */
655		start_offset++;
656	if (end_offset >= si->max)
657		end_offset = si->max - 1;
658
659	blk_start_plug(&plug);
660	for (offset = start_offset; offset <= end_offset ; offset++) {
661		/* Ok, do the async read-ahead now */
662		folio = __read_swap_cache_async(
663				swp_entry(swp_type(entry), offset),
664				gfp_mask, mpol, ilx, &page_allocated, false);
665		if (!folio)
666			continue;
667		if (page_allocated) {
668			swap_read_folio(folio, false, &splug);
669			if (offset != entry_offset) {
670				folio_set_readahead(folio);
671				count_vm_event(SWAP_RA);
672			}
673		}
674		folio_put(folio);
675	}
676	blk_finish_plug(&plug);
677	swap_read_unplug(splug);
678	lru_add_drain();	/* Push any new pages onto the LRU now */
679skip:
680	/* The page was likely read above, so no need for plugging here */
681	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
682					&page_allocated, false);
683	if (unlikely(page_allocated)) {
684		zswap_folio_swapin(folio);
685		swap_read_folio(folio, false, NULL);
686	}
687	return folio;
688}
689
690int init_swap_address_space(unsigned int type, unsigned long nr_pages)
691{
692	struct address_space *spaces, *space;
693	unsigned int i, nr;
694
695	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
696	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
697	if (!spaces)
698		return -ENOMEM;
699	for (i = 0; i < nr; i++) {
700		space = spaces + i;
701		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
702		atomic_set(&space->i_mmap_writable, 0);
703		space->a_ops = &swap_aops;
704		/* swap cache doesn't use writeback related tags */
705		mapping_set_no_writeback_tags(space);
706	}
707	nr_swapper_spaces[type] = nr;
708	swapper_spaces[type] = spaces;
709
710	return 0;
711}
712
713void exit_swap_address_space(unsigned int type)
714{
715	int i;
716	struct address_space *spaces = swapper_spaces[type];
717
718	for (i = 0; i < nr_swapper_spaces[type]; i++)
719		VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
720	kvfree(spaces);
721	nr_swapper_spaces[type] = 0;
722	swapper_spaces[type] = NULL;
723}
724
725#define SWAP_RA_ORDER_CEILING	5
726
727struct vma_swap_readahead {
728	unsigned short win;
729	unsigned short offset;
730	unsigned short nr_pte;
731};
732
733static void swap_ra_info(struct vm_fault *vmf,
734			 struct vma_swap_readahead *ra_info)
735{
736	struct vm_area_struct *vma = vmf->vma;
737	unsigned long ra_val;
738	unsigned long faddr, pfn, fpfn, lpfn, rpfn;
739	unsigned long start, end;
740	unsigned int max_win, hits, prev_win, win;
741
742	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
743			     SWAP_RA_ORDER_CEILING);
744	if (max_win == 1) {
745		ra_info->win = 1;
746		return;
747	}
748
749	faddr = vmf->address;
750	fpfn = PFN_DOWN(faddr);
751	ra_val = GET_SWAP_RA_VAL(vma);
752	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
753	prev_win = SWAP_RA_WIN(ra_val);
754	hits = SWAP_RA_HITS(ra_val);
755	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
756					       max_win, prev_win);
757	atomic_long_set(&vma->swap_readahead_info,
758			SWAP_RA_VAL(faddr, win, 0));
759	if (win == 1)
760		return;
761
762	if (fpfn == pfn + 1) {
763		lpfn = fpfn;
764		rpfn = fpfn + win;
765	} else if (pfn == fpfn + 1) {
766		lpfn = fpfn - win + 1;
767		rpfn = fpfn + 1;
768	} else {
769		unsigned int left = (win - 1) / 2;
770
771		lpfn = fpfn - left;
772		rpfn = fpfn + win - left;
773	}
774	start = max3(lpfn, PFN_DOWN(vma->vm_start),
775		     PFN_DOWN(faddr & PMD_MASK));
776	end = min3(rpfn, PFN_DOWN(vma->vm_end),
777		   PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
778
779	ra_info->nr_pte = end - start;
780	ra_info->offset = fpfn - start;
781}
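
The clamping at the end of swap_ra_info() centres the window on the fault pfn (or biases it in the detected direction) and then restricts it to both the VMA and the PMD-sized block around the fault, so the later PTE walk never leaves one page table page. A userspace sketch of the arithmetic with assumed toy numbers (max3l/min3l mimic the kernel's max3/min3 helpers):

#include <stdio.h>

static unsigned long max3l(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long m = a > b ? a : b;
	return m > c ? m : c;
}

static unsigned long min3l(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long m = a < b ? a : b;
	return m < c ? m : c;
}

int main(void)
{
	/* Assumed toy numbers: fault pfn 100, window 8, no prior direction. */
	unsigned long fpfn = 100, win = 8, left = (win - 1) / 2;
	unsigned long lpfn = fpfn - left;		/* 97 */
	unsigned long rpfn = fpfn + win - left;		/* 105 */
	/* Clamp to an assumed VMA [96, 104) and PMD block [0, 512). */
	unsigned long start = max3l(lpfn, 96, 0);
	unsigned long end = min3l(rpfn, 104, 512);

	printf("readahead pfns [%lu, %lu)\n", start, end);	/* [97, 104) */
	return 0;
}
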
782
783/**
784 * swap_vma_readahead - swap in pages in hope we need them soon
785 * @targ_entry: swap entry of the targeted memory
786 * @gfp_mask: memory allocation flags
787 * @mpol: NUMA memory allocation policy to be applied
788 * @targ_ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
789 * @vmf: fault information
790 *
791 * Returns the struct folio for entry and addr, after queueing swapin.
792 *
793 * Primitive swap readahead code. We simply read in a few pages whose
794 * virtual addresses are around the fault address in the same vma.
795 *
796 * Caller must hold read mmap_lock if vmf->vma is not NULL.
797 *
798 */
799static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
800		struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)
801{
802	struct blk_plug plug;
803	struct swap_iocb *splug = NULL;
804	struct folio *folio;
805	pte_t *pte = NULL, pentry;
806	unsigned long addr;
807	swp_entry_t entry;
808	pgoff_t ilx;
809	unsigned int i;
810	bool page_allocated;
811	struct vma_swap_readahead ra_info = {
812		.win = 1,
813	};
814
815	swap_ra_info(vmf, &ra_info);
816	if (ra_info.win == 1)
817		goto skip;
818
819	addr = vmf->address - (ra_info.offset * PAGE_SIZE);
820	ilx = targ_ilx - ra_info.offset;
821
822	blk_start_plug(&plug);
823	for (i = 0; i < ra_info.nr_pte; i++, ilx++, addr += PAGE_SIZE) {
824		if (!pte++) {
825			pte = pte_offset_map(vmf->pmd, addr);
826			if (!pte)
827				break;
828		}
829		pentry = ptep_get_lockless(pte);
830		if (!is_swap_pte(pentry))
831			continue;
832		entry = pte_to_swp_entry(pentry);
833		if (unlikely(non_swap_entry(entry)))
834			continue;
835		pte_unmap(pte);
836		pte = NULL;
837		folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
838						&page_allocated, false);
839		if (!folio)
840			continue;
841		if (page_allocated) {
842			swap_read_folio(folio, false, &splug);
843			if (i != ra_info.offset) {
844				folio_set_readahead(folio);
845				count_vm_event(SWAP_RA);
846			}
847		}
848		folio_put(folio);
849	}
850	if (pte)
851		pte_unmap(pte);
852	blk_finish_plug(&plug);
853	swap_read_unplug(splug);
854	lru_add_drain();
855skip:
856	/* The folio was likely read above, so no need for plugging here */
857	folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
858					&page_allocated, false);
859	if (unlikely(page_allocated)) {
860		zswap_folio_swapin(folio);
861		swap_read_folio(folio, false, NULL);
862	}
863	return folio;
864}
865
866/**
867 * swapin_readahead - swap in pages in hope we need them soon
868 * @entry: swap entry of this memory
869 * @gfp_mask: memory allocation flags
870 * @vmf: fault information
871 *
872 * Returns the struct page for entry and addr, after queueing swapin.
873 *
874 * It's the main entry function for swap readahead. Depending on the
875 * configuration, it reads ahead blocks either cluster-based (i.e.,
876 * physical disk based) or VMA-based (i.e., based on the faulting address).
877 */
878struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
879				struct vm_fault *vmf)
880{
881	struct mempolicy *mpol;
882	pgoff_t ilx;
883	struct folio *folio;
884
885	mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
886	folio = swap_use_vma_readahead() ?
887		swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
888		swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
889	mpol_cond_put(mpol);
890
891	if (!folio)
892		return NULL;
893	return folio_file_page(folio, swp_offset(entry));
894}
895
896#ifdef CONFIG_SYSFS
897static ssize_t vma_ra_enabled_show(struct kobject *kobj,
898				     struct kobj_attribute *attr, char *buf)
899{
900	return sysfs_emit(buf, "%s\n",
901			  enable_vma_readahead ? "true" : "false");
902}
903static ssize_t vma_ra_enabled_store(struct kobject *kobj,
904				      struct kobj_attribute *attr,
905				      const char *buf, size_t count)
906{
907	ssize_t ret;
908
909	ret = kstrtobool(buf, &enable_vma_readahead);
910	if (ret)
911		return ret;
912
913	return count;
914}
915static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);
916
917static struct attribute *swap_attrs[] = {
918	&vma_ra_enabled_attr.attr,
919	NULL,
920};
921
922static const struct attribute_group swap_attr_group = {
923	.attrs = swap_attrs,
924};
925
926static int __init swap_init_sysfs(void)
927{
928	int err;
929	struct kobject *swap_kobj;
930
931	swap_kobj = kobject_create_and_add("swap", mm_kobj);
932	if (!swap_kobj) {
933		pr_err("failed to create swap kobject\n");
934		return -ENOMEM;
935	}
936	err = sysfs_create_group(swap_kobj, &swap_attr_group);
937	if (err) {
938		pr_err("failed to register swap group\n");
939		goto delete_obj;
940	}
941	return 0;
942
943delete_obj:
944	kobject_put(swap_kobj);
945	return err;
946}
947subsys_initcall(swap_init_sysfs);
948#endif