v3.15
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
	.migratepage	= migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
	.name		= "swap",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

struct address_space swapper_spaces[MAX_SWAPFILES] = {
	[0 ... MAX_SWAPFILES - 1] = {
		.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
		.a_ops		= &swap_aops,
		.backing_dev_info = &swap_backing_dev_info,
	}
};

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
	int i;
	unsigned long ret = 0;

	for (i = 0; i < MAX_SWAPFILES; i++)
		ret += swapper_spaces[i].nrpages;
	return ret;
}

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_cache_get(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	error = radix_tree_insert(&address_space->page_tree,
					entry.val, page);
	if (likely(!error)) {
		address_space->nrpages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&address_space->tree_lock);

	if (unlikely(error)) {
		/*
		 * Only the context which has set the SWAP_HAS_CACHE flag
		 * would call add_to_swap_cache(), so add_to_swap_cache()
		 * doesn't return -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		page_cache_release(page);
	}

	return error;
}

int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	radix_tree_delete(&address_space->page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	address_space->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page, struct list_head *list)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page();
	if (!entry.val)
		return 0;

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page_to_list(page, list))) {
			swapcache_free(entry, NULL);
			return 0;
		}

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache and mark it dirty
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {	/* Success */
		SetPageDirty(page);
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
		return 0;
	}
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&address_space->tree_lock);

	swapcache_free(entry, page);
	page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;

	lru_add_drain();
	while (nr) {
		int todo = min(nr, PAGEVEC_SIZE);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, 0);
		pagep += todo;
		nr -= todo;
	}
}

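The loop above drains the array in fixed-size chunks so release_pages() only ever handles PAGEVEC_SIZE pages at a time. A minimal userspace sketch of the same batching pattern; BATCH and the item type are illustrative only (PAGEVEC_SIZE was 14 in kernels of this era, an assumption worth checking against your tree):

#include <stdio.h>

#define BATCH 14	/* stand-in for PAGEVEC_SIZE */

/* Process one bounded chunk; here we just print the items. */
static void process_batch(int *items, int todo)
{
	for (int i = 0; i < todo; i++)
		printf("item %d\n", items[i]);
}

int main(void)
{
	int items[40], nr = 40, *p = items;

	for (int i = 0; i < nr; i++)
		items[i] = i;
	/* Same shape as free_pages_and_swap_cache(): min(nr, BATCH) per pass */
	while (nr) {
		int todo = nr < BATCH ? nr : BATCH;

		process_batch(p, todo);
		p += todo;
		nr -= todo;
	}
	return 0;
}
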
/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page * lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(swap_address_space(entry), entry.val);

	if (page) {
		INC_CACHE_INFO(find_success);
		if (TestClearPageReadahead(page))
			atomic_inc(&swapin_readahead_hits);
	}

	INC_CACHE_INFO(find_total);
	return page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swap_address_space(entry),
					entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet, while
			 * the other end is scheduled away waiting on discard
			 * I/O completion at scan_swap_map().
			 *
			 * In order to avoid turning this transitory state
			 * into a permanent loop around this -EEXIST case
			 * if !CONFIG_PREEMPT and the I/O completion happens
			 * to be waiting on the CPU waitqueue where we are now
			 * busy looping, we just conditionally invoke the
			 * scheduler here, if there are some more important
			 * tasks to run.
			 */
			cond_resched();
			continue;
		}
		if (err) {		/* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			swap_readpage(new_page);
			return new_page;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int pages, max_pages, last_ra;
	static atomic_t last_readahead_pages;

	max_pages = 1 << ACCESS_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
		prev_offset = offset;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = atomic_read(&last_readahead_pages) / 2;
	if (pages < last_ra)
		pages = last_ra;
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

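For a rough feel of the sizing heuristic above: with 5 readahead hits since the last fault, pages = 5 + 2 = 7, rounded up to the next power of two (8), then capped at 1 << page_cluster and never allowed to drop below half the previous window. A minimal userspace sketch of that arithmetic; the names are illustrative, not kernel API, and the hits == 0 adjacent-offset special case is omitted for brevity:

#include <stdio.h>

/* pages = hits + 2, rounded up to a power of two (minimum 4),
 * capped at max_pages, and at least half of the previous window. */
static unsigned int window_pages(unsigned int hits, unsigned int max_pages,
				 unsigned int prev_win)
{
	unsigned int pages = hits + 2;

	if (pages > 2) {
		unsigned int roundup = 4;

		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}
	if (pages > max_pages)
		pages = max_pages;
	if (pages < prev_win / 2)
		pages = prev_win / 2;
	return pages;
}

int main(void)
{
	/* hits=5: 5+2=7, rounds up to 8, within max_pages=8 (page_cluster=3) */
	printf("%u\n", window_pages(5, 8, 4));	/* prints 8 */
	return 0;
}
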
/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct blk_plug plug;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			continue;
		if (offset != entry_offset)
			SetPageReadahead(page);
		page_cache_release(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
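The mask arithmetic above reads an aligned, power-of-two-sized cluster around the faulting offset: with an 8-page window (mask = 7) and offset 0x123, start = 0x123 & ~7 = 0x120 and end = 0x123 | 7 = 0x127. A self-contained sketch of just that arithmetic (names are illustrative):

#include <stdio.h>

/* Compute the aligned readahead window [start, end] around offset for a
 * power-of-two window size. Offset 0 holds the swap header, so the
 * window never starts there. */
static void cluster_window(unsigned long offset, unsigned long win,
			   unsigned long *start, unsigned long *end)
{
	unsigned long mask = win - 1;	/* win must be a power of two */

	*start = offset & ~mask;
	*end = offset | mask;
	if (!*start)			/* first page is the swap header */
		(*start)++;
}

int main(void)
{
	unsigned long start, end;

	cluster_window(0x123, 8, &start, &end);
	printf("0x%lx..0x%lx\n", start, end);	/* 0x120..0x127 */
	return 0;
}
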
v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
#include "swap.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.dirty_folio	= noop_dirty_folio,
#ifdef CONFIG_MIGRATION
	.migrate_folio	= migrate_folio,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

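The SWAP_RA_* macros pack three values into one word: the page-aligned fault address in the high bits, and the window and hit count in the sub-page bits (6 bits each when PAGE_SHIFT is 12). A userspace rendering of the packing, assuming a 4 KiB page size; all names here are illustrative:

#include <stdio.h>

#define PAGE_SHIFT	12	/* assumption: 4 KiB pages */
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))
#define RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define RA_HITS_MASK	((1UL << RA_WIN_SHIFT) - 1)
#define RA_WIN_MASK	(~PAGE_MASK & ~RA_HITS_MASK)

/* Pack addr/win/hits exactly as SWAP_RA_VAL() does. */
static unsigned long ra_val(unsigned long addr, unsigned long win,
			    unsigned long hits)
{
	return (addr & PAGE_MASK) |
	       ((win << RA_WIN_SHIFT) & RA_WIN_MASK) |
	       (hits & RA_HITS_MASK);
}

int main(void)
{
	unsigned long v = ra_val(0x123456UL, 8, 3);

	/* addr 0x123000, win 8, hits 3 -> packed value 0x123203 */
	printf("val=%#lx addr=%#lx win=%lu hits=%lu\n", v, v & PAGE_MASK,
	       (v & RA_WIN_MASK) >> RA_WIN_SHIFT, v & RA_HITS_MASK);
	return 0;
}
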
static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	struct page *page;

	page = xa_load(&address_space->i_pages, idx);
	if (xa_is_value(page))
		return page;
	return NULL;
}

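Shadow entries are XArray "value entries": small integers stored in a pointer slot with the low bit set, so they can never be confused with a real page pointer (the kernel's xa_mk_value()/xa_to_value() encode value v as (v << 1) | 1). A userspace sketch of the same tagged-pointer trick, with illustrative names:

#include <stdio.h>
#include <stdint.h>

/* Encode a small integer in a pointer-sized slot with the tag bit set. */
static void *mk_value(unsigned long v)	{ return (void *)((v << 1) | 1); }
static int is_value(const void *p)	{ return (uintptr_t)p & 1; }
static unsigned long to_value(const void *p) { return (uintptr_t)p >> 1; }

int main(void)
{
	void *slot = mk_value(42);	/* stand-in for a shadow entry */

	if (is_value(slot))
		printf("value entry: %lu\n", to_value(slot));	/* 42 */
	return 0;
}
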
/*
 * add_to_swap_cache resembles filemap_add_folio on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
			gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
	unsigned long i, nr = folio_nr_pages(folio);
	void *old;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);

	folio_ref_add(folio, nr);
	folio_set_swapcache(folio);

	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
			old = xas_load(&xas);
			if (xa_is_value(old)) {
				if (shadowp)
					*shadowp = old;
			}
			set_page_private(folio_page(folio, i), entry.val + i);
			xas_store(&xas, folio);
			xas_next(&xas);
		}
		address_space->nrpages += nr;
		__node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
		__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	folio_clear_swapcache(folio);
	folio_ref_sub(folio, nr);
	return xas_error(&xas);
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct folio *folio,
			swp_entry_t entry, void *shadow)
{
	struct address_space *address_space = swap_address_space(entry);
	int i;
	long nr = folio_nr_pages(folio);
	pgoff_t idx = swp_offset(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
		VM_BUG_ON_PAGE(entry != folio, entry);
		set_page_private(folio_page(folio, i), 0);
		xas_next(&xas);
	}
	folio_clear_swapcache(folio);
	address_space->nrpages -= nr;
	__node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
}

/**
 * add_to_swap - allocate swap space for a folio
 * @folio: folio we want to move to swap
 *
 * Allocate swap space for the folio and add the folio to the
 * swap cache.
 *
 * Context: Caller needs to hold the folio lock.
 * Return: Whether the folio was added to the swap cache.
 */
bool add_to_swap(struct folio *folio)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);

	entry = folio_alloc_swap(folio);
	if (!entry.val)
		return false;

	/*
	 * XArray node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(folio, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the folio will be dirtied in unmap because its
	 * pte should be dirty. A special case is MADV_FREE page. The
	 * page's pte could have dirty bit cleared but the folio's
	 * SwapBacked flag is still set because clearing the dirty bit
	 * and the SwapBacked flag is not protected by a lock. For such
	 * a folio, unmap will not set the dirty bit, so folio reclaim
	 * will not write the folio out. This can cause data corruption
	 * when the folio is swapped in later. Always setting the dirty
	 * flag for the folio solves the problem.
	 */
	folio_mark_dirty(folio);

	return true;

fail:
	put_swap_folio(folio, entry);
	return false;
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache and locked.
 * It will never put the folio into the free list,
 * the caller has a reference on the folio.
 */
void delete_from_swap_cache(struct folio *folio)
{
	swp_entry_t entry = folio_swap_entry(folio);
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(folio, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_folio(folio, entry);
	folio_ref_sub(folio, folio_nr_pages(folio));
}

void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end)
{
	unsigned long curr = begin;
	void *old;

	for (;;) {
		swp_entry_t entry = swp_entry(type, curr);
		struct address_space *address_space = swap_address_space(entry);
		XA_STATE(xas, &address_space->i_pages, curr);

		xa_lock_irq(&address_space->i_pages);
		xas_for_each(&xas, old, end) {
			if (!xa_is_value(old))
				continue;
			xas_store(&xas, NULL);
		}
		xa_unlock_irq(&address_space->i_pages);

		/* search the next swapcache until we meet end */
		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
		curr++;
		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
		if (curr > end)
			break;
	}
}

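The shift-increment-shift sequence at the bottom of the loop rounds curr up to the start of the next swap address space, each of which covers SWAP_ADDRESS_SPACE_PAGES entries (1 << 14 at the time of writing; verify against your tree). A runnable sketch of just the arithmetic:

#include <stdio.h>

/* Round up to the start of the next aligned block of 1 << shift slots,
 * as clear_shadow_from_swap_cache() does with SWAP_ADDRESS_SPACE_SHIFT. */
static unsigned long next_block(unsigned long curr, unsigned int shift)
{
	curr >>= shift;
	curr++;
	return curr << shift;
}

int main(void)
{
	/* offset 5000 lies in block 0 of 16384 slots; next block is 16384 */
	printf("%lu\n", next_block(5000, 14));	/* prints 16384 */
	return 0;
}
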
/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check the swapcache flag without the folio lock
 * here because we are going to recheck again inside
 * folio_free_swap() _with_ the lock.
 * 					- Marcelo
 */
void free_swap_cache(struct page *page)
{
	struct folio *folio = page_folio(page);

	if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
	    folio_trylock(folio)) {
		folio_free_swap(folio);
		folio_unlock(folio);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
{
	lru_add_drain();
	for (int i = 0; i < nr; i++)
		free_swap_cache(encoded_page_ptr(pages[i]));
	release_pages(pages, nr);
}

static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Lookup a swap entry in the swap cache. A found folio will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the folio
 * lock before returning.
 */
struct folio *swap_cache_get_folio(swp_entry_t entry,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio;
	struct swap_info_struct *si;

	si = get_swap_device(entry);
	if (!si)
		return NULL;
	folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
	put_swap_device(si);

	if (folio) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(folio_test_large(folio)))
			return folio;

		readahead = folio_test_clear_readahead(folio);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	}

	return folio;
}

/**
 * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from filemap_get_folio() in that it will also look for the
 * folio in the swap cache.
 *
 * Return: The found folio or %NULL.
 */
struct folio *filemap_get_incore_folio(struct address_space *mapping,
		pgoff_t index)
{
	swp_entry_t swp;
	struct swap_info_struct *si;
	struct folio *folio = __filemap_get_folio(mapping, index, FGP_ENTRY, 0);

	if (!xa_is_value(folio))
		goto out;
	if (!shmem_mapping(mapping))
		return NULL;

	swp = radix_to_swp_entry(folio);
	/* There might be swapin error entries in shmem mapping. */
	if (non_swap_entry(swp))
		return NULL;
	/* Prevent swapoff from happening to us */
	si = get_swap_device(swp);
	if (!si)
		return NULL;
	index = swp_offset(swp);
	folio = filemap_get_folio(swap_address_space(swp), index);
	put_swap_device(si);
out:
	return folio;
}

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct swap_info_struct *si;
	struct folio *folio;
	void *shadow = NULL;

	*new_page_allocated = false;

	for (;;) {
		int err;
		/*
		 * First check the swap cache.  Since this is normally
		 * called after swap_cache_get_folio() failed, re-calling
		 * that would confuse statistics.
		 */
		si = get_swap_device(entry);
		if (!si)
			return NULL;
		folio = filemap_get_folio(swap_address_space(entry),
						swp_offset(entry));
		put_swap_device(si);
		if (folio)
			return folio_file_page(folio, swp_offset(entry));

		/*
		 * Just skip read ahead for unused swap slot.
		 * During swap_off when swap_slot_cache is disabled,
		 * we have to handle the race between putting
		 * swap entry in swap cache and marking swap slot
		 * as SWAP_HAS_CACHE.  That's done in later part of code or
		 * else swap_off will be aborted if we return NULL.
		 */
		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
			return NULL;

		/*
		 * Get a new page to read into from swap.  Allocate it now,
		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
		 * cause any racers to loop around until we add it to cache.
		 */
		folio = vma_alloc_folio(gfp_mask, 0, vma, addr, false);
		if (!folio)
			return NULL;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (!err)
			break;

		folio_put(folio);
		if (err != -EEXIST)
			return NULL;

		/*
		 * We might race against __delete_from_swap_cache(), and
		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
		 * has not yet been cleared.  Or race against another
		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
		 * in swap_map, but not yet added its page to swap cache.
		 */
		schedule_timeout_uninterruptible(1);
	}

	/*
	 * The swap entry is ours to swap in. Prepare the new page.
	 */

	__folio_set_locked(folio);
	__folio_set_swapbacked(folio);

	if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
		goto fail_unlock;

	/* May fail (-ENOMEM) if XArray node allocation failed. */
	if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
		goto fail_unlock;

	mem_cgroup_swapin_uncharge_swap(entry);

	if (shadow)
		workingset_refault(folio, shadow);

	/* Caller will initiate read into locked folio */
	folio_add_lru(folio);
	*new_page_allocated = true;
	return &folio->page;

fail_unlock:
	put_swap_folio(folio, entry);
	folio_unlock(folio);
	folio_put(folio);
	return NULL;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
				   struct vm_area_struct *vma,
				   unsigned long addr, bool do_poll,
				   struct swap_iocb **plug)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage, do_poll, plug);

	return retpage;
}

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	bool do_poll = true, page_allocated;
	struct vm_area_struct *vma = vmf->vma;
	unsigned long addr = vmf->address;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	do_poll = false;
	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false, &splug);
			if (offset != entry_offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	swap_read_unplug(splug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	/* The page was likely read above, so no need for plugging here */
	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll, NULL);
}

int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}

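For scale: a 1 GiB swap device holds 262144 4 KiB pages, and with SWAP_ADDRESS_SPACE_PAGES of 16384 (1 << 14 at the time of writing, an assumption) that yields DIV_ROUND_UP(262144, 16384) = 16 address spaces. The rounding idiom itself is easy to check in userspace:

#include <stdio.h>

/* The DIV_ROUND_UP idiom used above: integer division rounding up. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* 1 GiB of 4 KiB pages split into 16384-page swap address spaces */
	printf("%lu\n", DIV_ROUND_UP(262144UL, 16384UL));	/* 16 */
	/* a size that isn't an exact multiple still gets full coverage */
	printf("%lu\n", DIV_ROUND_UP(20000UL, 16384UL));	/* 2 */
	return 0;
}
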
void exit_swap_address_space(unsigned int type)
{
	int i;
	struct address_space *spaces = swapper_spaces[type];

	for (i = 0; i < nr_swapper_spaces[type]; i++)
		VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
	kvfree(spaces);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}

static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
				     unsigned long faddr,
				     unsigned long lpfn,
				     unsigned long rpfn,
				     unsigned long *start,
				     unsigned long *end)
{
	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
		      PFN_DOWN(faddr & PMD_MASK));
	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}

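The helper above trims a candidate readahead window so it never leaves the VMA or the PMD containing the fault. A self-contained sketch of the same clamping, assuming 4 KiB pages and 2 MiB PMDs; all names and addresses are illustrative:

#include <stdio.h>

#define PAGE_SHIFT	12		/* assumption: 4 KiB pages */
#define PMD_SIZE	(1UL << 21)	/* assumption: 2 MiB PMDs */
#define PMD_MASK	(~(PMD_SIZE - 1))
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

static unsigned long max3u(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long m = a > b ? a : b;
	return m > c ? m : c;
}

static unsigned long min3u(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long m = a < b ? a : b;
	return m < c ? m : c;
}

int main(void)
{
	unsigned long vm_start = 0x400000, vm_end = 0x800000;
	unsigned long faddr = 0x5ff000;	/* fault just below a PMD boundary */
	unsigned long lpfn = PFN_DOWN(faddr) - 8, rpfn = PFN_DOWN(faddr) + 8;
	unsigned long start, end;

	/* clamp [lpfn, rpfn) to the VMA and to the fault's PMD */
	start = max3u(lpfn, PFN_DOWN(vm_start), PFN_DOWN(faddr & PMD_MASK));
	end = min3u(rpfn, PFN_DOWN(vm_end),
		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
	printf("pfns %#lx..%#lx\n", start, end);	/* 0x5f7..0x600 */
	return 0;
}
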
static void swap_ra_info(struct vm_fault *vmf,
			struct vma_swap_readahead *ra_info)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	unsigned long faddr, pfn, fpfn;
	unsigned long start, end;
	pte_t *pte, *orig_pte;
	unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
	pte_t *tpte;
#endif

	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
			     SWAP_RA_ORDER_CEILING);
	if (max_win == 1) {
		ra_info->win = 1;
		return;
	}

	faddr = vmf->address;
	orig_pte = pte = pte_offset_map(vmf->pmd, faddr);

	fpfn = PFN_DOWN(faddr);
	ra_val = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));

	if (win == 1) {
		pte_unmap(orig_pte);
		return;
	}

	/* Copy the PTEs because the page table may be unmapped */
	if (fpfn == pfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
	else if (pfn == fpfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
				  &start, &end);
	else {
		left = (win - 1) / 2;
		swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
				  &start, &end);
	}
	ra_info->nr_pte = end - start;
	ra_info->offset = fpfn - start;
	pte -= ra_info->offset;
#ifdef CONFIG_64BIT
	ra_info->ptes = pte;
#else
	tpte = ra_info->ptes;
	for (pfn = start; pfn != end; pfn++)
		*tpte++ = *pte++;
#endif
	pte_unmap(orig_pte);
}

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @fentry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
				       struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	pte_t *pte, pentry;
	swp_entry_t entry;
	unsigned int i;
	bool page_allocated;
	struct vma_swap_readahead ra_info = {
		.win = 1,
	};

	swap_ra_info(vmf, &ra_info);
	if (ra_info.win == 1)
		goto skip;

	blk_start_plug(&plug);
	for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
	     i++, pte++) {
		pentry = *pte;
		if (!is_swap_pte(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       vmf->address, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false, &splug);
			if (i != ra_info.offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();
skip:
	/* The page was likely read above, so no need for plugging here */
	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
				     ra_info.win == 1, NULL);
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * It's the main entry function for swap readahead. Depending on the
 * configuration, it will read ahead blocks using either cluster-based
 * (ie, physical disk based) or vma-based (ie, virtual address based on
 * the fault address) readahead.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	return swap_use_vma_readahead() ?
			swap_vma_readahead(entry, gfp_mask, vmf) :
			swap_cluster_readahead(entry, gfp_mask, vmf);
}

#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	ssize_t ret;

	ret = kstrtobool(buf, &enable_vma_readahead);
	if (ret)
		return ret;

	return count;
}
static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static const struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif