v3.1
 
  1/*
  2 *  linux/mm/swap_state.c
  3 *
  4 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  5 *  Swap reorganised 29.12.95, Stephen Tweedie
  6 *
  7 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
  8 */
  9#include <linux/module.h>
 10#include <linux/mm.h>
 11#include <linux/gfp.h>
 12#include <linux/kernel_stat.h>
 13#include <linux/swap.h>
 14#include <linux/swapops.h>
 15#include <linux/init.h>
 16#include <linux/pagemap.h>
 17#include <linux/buffer_head.h>
 18#include <linux/backing-dev.h>
 19#include <linux/pagevec.h>
 20#include <linux/migrate.h>
 21#include <linux/page_cgroup.h>
 22
 23#include <asm/pgtable.h>
 24
 25/*
 26 * swapper_space is a fiction, retained to simplify the path through
 27 * vmscan's shrink_page_list.
 28 */
 29static const struct address_space_operations swap_aops = {
 30	.writepage	= swap_writepage,
 31	.set_page_dirty	= __set_page_dirty_nobuffers,
 32	.migratepage	= migrate_page,
 33};
 34
 35static struct backing_dev_info swap_backing_dev_info = {
 36	.name		= "swap",
 37	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
 38};
 39
 40struct address_space swapper_space = {
 41	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
 42	.tree_lock	= __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
 43	.a_ops		= &swap_aops,
 44	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
 45	.backing_dev_info = &swap_backing_dev_info,
 46};
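/*
 * Illustrative sketch, not part of this file: in this v3.1 code, membership
 * in the single global swap cache is recorded on the page itself.  PG_swapcache
 * is set and page->private holds the swp_entry_t value, which doubles as the
 * radix-tree index in swapper_space.page_tree, so the entry backing a swap
 * cache page can be recovered with:
 *
 *	swp_entry_t entry;
 *
 *	VM_BUG_ON(!PageSwapCache(page));
 *	entry.val = page_private(page);
 */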
 47
 48#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)
 49
 50static struct {
 51	unsigned long add_total;
 52	unsigned long del_total;
 53	unsigned long find_success;
 54	unsigned long find_total;
 55} swap_cache_info;
 56
 57void show_swap_cache_info(void)
 58{
 59	printk("%lu pages in swap cache\n", total_swapcache_pages);
 60	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
 61		swap_cache_info.add_total, swap_cache_info.del_total,
 62		swap_cache_info.find_success, swap_cache_info.find_total);
 63	printk("Free swap  = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
 64	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
 65}
 66
 67/*
 68 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 69 * but sets SwapCache flag and private instead of mapping and index.
 70 */
 71static int __add_to_swap_cache(struct page *page, swp_entry_t entry)
 72{
 73	int error;
 74
 75	VM_BUG_ON(!PageLocked(page));
 76	VM_BUG_ON(PageSwapCache(page));
 77	VM_BUG_ON(!PageSwapBacked(page));
 78
 79	page_cache_get(page);
 80	SetPageSwapCache(page);
 81	set_page_private(page, entry.val);
 82
 83	spin_lock_irq(&swapper_space.tree_lock);
 84	error = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
 85	if (likely(!error)) {
 86		total_swapcache_pages++;
 87		__inc_zone_page_state(page, NR_FILE_PAGES);
 88		INC_CACHE_INFO(add_total);
 89	}
 90	spin_unlock_irq(&swapper_space.tree_lock);
 91
 92	if (unlikely(error)) {
 93		/*
 94		 * Only the context which has set SWAP_HAS_CACHE flag
 95		 * would call add_to_swap_cache().
 96		 * So add_to_swap_cache() doesn't return -EEXIST.
 97		 */
 98		VM_BUG_ON(error == -EEXIST);
 99		set_page_private(page, 0UL);
100		ClearPageSwapCache(page);
101		page_cache_release(page);
102	}
103
104	return error;
105}
106
107
108int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
109{
110	int error;
111
112	error = radix_tree_preload(gfp_mask);
113	if (!error) {
114		error = __add_to_swap_cache(page, entry);
115		radix_tree_preload_end();
116	}
117	return error;
118}
119
120/*
121 * This must be called only on pages that have
122 * been verified to be in the swap cache.
123 */
124void __delete_from_swap_cache(struct page *page)
125{
126	VM_BUG_ON(!PageLocked(page));
127	VM_BUG_ON(!PageSwapCache(page));
128	VM_BUG_ON(PageWriteback(page));
129
130	radix_tree_delete(&swapper_space.page_tree, page_private(page));
131	set_page_private(page, 0);
132	ClearPageSwapCache(page);
133	total_swapcache_pages--;
134	__dec_zone_page_state(page, NR_FILE_PAGES);
135	INC_CACHE_INFO(del_total);
136}
137
138/**
139 * add_to_swap - allocate swap space for a page
140 * @page: page we want to move to swap
141 *
142 * Allocate swap space for the page and add the page to the
143 * swap cache.  Caller needs to hold the page lock. 
144 */
145int add_to_swap(struct page *page)
146{
147	swp_entry_t entry;
148	int err;
149
150	VM_BUG_ON(!PageLocked(page));
151	VM_BUG_ON(!PageUptodate(page));
152
153	entry = get_swap_page();
154	if (!entry.val)
155		return 0;
156
157	if (unlikely(PageTransHuge(page)))
158		if (unlikely(split_huge_page(page))) {
159			swapcache_free(entry, NULL);
160			return 0;
161		}
162
163	/*
164	 * Radix-tree node allocations from PF_MEMALLOC contexts could
165	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
166	 * stops emergency reserves from being allocated.
167	 *
168	 * TODO: this could cause a theoretical memory reclaim
169	 * deadlock in the swap out path.
170	 */
171	/*
172	 * Add it to the swap cache and mark it dirty
173	 */
174	err = add_to_swap_cache(page, entry,
175			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
176
177	if (!err) {	/* Success */
178		SetPageDirty(page);
179		return 1;
180	} else {	/* -ENOMEM radix-tree allocation failure */
181		/*
182		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
183		 * clear SWAP_HAS_CACHE flag.
184		 */
185		swapcache_free(entry, NULL);
186		return 0;
187	}
188}
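/*
 * Sketch of the usual caller, not part of this file: vmscan's
 * shrink_page_list() gives an anonymous page a swap slot and a swap cache
 * entry before treating it like a file page for writeback, roughly:
 *
 *	if (PageAnon(page) && !PageSwapCache(page)) {
 *		if (!add_to_swap(page))
 *			goto activate_locked;	(no swap slot left)
 *	}
 */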
189
190/*
191 * This must be called only on pages that have
192 * been verified to be in the swap cache and locked.
193 * It will never put the page into the free list,
194 * the caller has a reference on the page.
195 */
196void delete_from_swap_cache(struct page *page)
197{
198	swp_entry_t entry;
199
200	entry.val = page_private(page);
201
202	spin_lock_irq(&swapper_space.tree_lock);
203	__delete_from_swap_cache(page);
204	spin_unlock_irq(&swapper_space.tree_lock);
205
206	swapcache_free(entry, page);
207	page_cache_release(page);
208}
209
210/* 
211 * If we are the only user, then try to free up the swap cache. 
212 * 
 213 * It's ok to check for PageSwapCache without the page lock
214 * here because we are going to recheck again inside
215 * try_to_free_swap() _with_ the lock.
216 * 					- Marcelo
217 */
218static inline void free_swap_cache(struct page *page)
219{
220	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
221		try_to_free_swap(page);
222		unlock_page(page);
223	}
224}
225
226/* 
227 * Perform a free_page(), also freeing any swap cache associated with
228 * this page if it is the last user of the page.
229 */
230void free_page_and_swap_cache(struct page *page)
231{
232	free_swap_cache(page);
233	page_cache_release(page);
234}
235
236/*
237 * Passed an array of pages, drop them all from swapcache and then release
238 * them.  They are removed from the LRU and freed if this is their last use.
239 */
240void free_pages_and_swap_cache(struct page **pages, int nr)
241{
242	struct page **pagep = pages;
243
244	lru_add_drain();
245	while (nr) {
246		int todo = min(nr, PAGEVEC_SIZE);
247		int i;
248
249		for (i = 0; i < todo; i++)
250			free_swap_cache(pagep[i]);
251		release_pages(pagep, todo, 0);
252		pagep += todo;
253		nr -= todo;
254	}
255}
256
257/*
258 * Lookup a swap entry in the swap cache. A found page will be returned
259 * unlocked and with its refcount incremented - we rely on the kernel
260 * lock getting page table operations atomic even if we drop the page
261 * lock before returning.
262 */
263struct page * lookup_swap_cache(swp_entry_t entry)
264{
265	struct page *page;
266
267	page = find_get_page(&swapper_space, entry.val);
268
269	if (page)
270		INC_CACHE_INFO(find_success);
271
272	INC_CACHE_INFO(find_total);
273	return page;
274}
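/*
 * Sketch of the usual caller, not part of this file: the fault path in
 * mm/memory.c probes the swap cache first and only falls back to readahead
 * plus disk I/O on a miss, roughly:
 *
 *	page = lookup_swap_cache(entry);
 *	if (!page)
 *		page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
 *					vma, address);
 */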
275
276/* 
277 * Locate a page of swap in physical memory, reserving swap cache space
278 * and reading the disk if it is not already cached.
279 * A failure return means that either the page allocation failed or that
280 * the swap entry is no longer in use.
281 */
282struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
283			struct vm_area_struct *vma, unsigned long addr)
284{
285	struct page *found_page, *new_page = NULL;
286	int err;
287
288	do {
289		/*
290		 * First check the swap cache.  Since this is normally
291		 * called after lookup_swap_cache() failed, re-calling
292		 * that would confuse statistics.
293		 */
294		found_page = find_get_page(&swapper_space, entry.val);
295		if (found_page)
296			break;
297
298		/*
299		 * Get a new page to read into from swap.
300		 */
301		if (!new_page) {
302			new_page = alloc_page_vma(gfp_mask, vma, addr);
303			if (!new_page)
304				break;		/* Out of memory */
305		}
306
307		/*
308		 * call radix_tree_preload() while we can wait.
309		 */
310		err = radix_tree_preload(gfp_mask & GFP_KERNEL);
311		if (err)
312			break;
313
314		/*
315		 * Swap entry may have been freed since our caller observed it.
316		 */
317		err = swapcache_prepare(entry);
318		if (err == -EEXIST) {	/* seems racy */
319			radix_tree_preload_end();
320			continue;
321		}
322		if (err) {		/* swp entry is obsolete ? */
323			radix_tree_preload_end();
324			break;
325		}
326
327		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
328		__set_page_locked(new_page);
329		SetPageSwapBacked(new_page);
330		err = __add_to_swap_cache(new_page, entry);
331		if (likely(!err)) {
332			radix_tree_preload_end();
333			/*
334			 * Initiate read into locked page and return.
335			 */
336			lru_cache_add_anon(new_page);
337			swap_readpage(new_page);
338			return new_page;
339		}
340		radix_tree_preload_end();
341		ClearPageSwapBacked(new_page);
342		__clear_page_locked(new_page);
343		/*
344		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
345		 * clear SWAP_HAS_CACHE flag.
346		 */
347		swapcache_free(entry, NULL);
348	} while (err != -ENOMEM);
349
350	if (new_page)
351		page_cache_release(new_page);
352	return found_page;
353}
354
355/**
356 * swapin_readahead - swap in pages in hope we need them soon
357 * @entry: swap entry of this memory
358 * @gfp_mask: memory allocation flags
359 * @vma: user vma this address belongs to
360 * @addr: target address for mempolicy
361 *
362 * Returns the struct page for entry and addr, after queueing swapin.
363 *
364 * Primitive swap readahead code. We simply read an aligned block of
365 * (1 << page_cluster) entries in the swap area. This method is chosen
366 * because it doesn't cost us any seek time.  We also make sure to queue
367 * the 'original' request together with the readahead ones...
368 *
369 * This has been extended to use the NUMA policies from the mm triggering
370 * the readahead.
371 *
372 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
373 */
374struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
375			struct vm_area_struct *vma, unsigned long addr)
376{
377	int nr_pages;
378	struct page *page;
379	unsigned long offset;
380	unsigned long end_offset;
381
382	/*
383	 * Get starting offset for readaround, and number of pages to read.
384	 * Adjust starting address by readbehind (for NUMA interleave case)?
385	 * No, it's very unlikely that swap layout would follow vma layout,
386	 * more likely that neighbouring swap pages came from the same node:
387	 * so use the same "addr" to choose the same node for each swap read.
388	 */
389	nr_pages = valid_swaphandles(entry, &offset);
390	for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
391		/* Ok, do the async read-ahead now */
392		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
393						gfp_mask, vma, addr);
394		if (!page)
395			break;
396		page_cache_release(page);
397	}
398	lru_add_drain();	/* Push any new pages onto the LRU now */
399	return read_swap_cache_async(entry, gfp_mask, vma, addr);
400}
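/*
 * Worked example, assuming the common default page_cluster == 3: the window
 * is at most 1 << 3 == 8 slots, so a fault on swap slot 21 asks
 * valid_swaphandles() for the aligned block around it and queues async reads
 * for roughly slots 16..23 before synchronously reading the target entry.
 */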
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  linux/mm/swap_state.c
  4 *
  5 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  6 *  Swap reorganised 29.12.95, Stephen Tweedie
  7 *
  8 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
  9 */
 10#include <linux/mm.h>
 11#include <linux/gfp.h>
 12#include <linux/kernel_stat.h>
 13#include <linux/mempolicy.h>
 14#include <linux/swap.h>
 15#include <linux/swapops.h>
 16#include <linux/init.h>
 17#include <linux/pagemap.h>
 18#include <linux/pagevec.h>
 19#include <linux/backing-dev.h>
 20#include <linux/blkdev.h>
 21#include <linux/migrate.h>
 22#include <linux/vmalloc.h>
 23#include <linux/swap_slots.h>
 24#include <linux/huge_mm.h>
 25#include <linux/shmem_fs.h>
 26#include "internal.h"
 27#include "swap.h"
 28
 29/*
 30 * swapper_space is a fiction, retained to simplify the path through
 31 * vmscan's shrink_folio_list.
 32 */
 33static const struct address_space_operations swap_aops = {
 34	.writepage	= swap_writepage,
 35	.dirty_folio	= noop_dirty_folio,
 36#ifdef CONFIG_MIGRATION
 37	.migrate_folio	= migrate_folio,
 38#endif
 39};
 40
 41struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
 42static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
 43static bool enable_vma_readahead __read_mostly = true;
 44
 45#define SWAP_RA_ORDER_CEILING	5
 46
 47#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
 48#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
 49#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
 50#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)
 51
 52#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
 53#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
 54#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)
 55
 56#define SWAP_RA_VAL(addr, win, hits)				\
 57	(((addr) & PAGE_MASK) |					\
 58	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
 59	 ((hits) & SWAP_RA_HITS_MASK))
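/*
 * Layout sketch, assuming PAGE_SHIFT == 12 (so SWAP_RA_WIN_SHIFT == 6): the
 * per-VMA readahead state is one long packing three fields into the bits a
 * page-aligned address leaves free:
 *
 *	bits  0..5	hits	(SWAP_RA_HITS_MASK == 0x3f, max 63)
 *	bits  6..11	win	(SWAP_RA_WIN_MASK  == 0xfc0)
 *	bits 12..	addr	(page-aligned faulting address)
 *
 * e.g. SWAP_RA_VAL(0x7f1234568000, 8, 3) == 0x7f1234568000 | (8 << 6) | 3.
 */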
 60
 61/* Initial readahead hits is 4 to start up with a small window */
 62#define GET_SWAP_RA_VAL(vma)					\
 63	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
 64
 65static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
 66
 67void show_swap_cache_info(void)
 68{
 69	printk("%lu pages in swap cache\n", total_swapcache_pages());
 70	printk("Free swap  = %ldkB\n", K(get_nr_swap_pages()));
 71	printk("Total swap = %lukB\n", K(total_swap_pages));
 72}
 73
 74void *get_shadow_from_swap_cache(swp_entry_t entry)
 75{
 76	struct address_space *address_space = swap_address_space(entry);
 77	pgoff_t idx = swap_cache_index(entry);
 78	void *shadow;
 79
 80	shadow = xa_load(&address_space->i_pages, idx);
 81	if (xa_is_value(shadow))
 82		return shadow;
 83	return NULL;
 84}
 85
 86/*
 87 * add_to_swap_cache resembles filemap_add_folio on swapper_space,
 88 * but sets SwapCache flag and private instead of mapping and index.
 89 */
 90int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
 91			gfp_t gfp, void **shadowp)
 92{
 93	struct address_space *address_space = swap_address_space(entry);
 94	pgoff_t idx = swap_cache_index(entry);
 95	XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
 96	unsigned long i, nr = folio_nr_pages(folio);
 97	void *old;
 98
 99	xas_set_update(&xas, workingset_update_node);
100
101	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
102	VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
103	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
104
105	folio_ref_add(folio, nr);
106	folio_set_swapcache(folio);
107	folio->swap = entry;
108
109	do {
110		xas_lock_irq(&xas);
111		xas_create_range(&xas);
112		if (xas_error(&xas))
113			goto unlock;
114		for (i = 0; i < nr; i++) {
115			VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
116			if (shadowp) {
117				old = xas_load(&xas);
118				if (xa_is_value(old))
119					*shadowp = old;
120			}
121			xas_store(&xas, folio);
122			xas_next(&xas);
123		}
124		address_space->nrpages += nr;
125		__node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
126		__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
127unlock:
128		xas_unlock_irq(&xas);
129	} while (xas_nomem(&xas, gfp));
130
131	if (!xas_error(&xas))
132		return 0;
133
134	folio_clear_swapcache(folio);
135	folio_ref_sub(folio, nr);
136	return xas_error(&xas);
137}
138
139/*
140 * This must be called only on folios that have
141 * been verified to be in the swap cache.
142 */
143void __delete_from_swap_cache(struct folio *folio,
144			swp_entry_t entry, void *shadow)
145{
146	struct address_space *address_space = swap_address_space(entry);
147	int i;
148	long nr = folio_nr_pages(folio);
149	pgoff_t idx = swap_cache_index(entry);
150	XA_STATE(xas, &address_space->i_pages, idx);
151
152	xas_set_update(&xas, workingset_update_node);
153
154	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
155	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
156	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
157
158	for (i = 0; i < nr; i++) {
159		void *entry = xas_store(&xas, shadow);
160		VM_BUG_ON_PAGE(entry != folio, entry);
161		xas_next(&xas);
162	}
163	folio->swap.val = 0;
164	folio_clear_swapcache(folio);
165	address_space->nrpages -= nr;
166	__node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
167	__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
168}
169
170/**
171 * add_to_swap - allocate swap space for a folio
172 * @folio: folio we want to move to swap
173 *
174 * Allocate swap space for the folio and add the folio to the
175 * swap cache.
176 *
177 * Context: Caller needs to hold the folio lock.
178 * Return: Whether the folio was added to the swap cache.
179 */
180bool add_to_swap(struct folio *folio)
181{
182	swp_entry_t entry;
183	int err;
184
185	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
186	VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);
187
188	entry = folio_alloc_swap(folio);
189	if (!entry.val)
190		return false;
191
192	/*
193	 * XArray node allocations from PF_MEMALLOC contexts could
194	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
195	 * stops emergency reserves from being allocated.
196	 *
197	 * TODO: this could cause a theoretical memory reclaim
198	 * deadlock in the swap out path.
199	 */
200	/*
201	 * Add it to the swap cache.
202	 */
203	err = add_to_swap_cache(folio, entry,
204			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
205	if (err)
206		/*
207		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
208		 * clear SWAP_HAS_CACHE flag.
209		 */
210		goto fail;
211	/*
212	 * Normally the folio will be dirtied in unmap because its
213	 * pte should be dirty. A special case is MADV_FREE page. The
214	 * page's pte could have dirty bit cleared but the folio's
215	 * SwapBacked flag is still set because clearing the dirty bit
216	 * and SwapBacked flag is not protected by a lock. For such folio,
217	 * unmap will not set dirty bit for it, so folio reclaim will
218	 * not write the folio out. This can cause data corruption when
219	 * the folio is swapped in later. Always setting the dirty flag
220	 * for the folio solves the problem.
221	 */
222	folio_mark_dirty(folio);
223
224	return true;
225
226fail:
227	put_swap_folio(folio, entry);
228	return false;
229}
230
231/*
232 * This must be called only on folios that have
233 * been verified to be in the swap cache and locked.
234 * It will never put the folio into the free list,
235 * the caller has a reference on the folio.
236 */
237void delete_from_swap_cache(struct folio *folio)
238{
239	swp_entry_t entry = folio->swap;
240	struct address_space *address_space = swap_address_space(entry);
241
242	xa_lock_irq(&address_space->i_pages);
243	__delete_from_swap_cache(folio, entry, NULL);
244	xa_unlock_irq(&address_space->i_pages);
245
246	put_swap_folio(folio, entry);
247	folio_ref_sub(folio, folio_nr_pages(folio));
248}
249
250void clear_shadow_from_swap_cache(int type, unsigned long begin,
251				unsigned long end)
252{
253	unsigned long curr = begin;
254	void *old;
255
256	for (;;) {
257		swp_entry_t entry = swp_entry(type, curr);
258		unsigned long index = curr & SWAP_ADDRESS_SPACE_MASK;
259		struct address_space *address_space = swap_address_space(entry);
260		XA_STATE(xas, &address_space->i_pages, index);
261
262		xas_set_update(&xas, workingset_update_node);
263
264		xa_lock_irq(&address_space->i_pages);
265		xas_for_each(&xas, old, min(index + (end - curr), SWAP_ADDRESS_SPACE_PAGES)) {
266			if (!xa_is_value(old))
267				continue;
268			xas_store(&xas, NULL);
269		}
270		xa_unlock_irq(&address_space->i_pages);
271
272		/* search the next swapcache until we meet end */
273		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
274		curr++;
275		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
276		if (curr > end)
277			break;
278	}
279}
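/*
 * The shift/increment/shift at the bottom of the loop above rounds curr up
 * to the first offset of the next swap address space chunk (assuming
 * SWAP_ADDRESS_SPACE_SHIFT == 14, i.e. 16384 slots per address_space):
 * e.g. curr == 20000 becomes 20000 >> 14 == 1, then 2, then 2 << 14 == 32768.
 */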
280
281/*
282 * If we are the only user, then try to free up the swap cache.
283 *
 284 * It's ok to check the swapcache flag without the folio lock
285 * here because we are going to recheck again inside
286 * folio_free_swap() _with_ the lock.
287 * 					- Marcelo
288 */
289void free_swap_cache(struct folio *folio)
290{
291	if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
292	    folio_trylock(folio)) {
293		folio_free_swap(folio);
294		folio_unlock(folio);
295	}
296}
297
298/*
299 * Perform a free_page(), also freeing any swap cache associated with
300 * this page if it is the last user of the page.
301 */
302void free_page_and_swap_cache(struct page *page)
303{
304	struct folio *folio = page_folio(page);
305
306	free_swap_cache(folio);
307	if (!is_huge_zero_folio(folio))
308		folio_put(folio);
309}
310
311/*
312 * Passed an array of pages, drop them all from swapcache and then release
313 * them.  They are removed from the LRU and freed if this is their last use.
314 */
315void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
316{
317	struct folio_batch folios;
318	unsigned int refs[PAGEVEC_SIZE];
319
320	lru_add_drain();
321	folio_batch_init(&folios);
322	for (int i = 0; i < nr; i++) {
323		struct folio *folio = page_folio(encoded_page_ptr(pages[i]));
324
325		free_swap_cache(folio);
326		refs[folios.nr] = 1;
327		if (unlikely(encoded_page_flags(pages[i]) &
328			     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
329			refs[folios.nr] = encoded_nr_pages(pages[++i]);
330
331		if (folio_batch_add(&folios, folio) == 0)
332			folios_put_refs(&folios, refs);
333	}
334	if (folios.nr)
335		folios_put_refs(&folios, refs);
336}
337
338static inline bool swap_use_vma_readahead(void)
339{
340	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
341}
342
343/*
344 * Lookup a swap entry in the swap cache. A found folio will be returned
345 * unlocked and with its refcount incremented - we rely on the kernel
346 * lock getting page table operations atomic even if we drop the folio
347 * lock before returning.
348 *
349 * Caller must lock the swap device or hold a reference to keep it valid.
350 */
351struct folio *swap_cache_get_folio(swp_entry_t entry,
352		struct vm_area_struct *vma, unsigned long addr)
353{
354	struct folio *folio;
355
356	folio = filemap_get_folio(swap_address_space(entry), swap_cache_index(entry));
357	if (!IS_ERR(folio)) {
358		bool vma_ra = swap_use_vma_readahead();
359		bool readahead;
360
361		/*
362		 * At the moment, we don't support PG_readahead for anon THP
363		 * so let's bail out rather than confusing the readahead stat.
364		 */
365		if (unlikely(folio_test_large(folio)))
366			return folio;
367
368		readahead = folio_test_clear_readahead(folio);
369		if (vma && vma_ra) {
370			unsigned long ra_val;
371			int win, hits;
372
373			ra_val = GET_SWAP_RA_VAL(vma);
374			win = SWAP_RA_WIN(ra_val);
375			hits = SWAP_RA_HITS(ra_val);
376			if (readahead)
377				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
378			atomic_long_set(&vma->swap_readahead_info,
379					SWAP_RA_VAL(addr, win, hits));
380		}
381
382		if (readahead) {
383			count_vm_event(SWAP_RA_HIT);
384			if (!vma || !vma_ra)
385				atomic_inc(&swapin_readahead_hits);
386		}
387	} else {
388		folio = NULL;
389	}
390
391	return folio;
392}
393
394/**
395 * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
396 * @mapping: The address_space to search.
397 * @index: The page cache index.
398 *
399 * This differs from filemap_get_folio() in that it will also look for the
400 * folio in the swap cache.
401 *
402 * Return: The found folio or %NULL.
403 */
404struct folio *filemap_get_incore_folio(struct address_space *mapping,
405		pgoff_t index)
406{
407	swp_entry_t swp;
408	struct swap_info_struct *si;
409	struct folio *folio = filemap_get_entry(mapping, index);
410
411	if (!folio)
412		return ERR_PTR(-ENOENT);
413	if (!xa_is_value(folio))
414		return folio;
415	if (!shmem_mapping(mapping))
416		return ERR_PTR(-ENOENT);
417
418	swp = radix_to_swp_entry(folio);
419	/* There might be swapin error entries in shmem mapping. */
420	if (non_swap_entry(swp))
421		return ERR_PTR(-ENOENT);
422	/* Prevent swapoff from happening to us */
423	si = get_swap_device(swp);
424	if (!si)
425		return ERR_PTR(-ENOENT);
426	index = swap_cache_index(swp);
427	folio = filemap_get_folio(swap_address_space(swp), index);
428	put_swap_device(si);
429	return folio;
430}
431
432struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
433		struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
434		bool skip_if_exists)
435{
436	struct swap_info_struct *si;
437	struct folio *folio;
438	struct folio *new_folio = NULL;
439	struct folio *result = NULL;
440	void *shadow = NULL;
441
442	*new_page_allocated = false;
443	si = get_swap_device(entry);
444	if (!si)
445		return NULL;
446
447	for (;;) {
448		int err;
449		/*
450		 * First check the swap cache.  Since this is normally
451		 * called after swap_cache_get_folio() failed, re-calling
452		 * that would confuse statistics.
453		 */
454		folio = filemap_get_folio(swap_address_space(entry),
455					  swap_cache_index(entry));
456		if (!IS_ERR(folio))
457			goto got_folio;
458
459		/*
460		 * Just skip read ahead for unused swap slot.
461		 * During swap_off when swap_slot_cache is disabled,
462		 * we have to handle the race between putting
463		 * swap entry in swap cache and marking swap slot
464		 * as SWAP_HAS_CACHE.  That's done in later part of code or
465		 * else swap_off will be aborted if we return NULL.
466		 */
467		if (!swap_swapcount(si, entry) && swap_slot_cache_enabled)
468			goto put_and_return;
469
470		/*
471		 * Get a new folio to read into from swap.  Allocate it now if
 472		 * new_folio does not exist, before marking swap_map SWAP_HAS_CACHE,
473		 * when -EEXIST will cause any racers to loop around until we
474		 * add it to cache.
475		 */
476		if (!new_folio) {
477			new_folio = folio_alloc_mpol(gfp_mask, 0, mpol, ilx, numa_node_id());
478			if (!new_folio)
479				goto put_and_return;
480		}
481
482		/*
483		 * Swap entry may have been freed since our caller observed it.
484		 */
485		err = swapcache_prepare(entry, 1);
486		if (!err)
487			break;
488		else if (err != -EEXIST)
489			goto put_and_return;
490
491		/*
492		 * Protect against a recursive call to __read_swap_cache_async()
493		 * on the same entry waiting forever here because SWAP_HAS_CACHE
494		 * is set but the folio is not the swap cache yet. This can
495		 * happen today if mem_cgroup_swapin_charge_folio() below
496		 * triggers reclaim through zswap, which may call
497		 * __read_swap_cache_async() in the writeback path.
498		 */
499		if (skip_if_exists)
500			goto put_and_return;
501
502		/*
503		 * We might race against __delete_from_swap_cache(), and
504		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
505		 * has not yet been cleared.  Or race against another
506		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
507		 * in swap_map, but not yet added its folio to swap cache.
508		 */
509		schedule_timeout_uninterruptible(1);
510	}
511
512	/*
513	 * The swap entry is ours to swap in. Prepare the new folio.
514	 */
515	__folio_set_locked(new_folio);
516	__folio_set_swapbacked(new_folio);
517
518	if (mem_cgroup_swapin_charge_folio(new_folio, NULL, gfp_mask, entry))
519		goto fail_unlock;
520
521	/* May fail (-ENOMEM) if XArray node allocation failed. */
522	if (add_to_swap_cache(new_folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
523		goto fail_unlock;
524
525	mem_cgroup_swapin_uncharge_swap(entry, 1);
526
527	if (shadow)
528		workingset_refault(new_folio, shadow);
529
530	/* Caller will initiate read into locked new_folio */
531	folio_add_lru(new_folio);
532	*new_page_allocated = true;
533	folio = new_folio;
534got_folio:
535	result = folio;
536	goto put_and_return;
537
538fail_unlock:
539	put_swap_folio(new_folio, entry);
540	folio_unlock(new_folio);
541put_and_return:
542	put_swap_device(si);
543	if (!(*new_page_allocated) && new_folio)
544		folio_put(new_folio);
545	return result;
546}
547
548/*
549 * Locate a page of swap in physical memory, reserving swap cache space
550 * and reading the disk if it is not already cached.
551 * A failure return means that either the page allocation failed or that
552 * the swap entry is no longer in use.
553 *
554 * get/put_swap_device() aren't needed to call this function, because
555 * __read_swap_cache_async() call them and swap_read_folio() holds the
556 * swap cache folio lock.
557 */
558struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
559		struct vm_area_struct *vma, unsigned long addr,
560		struct swap_iocb **plug)
561{
562	bool page_allocated;
563	struct mempolicy *mpol;
564	pgoff_t ilx;
565	struct folio *folio;
566
567	mpol = get_vma_policy(vma, addr, 0, &ilx);
568	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
569					&page_allocated, false);
570	mpol_cond_put(mpol);
571
572	if (page_allocated)
573		swap_read_folio(folio, plug);
574	return folio;
575}
576
577static unsigned int __swapin_nr_pages(unsigned long prev_offset,
578				      unsigned long offset,
579				      int hits,
580				      int max_pages,
581				      int prev_win)
582{
583	unsigned int pages, last_ra;
584
585	/*
586	 * This heuristic has been found to work well on both sequential and
587	 * random loads, swapping to hard disk or to SSD: please don't ask
588	 * what the "+ 2" means, it just happens to work well, that's all.
589	 */
590	pages = hits + 2;
591	if (pages == 2) {
592		/*
593		 * We can have no readahead hits to judge by: but must not get
594		 * stuck here forever, so check for an adjacent offset instead
595		 * (and don't even bother to check whether swap type is same).
596		 */
597		if (offset != prev_offset + 1 && offset != prev_offset - 1)
598			pages = 1;
599	} else {
600		unsigned int roundup = 4;
601		while (roundup < pages)
602			roundup <<= 1;
603		pages = roundup;
604	}
605
606	if (pages > max_pages)
607		pages = max_pages;
608
609	/* Don't shrink readahead too fast */
610	last_ra = prev_win / 2;
611	if (pages < last_ra)
612		pages = last_ra;
613
614	return pages;
615}
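/*
 * Worked example of the heuristic above, with max_pages == 8: hits == 1 and
 * prev_win == 8 gives pages = 1 + 2 = 3, rounded up to 4, which is not below
 * prev_win / 2 == 4, so the next window is 4 pages.  With hits == 0 the
 * window collapses to 2 when the offset is adjacent to prev_offset and to 1
 * otherwise, while six or more hits saturate it at max_pages.
 */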
616
617static unsigned long swapin_nr_pages(unsigned long offset)
618{
619	static unsigned long prev_offset;
620	unsigned int hits, pages, max_pages;
621	static atomic_t last_readahead_pages;
622
623	max_pages = 1 << READ_ONCE(page_cluster);
624	if (max_pages <= 1)
625		return 1;
626
627	hits = atomic_xchg(&swapin_readahead_hits, 0);
628	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
629				  max_pages,
630				  atomic_read(&last_readahead_pages));
631	if (!hits)
632		WRITE_ONCE(prev_offset, offset);
633	atomic_set(&last_readahead_pages, pages);
634
635	return pages;
636}
637
638/**
639 * swap_cluster_readahead - swap in pages in hope we need them soon
640 * @entry: swap entry of this memory
641 * @gfp_mask: memory allocation flags
642 * @mpol: NUMA memory allocation policy to be applied
643 * @ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
644 *
645 * Returns the struct folio for entry and addr, after queueing swapin.
646 *
647 * Primitive swap readahead code. We simply read an aligned block of
648 * (1 << page_cluster) entries in the swap area. This method is chosen
649 * because it doesn't cost us any seek time.  We also make sure to queue
650 * the 'original' request together with the readahead ones...
651 *
652 * Note: it is intentional that the same NUMA policy and interleave index
653 * are used for every page of the readahead: neighbouring pages on swap
654 * are fairly likely to have been swapped out from the same node.
655 */
656struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
657				    struct mempolicy *mpol, pgoff_t ilx)
658{
659	struct folio *folio;
660	unsigned long entry_offset = swp_offset(entry);
661	unsigned long offset = entry_offset;
662	unsigned long start_offset, end_offset;
663	unsigned long mask;
664	struct swap_info_struct *si = swp_swap_info(entry);
665	struct blk_plug plug;
666	struct swap_iocb *splug = NULL;
667	bool page_allocated;
668
669	mask = swapin_nr_pages(offset) - 1;
670	if (!mask)
671		goto skip;
672
673	/* Read a page_cluster sized and aligned cluster around offset. */
674	start_offset = offset & ~mask;
675	end_offset = offset | mask;
676	if (!start_offset)	/* First page is swap header. */
677		start_offset++;
678	if (end_offset >= si->max)
679		end_offset = si->max - 1;
680
681	blk_start_plug(&plug);
682	for (offset = start_offset; offset <= end_offset ; offset++) {
683		/* Ok, do the async read-ahead now */
684		folio = __read_swap_cache_async(
685				swp_entry(swp_type(entry), offset),
686				gfp_mask, mpol, ilx, &page_allocated, false);
687		if (!folio)
688			continue;
689		if (page_allocated) {
690			swap_read_folio(folio, &splug);
691			if (offset != entry_offset) {
692				folio_set_readahead(folio);
693				count_vm_event(SWAP_RA);
694			}
695		}
696		folio_put(folio);
697	}
698	blk_finish_plug(&plug);
699	swap_read_unplug(splug);
700	lru_add_drain();	/* Push any new pages onto the LRU now */
701skip:
702	/* The page was likely read above, so no need for plugging here */
703	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
704					&page_allocated, false);
705	if (unlikely(page_allocated))
706		swap_read_folio(folio, NULL);
707	return folio;
708}
709
710int init_swap_address_space(unsigned int type, unsigned long nr_pages)
711{
712	struct address_space *spaces, *space;
713	unsigned int i, nr;
714
715	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
716	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
717	if (!spaces)
718		return -ENOMEM;
719	for (i = 0; i < nr; i++) {
720		space = spaces + i;
721		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
722		atomic_set(&space->i_mmap_writable, 0);
723		space->a_ops = &swap_aops;
724		/* swap cache doesn't use writeback related tags */
725		mapping_set_no_writeback_tags(space);
726	}
727	nr_swapper_spaces[type] = nr;
728	swapper_spaces[type] = spaces;
729
730	return 0;
731}
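/*
 * Sketch, not part of this file: each swap device gets one address_space per
 * SWAP_ADDRESS_SPACE_PAGES slots to spread tree and lock contention, and
 * swap_address_space() in <linux/swap.h> picks the right one, essentially:
 *
 *	&swapper_spaces[swp_type(entry)][swp_offset(entry) >>
 *					 SWAP_ADDRESS_SPACE_SHIFT]
 */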
732
733void exit_swap_address_space(unsigned int type)
734{
735	int i;
736	struct address_space *spaces = swapper_spaces[type];
737
738	for (i = 0; i < nr_swapper_spaces[type]; i++)
739		VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
740	kvfree(spaces);
741	nr_swapper_spaces[type] = 0;
742	swapper_spaces[type] = NULL;
743}
744
745static int swap_vma_ra_win(struct vm_fault *vmf, unsigned long *start,
746			   unsigned long *end)
747{
748	struct vm_area_struct *vma = vmf->vma;
749	unsigned long ra_val;
750	unsigned long faddr, prev_faddr, left, right;
751	unsigned int max_win, hits, prev_win, win;
752
753	max_win = 1 << min(READ_ONCE(page_cluster), SWAP_RA_ORDER_CEILING);
754	if (max_win == 1)
755		return 1;
756
757	faddr = vmf->address;
758	ra_val = GET_SWAP_RA_VAL(vma);
759	prev_faddr = SWAP_RA_ADDR(ra_val);
760	prev_win = SWAP_RA_WIN(ra_val);
761	hits = SWAP_RA_HITS(ra_val);
762	win = __swapin_nr_pages(PFN_DOWN(prev_faddr), PFN_DOWN(faddr), hits,
763				max_win, prev_win);
764	atomic_long_set(&vma->swap_readahead_info, SWAP_RA_VAL(faddr, win, 0));
765	if (win == 1)
766		return 1;
767
768	if (faddr == prev_faddr + PAGE_SIZE)
769		left = faddr;
770	else if (prev_faddr == faddr + PAGE_SIZE)
771		left = faddr - (win << PAGE_SHIFT) + PAGE_SIZE;
772	else
773		left = faddr - (((win - 1) / 2) << PAGE_SHIFT);
774	right = left + (win << PAGE_SHIFT);
775	if ((long)left < 0)
776		left = 0;
777	*start = max3(left, vma->vm_start, faddr & PMD_MASK);
778	*end = min3(right, vma->vm_end, (faddr & PMD_MASK) + PMD_SIZE);
779
780	return win;
781}
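/*
 * Placement example for win == 8: a forward sequential fault
 * (faddr == prev_faddr + PAGE_SIZE) reads the 8 pages starting at the fault
 * address; a backward sequential fault reads the 8 pages ending at it; any
 * other pattern centres the window, (win - 1) / 2 == 3 pages behind the
 * faulting page and 4 ahead.  The result is then clamped to the VMA and to
 * the PMD containing the fault address.
 */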
782
783/**
784 * swap_vma_readahead - swap in pages in hope we need them soon
785 * @targ_entry: swap entry of the targeted memory
786 * @gfp_mask: memory allocation flags
787 * @mpol: NUMA memory allocation policy to be applied
788 * @targ_ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
789 * @vmf: fault information
790 *
791 * Returns the struct folio for entry and addr, after queueing swapin.
792 *
793 * Primitive swap readahead code. We simply read in a few pages whose
794 * virtual addresses are around the fault address in the same vma.
795 *
796 * Caller must hold read mmap_lock if vmf->vma is not NULL.
797 *
798 */
799static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
800		struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)
801{
802	struct blk_plug plug;
803	struct swap_iocb *splug = NULL;
804	struct folio *folio;
805	pte_t *pte = NULL, pentry;
806	int win;
807	unsigned long start, end, addr;
808	swp_entry_t entry;
809	pgoff_t ilx;
810	bool page_allocated;
811
812	win = swap_vma_ra_win(vmf, &start, &end);
813	if (win == 1)
814		goto skip;
815
816	ilx = targ_ilx - PFN_DOWN(vmf->address - start);
817
818	blk_start_plug(&plug);
819	for (addr = start; addr < end; ilx++, addr += PAGE_SIZE) {
820		if (!pte++) {
821			pte = pte_offset_map(vmf->pmd, addr);
822			if (!pte)
823				break;
824		}
825		pentry = ptep_get_lockless(pte);
826		if (!is_swap_pte(pentry))
827			continue;
828		entry = pte_to_swp_entry(pentry);
829		if (unlikely(non_swap_entry(entry)))
830			continue;
831		pte_unmap(pte);
832		pte = NULL;
833		folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
834						&page_allocated, false);
835		if (!folio)
836			continue;
837		if (page_allocated) {
838			swap_read_folio(folio, &splug);
839			if (addr != vmf->address) {
840				folio_set_readahead(folio);
841				count_vm_event(SWAP_RA);
842			}
843		}
844		folio_put(folio);
845	}
846	if (pte)
847		pte_unmap(pte);
848	blk_finish_plug(&plug);
849	swap_read_unplug(splug);
850	lru_add_drain();
851skip:
852	/* The folio was likely read above, so no need for plugging here */
853	folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
854					&page_allocated, false);
855	if (unlikely(page_allocated))
856		swap_read_folio(folio, NULL);
857	return folio;
858}
859
860/**
861 * swapin_readahead - swap in pages in hope we need them soon
862 * @entry: swap entry of this memory
863 * @gfp_mask: memory allocation flags
864 * @vmf: fault information
865 *
866 * Returns the struct folio for entry and addr, after queueing swapin.
867 *
 868 * It's the main entry function for swap readahead. Depending on the configuration,
 869 * it will read ahead blocks by cluster-based (i.e. physical disk based)
 870 * or vma-based (i.e. virtual address based on the fault address) readahead.
871 */
872struct folio *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
873				struct vm_fault *vmf)
874{
875	struct mempolicy *mpol;
876	pgoff_t ilx;
877	struct folio *folio;
878
879	mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
880	folio = swap_use_vma_readahead() ?
881		swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
882		swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
883	mpol_cond_put(mpol);
884
885	return folio;
886}
887
888#ifdef CONFIG_SYSFS
889static ssize_t vma_ra_enabled_show(struct kobject *kobj,
890				     struct kobj_attribute *attr, char *buf)
891{
892	return sysfs_emit(buf, "%s\n", str_true_false(enable_vma_readahead));
893}
894static ssize_t vma_ra_enabled_store(struct kobject *kobj,
895				      struct kobj_attribute *attr,
896				      const char *buf, size_t count)
897{
898	ssize_t ret;
899
900	ret = kstrtobool(buf, &enable_vma_readahead);
901	if (ret)
902		return ret;
903
904	return count;
905}
906static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);
907
908static struct attribute *swap_attrs[] = {
909	&vma_ra_enabled_attr.attr,
910	NULL,
911};
912
913static const struct attribute_group swap_attr_group = {
914	.attrs = swap_attrs,
915};
916
917static int __init swap_init_sysfs(void)
918{
919	int err;
920	struct kobject *swap_kobj;
921
922	swap_kobj = kobject_create_and_add("swap", mm_kobj);
923	if (!swap_kobj) {
924		pr_err("failed to create swap kobject\n");
925		return -ENOMEM;
926	}
927	err = sysfs_create_group(swap_kobj, &swap_attr_group);
928	if (err) {
929		pr_err("failed to register swap group\n");
930		goto delete_obj;
931	}
932	return 0;
933
934delete_obj:
935	kobject_put(swap_kobj);
936	return err;
937}
938subsys_initcall(swap_init_sysfs);
939#endif
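/*
 * The attribute registered above is visible as
 * /sys/kernel/mm/swap/vma_ra_enabled (mm_kobj is /sys/kernel/mm); writing
 * 0/n disables VMA-based readahead and falls back to cluster readahead, e.g.:
 *
 *	echo 0 > /sys/kernel/mm/swap/vma_ra_enabled
 */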