v3.15
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
	.migratepage	= migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
	.name		= "swap",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

struct address_space swapper_spaces[MAX_SWAPFILES] = {
	[0 ... MAX_SWAPFILES - 1] = {
		.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
		.a_ops		= &swap_aops,
		.backing_dev_info = &swap_backing_dev_info,
	}
};

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)
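
/*
 * Note: the do { ... } while (0) wrapper above makes INC_CACHE_INFO()
 * expand to a single statement, so it stays safe inside an unbraced
 * if/else. The counters themselves are bumped without any locking:
 * they are rough statistics for show_swap_cache_info(), not exact
 * accounting.
 */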

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
	int i;
	unsigned long ret = 0;

	for (i = 0; i < MAX_SWAPFILES; i++)
		ret += swapper_spaces[i].nrpages;
	return ret;
}

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_cache_get(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	error = radix_tree_insert(&address_space->page_tree,
					entry.val, page);
	if (likely(!error)) {
		address_space->nrpages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&address_space->tree_lock);

	if (unlikely(error)) {
		/*
		 * Only the context which has set the SWAP_HAS_CACHE flag
		 * would call add_to_swap_cache(), so add_to_swap_cache()
		 * doesn't return -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		page_cache_release(page);
	}

	return error;
}
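
/*
 * Note on the protocol above: on success the page leaves
 * __add_to_swap_cache() with an extra reference, PG_swapcache set and
 * the swp_entry_t value stashed in page->private, and the radix tree
 * slot for the entry pointing at the page. On failure every one of
 * those steps is unwound before returning, so callers only ever see an
 * all-or-nothing transition.
 */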


int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	radix_tree_delete(&address_space->page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	address_space->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @list: list to put any tail pages on if the page must first be split
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page, struct list_head *list)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page();
	if (!entry.val)
		return 0;

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page_to_list(page, list))) {
			swapcache_free(entry, NULL);
			return 0;
		}

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache and mark it dirty.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {	/* Success */
		SetPageDirty(page);
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
		return 0;
	}
}
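
/*
 * Typical caller, sketched from vmscan's shrink_page_list() (details
 * differ between kernel versions):
 *
 *	if (PageAnon(page) && !PageSwapCache(page)) {
 *		if (!add_to_swap(page, page_list))
 *			goto activate_locked;
 *		may_enter_fs = 1;
 *	}
 *
 * A zero return just means "could not move this page to swap right
 * now"; reclaim keeps the page in memory instead of treating it as an
 * error.
 */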

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&address_space->tree_lock);

	swapcache_free(entry, page);
	page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;

	lru_add_drain();
	while (nr) {
		int todo = min(nr, PAGEVEC_SIZE);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, 0);
		pagep += todo;
		nr -= todo;
	}
}
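
/*
 * The PAGEVEC_SIZE batching above bounds how much work each
 * release_pages() call does at once (and so how long it may hold the
 * zone lru_lock), which is presumably why the whole array is not
 * handed over in a single call.
 */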

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock to keep page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(swap_address_space(entry), entry.val);

	if (page) {
		INC_CACHE_INFO(find_success);
		if (TestClearPageReadahead(page))
			atomic_inc(&swapin_readahead_hits);
	}

	INC_CACHE_INFO(find_total);
	return page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swap_address_space(entry),
					entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet, while
			 * the other end is scheduled away waiting on discard
			 * I/O completion at scan_swap_map().
			 *
			 * In order to avoid turning this transitory state
			 * into a permanent loop around this -EEXIST case
			 * if !CONFIG_PREEMPT and the I/O completion happens
			 * to be waiting on the CPU waitqueue where we are now
			 * busy looping, we just conditionally invoke the
			 * scheduler here, if there are some more important
			 * tasks to run.
			 */
			cond_resched();
			continue;
		}
		if (err) {		/* swap entry is obsolete? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			swap_readpage(new_page);
			return new_page;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int pages, max_pages, last_ra;
	static atomic_t last_readahead_pages;

	max_pages = 1 << ACCESS_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
		prev_offset = offset;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = atomic_read(&last_readahead_pages) / 2;
	if (pages < last_ra)
		pages = last_ra;
	atomic_set(&last_readahead_pages, pages);

	return pages;
}
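
/*
 * Worked example with hypothetical numbers: page_cluster = 3 gives
 * max_pages = 8. Five readahead hits since the last fault give
 * pages = 5 + 2 = 7, rounded up to the next power of two = 8, within
 * max_pages. With no hits and a non-adjacent offset, pages drops to 1,
 * but if the previous call returned 8, the "don't shrink readahead too
 * fast" rule lifts it back to last_ra = 8 / 2 = 4.
 */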

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct blk_plug plug;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
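
	/*
	 * Example with hypothetical values: if swapin_nr_pages() returned
	 * 8, mask = 7. For offset 0x12345, start_offset = 0x12345 & ~7 =
	 * 0x12340 and end_offset = 0x12345 | 7 = 0x12347: the aligned
	 * 8-slot window containing the faulting entry.
	 */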

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			continue;
		if (offset != entry_offset)
			SetPageReadahead(page);
		page_cache_release(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}

v4.10.11
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

struct address_space swapper_spaces[MAX_SWAPFILES] = {
	[0 ... MAX_SWAPFILES - 1] = {
		.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
		.i_mmap_writable = ATOMIC_INIT(0),
		.a_ops		= &swap_aops,
		/* swap cache doesn't use writeback related tags */
		.flags		= 1 << AS_NO_WRITEBACK_TAGS,
	}
};

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
	int i;
	unsigned long ret = 0;

	for (i = 0; i < MAX_SWAPFILES; i++)
		ret += swapper_spaces[i].nrpages;
	return ret;
}

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	get_page(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	error = radix_tree_insert(&address_space->page_tree,
				  swp_offset(entry), page);
	if (likely(!error)) {
		address_space->nrpages++;
		__inc_node_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&address_space->tree_lock);

	if (unlikely(error)) {
		/*
		 * Only the context which has set the SWAP_HAS_CACHE flag
		 * would call add_to_swap_cache(), so add_to_swap_cache()
		 * doesn't return -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		put_page(page);
	}

	return error;
}
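
/*
 * Note: unlike the v3.15 code above, which keyed the radix tree by the
 * full swp_entry_t value, the tree here is indexed by swp_offset(entry)
 * alone. swap_address_space() already picks a separate address_space
 * per swap type, so the offset is unambiguous and the tree keys stay
 * small and dense.
 */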


int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	radix_tree_delete(&address_space->page_tree, swp_offset(entry));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	address_space->nrpages--;
	__dec_node_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @list: list to put any tail pages on if the page must first be split
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page, struct list_head *list)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page();
	if (!entry.val)
		return 0;

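	/*
	 * Charge the swap slot to the page's memory cgroup before doing
	 * anything else (a step the v3.15 code above does not have); if
	 * the cgroup's swap limit is hit, give the slot back and fail.
	 */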
	if (mem_cgroup_try_charge_swap(page, entry)) {
		swapcache_free(entry);
		return 0;
	}

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page_to_list(page, list))) {
			swapcache_free(entry);
			return 0;
		}

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
		return 0;
	}
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&address_space->tree_lock);

	swapcache_free(entry);
	put_page(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr, false);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock to keep page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(swap_address_space(entry), swp_offset(entry));

	if (page) {
		INC_CACHE_INFO(find_success);
		if (TestClearPageReadahead(page))
			atomic_inc(&swapin_readahead_hits);
	}

	INC_CACHE_INFO(find_total);
	return page;
}

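/*
 * __read_swap_cache_async() does everything read_swap_cache_async()
 * (below) does except the final swap_readpage() call. A freshly
 * allocated page comes back locked and already in the swap cache, with
 * *new_page_allocated set so the caller knows it must initiate the
 * read itself; a swap cache hit comes back unlocked. Presumably the
 * split exists so that callers such as zswap writeback can install a
 * page in the swap cache without immediately issuing block I/O.
 */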
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct page *found_page, *new_page = NULL;
	struct address_space *swapper_space = swap_address_space(entry);
	int err;
	*new_page_allocated = false;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swapper_space, swp_offset(entry));
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet, while
			 * the other end is scheduled away waiting on discard
			 * I/O completion at scan_swap_map().
			 *
			 * In order to avoid turning this transitory state
			 * into a permanent loop around this -EEXIST case
			 * if !CONFIG_PREEMPT and the I/O completion happens
			 * to be waiting on the CPU waitqueue where we are now
			 * busy looping, we just conditionally invoke the
			 * scheduler here, if there are some more important
			 * tasks to run.
			 */
			cond_resched();
			continue;
		}
		if (err) {		/* swap entry is obsolete? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__SetPageLocked(new_page);
		__SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			*new_page_allocated = true;
			return new_page;
		}
		radix_tree_preload_end();
		__ClearPageLocked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
	} while (err != -ENOMEM);

	if (new_page)
		put_page(new_page);
	return found_page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage);

	return retpage;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int pages, max_pages, last_ra;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
		prev_offset = offset;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = atomic_read(&last_readahead_pages) / 2;
	if (pages < last_ra)
		pages = last_ra;
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct blk_plug plug;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			continue;
		if (offset != entry_offset)
			SetPageReadahead(page);
		put_page(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
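
/*
 * Usage sketch (an approximation; the actual call site lives in
 * mm/memory.c and changes between versions): on a swap fault,
 * do_swap_page() does roughly
 *
 *	page = lookup_swap_cache(entry);
 *	if (!page)
 *		page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
 *					vma, address);
 *
 * so swapin_readahead() brings in the faulting entry together with an
 * aligned window of its neighbours in one plugged batch.
 */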