v3.15
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
	.migratepage	= migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
	.name		= "swap",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

struct address_space swapper_spaces[MAX_SWAPFILES] = {
	[0 ... MAX_SWAPFILES - 1] = {
		.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
		.a_ops		= &swap_aops,
		.backing_dev_info = &swap_backing_dev_info,
	}
};
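
/*
 * One address_space per swap file: insertions and deletions for
 * different swap areas then contend on separate tree_locks rather
 * than a single shared lock.  swap_address_space(entry) selects the
 * slot indexed by swp_type(entry).
 */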

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
	int i;
	unsigned long ret = 0;

	for (i = 0; i < MAX_SWAPFILES; i++)
		ret += swapper_spaces[i].nrpages;
	return ret;
}

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_cache_get(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	error = radix_tree_insert(&address_space->page_tree,
					entry.val, page);
	if (likely(!error)) {
		address_space->nrpages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&address_space->tree_lock);

	if (unlikely(error)) {
		/*
		 * Only a context which has already set SWAP_HAS_CACHE
		 * (via swapcache_prepare()) calls add_to_swap_cache(),
		 * so it never returns -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		page_cache_release(page);
	}

	return error;
}


int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	radix_tree_delete(&address_space->page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	address_space->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @list: list onto which a huge page's tail pages are placed if it has
 *	to be split first
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page, struct list_head *list)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page();
	if (!entry.val)
		return 0;

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page_to_list(page, list))) {
			swapcache_free(entry, NULL);
			return 0;
		}

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache and mark it dirty
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
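	/*
	 * Flag recap: __GFP_HIGH marks this as a high-priority
	 * allocation, __GFP_NOMEMALLOC keeps it out of the emergency
	 * reserves even from a PF_MEMALLOC context, and __GFP_NOWARN
	 * suppresses the allocation-failure warning; the -ENOMEM case
	 * is handled just below.
	 */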

	if (!err) {	/* Success */
		SetPageDirty(page);
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
		return 0;
	}
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&address_space->tree_lock);

	swapcache_free(entry, page);
	page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;

	lru_add_drain();
	while (nr) {
		int todo = min(nr, PAGEVEC_SIZE);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, 0);
		pagep += todo;
		nr -= todo;
	}
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(swap_address_space(entry), entry.val);

	if (page) {
		INC_CACHE_INFO(find_success);
		if (TestClearPageReadahead(page))
			atomic_inc(&swapin_readahead_hits);
	}

	INC_CACHE_INFO(find_total);
	return page;
}
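
/*
 * Each hit on a page that still has PG_readahead set feeds
 * swapin_readahead_hits, which swapin_nr_pages() below consumes to
 * grow or shrink the next swapin readahead window.
 */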

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swap_address_space(entry),
					entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet, while
			 * the other end is scheduled away waiting on discard
			 * I/O completion at scan_swap_map().
			 *
			 * In order to avoid turning this transitory state
			 * into a permanent loop around this -EEXIST case
			 * if !CONFIG_PREEMPT and the I/O completion happens
			 * to be waiting on the CPU waitqueue where we are now
			 * busy looping, we just conditionally invoke the
			 * scheduler here, if there are some more important
			 * tasks to run.
			 */
			cond_resched();
			continue;
		}
		if (err) {		/* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			swap_readpage(new_page);
			return new_page;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int pages, max_pages, last_ra;
	static atomic_t last_readahead_pages;

	max_pages = 1 << ACCESS_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
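	/*
	 * Worked example (illustrative): with page_cluster = 3,
	 * max_pages is 8.  Four hits in the last interval give
	 * pages = 4 + 2 = 6, rounded up to the next power of two, 8.
	 * With no hits and a non-adjacent offset, pages stays at 1 and
	 * readahead is effectively disabled for this fault.
	 */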
	pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
		prev_offset = offset;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = atomic_read(&last_readahead_pages) / 2;
	if (pages < last_ra)
		pages = last_ra;
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct blk_plug plug;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
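	/*
	 * Example (illustrative): offset 21 with an 8-page window
	 * (mask 7) reads offsets 16..23; offset 5 with the same mask
	 * would read 1..7, since offset 0 holds the swap header.
	 */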

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			continue;
		if (offset != entry_offset)
			SetPageReadahead(page);
		page_cache_release(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
v4.6
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

struct address_space swapper_spaces[MAX_SWAPFILES] = {
	[0 ... MAX_SWAPFILES - 1] = {
		.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
		.i_mmap_writable = ATOMIC_INIT(0),
		.a_ops		= &swap_aops,
	}
};

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
	int i;
	unsigned long ret = 0;

	for (i = 0; i < MAX_SWAPFILES; i++)
		ret += swapper_spaces[i].nrpages;
	return ret;
}

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	get_page(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	error = radix_tree_insert(&address_space->page_tree,
					entry.val, page);
	if (likely(!error)) {
		address_space->nrpages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&address_space->tree_lock);

	if (unlikely(error)) {
		/*
		 * Only a context which has already set SWAP_HAS_CACHE
		 * (via swapcache_prepare()) calls add_to_swap_cache(),
		 * so it never returns -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		put_page(page);
	}

	return error;
}


int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	radix_tree_delete(&address_space->page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	address_space->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @list: list onto which a huge page's tail pages are placed if it has
 *	to be split first
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page, struct list_head *list)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page();
	if (!entry.val)
		return 0;

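	/*
	 * Charge the swap entry to the page's memcg up front; if the
	 * charge fails, give the just-allocated slot straight back and
	 * leave the page in memory.
	 */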
	if (mem_cgroup_try_charge_swap(page, entry)) {
		swapcache_free(entry);
		return 0;
	}

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page_to_list(page, list))) {
			swapcache_free(entry);
			return 0;
		}

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
		return 0;
	}
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&address_space->tree_lock);

	swapcache_free(entry);
	put_page(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr, false);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(swap_address_space(entry), entry.val);

	if (page) {
		INC_CACHE_INFO(find_success);
		if (TestClearPageReadahead(page))
			atomic_inc(&swapin_readahead_hits);
	}

	INC_CACHE_INFO(find_total);
	return page;
}

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct page *found_page, *new_page = NULL;
	struct address_space *swapper_space = swap_address_space(entry);
	int err;
	*new_page_allocated = false;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet, while
			 * the other end is scheduled away waiting on discard
			 * I/O completion at scan_swap_map().
			 *
			 * In order to avoid turning this transitory state
			 * into a permanent loop around this -EEXIST case
			 * if !CONFIG_PREEMPT and the I/O completion happens
			 * to be waiting on the CPU waitqueue where we are now
			 * busy looping, we just conditionally invoke the
			 * scheduler here, if there are some more important
			 * tasks to run.
			 */
			cond_resched();
			continue;
		}
		if (err) {		/* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__SetPageLocked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			*new_page_allocated = true;
			return new_page;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__ClearPageLocked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
	} while (err != -ENOMEM);

	if (new_page)
		put_page(new_page);
	return found_page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage);

	return retpage;
}
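
/*
 * The allocate/read split lets a caller such as the zswap writeback
 * path insert a page into the swap cache and fill it by other means;
 * read_swap_cache_async() itself is just that call plus
 * swap_readpage() whenever a fresh page was actually allocated.
 */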

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int pages, max_pages, last_ra;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
		prev_offset = offset;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = atomic_read(&last_readahead_pages) / 2;
	if (pages < last_ra)
		pages = last_ra;
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct blk_plug plug;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			continue;
		if (offset != entry_offset)
			SetPageReadahead(page);
		put_page(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}