Loading...
1/*
2 * linux/mm/swap_state.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 * Swap reorganised 29.12.95, Stephen Tweedie
6 *
7 * Rewritten to use page cache, (C) 1998 Stephen Tweedie
8 */
9#include <linux/mm.h>
10#include <linux/gfp.h>
11#include <linux/kernel_stat.h>
12#include <linux/swap.h>
13#include <linux/swapops.h>
14#include <linux/init.h>
15#include <linux/pagemap.h>
16#include <linux/backing-dev.h>
17#include <linux/blkdev.h>
18#include <linux/pagevec.h>
19#include <linux/migrate.h>
20
21#include <asm/pgtable.h>
22
23/*
24 * swapper_space is a fiction, retained to simplify the path through
25 * vmscan's shrink_page_list.
26 */
27static const struct address_space_operations swap_aops = {
28 .writepage = swap_writepage,
29 .set_page_dirty = swap_set_page_dirty,
30#ifdef CONFIG_MIGRATION
31 .migratepage = migrate_page,
32#endif
33};
34
35struct address_space swapper_spaces[MAX_SWAPFILES] = {
36 [0 ... MAX_SWAPFILES - 1] = {
37 .page_tree = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
38 .i_mmap_writable = ATOMIC_INIT(0),
39 .a_ops = &swap_aops,
40 }
41};
42
43#define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0)
44
45static struct {
46 unsigned long add_total;
47 unsigned long del_total;
48 unsigned long find_success;
49 unsigned long find_total;
50} swap_cache_info;
51
52unsigned long total_swapcache_pages(void)
53{
54 int i;
55 unsigned long ret = 0;
56
57 for (i = 0; i < MAX_SWAPFILES; i++)
58 ret += swapper_spaces[i].nrpages;
59 return ret;
60}
61
62static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
63
64void show_swap_cache_info(void)
65{
66 printk("%lu pages in swap cache\n", total_swapcache_pages());
67 printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
68 swap_cache_info.add_total, swap_cache_info.del_total,
69 swap_cache_info.find_success, swap_cache_info.find_total);
70 printk("Free swap = %ldkB\n",
71 get_nr_swap_pages() << (PAGE_SHIFT - 10));
72 printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
73}
74
75/*
76 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
77 * but sets SwapCache flag and private instead of mapping and index.
78 */
79int __add_to_swap_cache(struct page *page, swp_entry_t entry)
80{
81 int error;
82 struct address_space *address_space;
83
84 VM_BUG_ON_PAGE(!PageLocked(page), page);
85 VM_BUG_ON_PAGE(PageSwapCache(page), page);
86 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
87
88 get_page(page);
89 SetPageSwapCache(page);
90 set_page_private(page, entry.val);
91
92 address_space = swap_address_space(entry);
93 spin_lock_irq(&address_space->tree_lock);
94 error = radix_tree_insert(&address_space->page_tree,
95 entry.val, page);
96 if (likely(!error)) {
97 address_space->nrpages++;
98 __inc_zone_page_state(page, NR_FILE_PAGES);
99 INC_CACHE_INFO(add_total);
100 }
101 spin_unlock_irq(&address_space->tree_lock);
102
103 if (unlikely(error)) {
104 /*
105 * Only the context which have set SWAP_HAS_CACHE flag
106 * would call add_to_swap_cache().
107 * So add_to_swap_cache() doesn't returns -EEXIST.
108 */
109 VM_BUG_ON(error == -EEXIST);
110 set_page_private(page, 0UL);
111 ClearPageSwapCache(page);
112 put_page(page);
113 }
114
115 return error;
116}
117
118
119int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
120{
121 int error;
122
123 error = radix_tree_maybe_preload(gfp_mask);
124 if (!error) {
125 error = __add_to_swap_cache(page, entry);
126 radix_tree_preload_end();
127 }
128 return error;
129}
130
131/*
132 * This must be called only on pages that have
133 * been verified to be in the swap cache.
134 */
135void __delete_from_swap_cache(struct page *page)
136{
137 swp_entry_t entry;
138 struct address_space *address_space;
139
140 VM_BUG_ON_PAGE(!PageLocked(page), page);
141 VM_BUG_ON_PAGE(!PageSwapCache(page), page);
142 VM_BUG_ON_PAGE(PageWriteback(page), page);
143
144 entry.val = page_private(page);
145 address_space = swap_address_space(entry);
146 radix_tree_delete(&address_space->page_tree, page_private(page));
147 set_page_private(page, 0);
148 ClearPageSwapCache(page);
149 address_space->nrpages--;
150 __dec_zone_page_state(page, NR_FILE_PAGES);
151 INC_CACHE_INFO(del_total);
152}
153
154/**
155 * add_to_swap - allocate swap space for a page
156 * @page: page we want to move to swap
157 *
158 * Allocate swap space for the page and add the page to the
159 * swap cache. Caller needs to hold the page lock.
160 */
161int add_to_swap(struct page *page, struct list_head *list)
162{
163 swp_entry_t entry;
164 int err;
165
166 VM_BUG_ON_PAGE(!PageLocked(page), page);
167 VM_BUG_ON_PAGE(!PageUptodate(page), page);
168
169 entry = get_swap_page();
170 if (!entry.val)
171 return 0;
172
173 if (mem_cgroup_try_charge_swap(page, entry)) {
174 swapcache_free(entry);
175 return 0;
176 }
177
178 if (unlikely(PageTransHuge(page)))
179 if (unlikely(split_huge_page_to_list(page, list))) {
180 swapcache_free(entry);
181 return 0;
182 }
183
184 /*
185 * Radix-tree node allocations from PF_MEMALLOC contexts could
186 * completely exhaust the page allocator. __GFP_NOMEMALLOC
187 * stops emergency reserves from being allocated.
188 *
189 * TODO: this could cause a theoretical memory reclaim
190 * deadlock in the swap out path.
191 */
192 /*
193 * Add it to the swap cache.
194 */
195 err = add_to_swap_cache(page, entry,
196 __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
197
198 if (!err) {
199 return 1;
200 } else { /* -ENOMEM radix-tree allocation failure */
201 /*
202 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
203 * clear SWAP_HAS_CACHE flag.
204 */
205 swapcache_free(entry);
206 return 0;
207 }
208}
209
210/*
211 * This must be called only on pages that have
212 * been verified to be in the swap cache and locked.
213 * It will never put the page into the free list,
214 * the caller has a reference on the page.
215 */
216void delete_from_swap_cache(struct page *page)
217{
218 swp_entry_t entry;
219 struct address_space *address_space;
220
221 entry.val = page_private(page);
222
223 address_space = swap_address_space(entry);
224 spin_lock_irq(&address_space->tree_lock);
225 __delete_from_swap_cache(page);
226 spin_unlock_irq(&address_space->tree_lock);
227
228 swapcache_free(entry);
229 put_page(page);
230}
231
232/*
233 * If we are the only user, then try to free up the swap cache.
234 *
235 * Its ok to check for PageSwapCache without the page lock
236 * here because we are going to recheck again inside
237 * try_to_free_swap() _with_ the lock.
238 * - Marcelo
239 */
240static inline void free_swap_cache(struct page *page)
241{
242 if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
243 try_to_free_swap(page);
244 unlock_page(page);
245 }
246}
247
248/*
249 * Perform a free_page(), also freeing any swap cache associated with
250 * this page if it is the last user of the page.
251 */
252void free_page_and_swap_cache(struct page *page)
253{
254 free_swap_cache(page);
255 put_page(page);
256}
257
258/*
259 * Passed an array of pages, drop them all from swapcache and then release
260 * them. They are removed from the LRU and freed if this is their last use.
261 */
262void free_pages_and_swap_cache(struct page **pages, int nr)
263{
264 struct page **pagep = pages;
265 int i;
266
267 lru_add_drain();
268 for (i = 0; i < nr; i++)
269 free_swap_cache(pagep[i]);
270 release_pages(pagep, nr, false);
271}
272
273/*
274 * Lookup a swap entry in the swap cache. A found page will be returned
275 * unlocked and with its refcount incremented - we rely on the kernel
276 * lock getting page table operations atomic even if we drop the page
277 * lock before returning.
278 */
279struct page * lookup_swap_cache(swp_entry_t entry)
280{
281 struct page *page;
282
283 page = find_get_page(swap_address_space(entry), entry.val);
284
285 if (page) {
286 INC_CACHE_INFO(find_success);
287 if (TestClearPageReadahead(page))
288 atomic_inc(&swapin_readahead_hits);
289 }
290
291 INC_CACHE_INFO(find_total);
292 return page;
293}
294
295struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
296 struct vm_area_struct *vma, unsigned long addr,
297 bool *new_page_allocated)
298{
299 struct page *found_page, *new_page = NULL;
300 struct address_space *swapper_space = swap_address_space(entry);
301 int err;
302 *new_page_allocated = false;
303
304 do {
305 /*
306 * First check the swap cache. Since this is normally
307 * called after lookup_swap_cache() failed, re-calling
308 * that would confuse statistics.
309 */
310 found_page = find_get_page(swapper_space, entry.val);
311 if (found_page)
312 break;
313
314 /*
315 * Get a new page to read into from swap.
316 */
317 if (!new_page) {
318 new_page = alloc_page_vma(gfp_mask, vma, addr);
319 if (!new_page)
320 break; /* Out of memory */
321 }
322
323 /*
324 * call radix_tree_preload() while we can wait.
325 */
326 err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
327 if (err)
328 break;
329
330 /*
331 * Swap entry may have been freed since our caller observed it.
332 */
333 err = swapcache_prepare(entry);
334 if (err == -EEXIST) {
335 radix_tree_preload_end();
336 /*
337 * We might race against get_swap_page() and stumble
338 * across a SWAP_HAS_CACHE swap_map entry whose page
339 * has not been brought into the swapcache yet, while
340 * the other end is scheduled away waiting on discard
341 * I/O completion at scan_swap_map().
342 *
343 * In order to avoid turning this transitory state
344 * into a permanent loop around this -EEXIST case
345 * if !CONFIG_PREEMPT and the I/O completion happens
346 * to be waiting on the CPU waitqueue where we are now
347 * busy looping, we just conditionally invoke the
348 * scheduler here, if there are some more important
349 * tasks to run.
350 */
351 cond_resched();
352 continue;
353 }
354 if (err) { /* swp entry is obsolete ? */
355 radix_tree_preload_end();
356 break;
357 }
358
359 /* May fail (-ENOMEM) if radix-tree node allocation failed. */
360 __SetPageLocked(new_page);
361 SetPageSwapBacked(new_page);
362 err = __add_to_swap_cache(new_page, entry);
363 if (likely(!err)) {
364 radix_tree_preload_end();
365 /*
366 * Initiate read into locked page and return.
367 */
368 lru_cache_add_anon(new_page);
369 *new_page_allocated = true;
370 return new_page;
371 }
372 radix_tree_preload_end();
373 ClearPageSwapBacked(new_page);
374 __ClearPageLocked(new_page);
375 /*
376 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
377 * clear SWAP_HAS_CACHE flag.
378 */
379 swapcache_free(entry);
380 } while (err != -ENOMEM);
381
382 if (new_page)
383 put_page(new_page);
384 return found_page;
385}
386
387/*
388 * Locate a page of swap in physical memory, reserving swap cache space
389 * and reading the disk if it is not already cached.
390 * A failure return means that either the page allocation failed or that
391 * the swap entry is no longer in use.
392 */
393struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
394 struct vm_area_struct *vma, unsigned long addr)
395{
396 bool page_was_allocated;
397 struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
398 vma, addr, &page_was_allocated);
399
400 if (page_was_allocated)
401 swap_readpage(retpage);
402
403 return retpage;
404}
405
406static unsigned long swapin_nr_pages(unsigned long offset)
407{
408 static unsigned long prev_offset;
409 unsigned int pages, max_pages, last_ra;
410 static atomic_t last_readahead_pages;
411
412 max_pages = 1 << READ_ONCE(page_cluster);
413 if (max_pages <= 1)
414 return 1;
415
416 /*
417 * This heuristic has been found to work well on both sequential and
418 * random loads, swapping to hard disk or to SSD: please don't ask
419 * what the "+ 2" means, it just happens to work well, that's all.
420 */
421 pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
422 if (pages == 2) {
423 /*
424 * We can have no readahead hits to judge by: but must not get
425 * stuck here forever, so check for an adjacent offset instead
426 * (and don't even bother to check whether swap type is same).
427 */
428 if (offset != prev_offset + 1 && offset != prev_offset - 1)
429 pages = 1;
430 prev_offset = offset;
431 } else {
432 unsigned int roundup = 4;
433 while (roundup < pages)
434 roundup <<= 1;
435 pages = roundup;
436 }
437
438 if (pages > max_pages)
439 pages = max_pages;
440
441 /* Don't shrink readahead too fast */
442 last_ra = atomic_read(&last_readahead_pages) / 2;
443 if (pages < last_ra)
444 pages = last_ra;
445 atomic_set(&last_readahead_pages, pages);
446
447 return pages;
448}
449
450/**
451 * swapin_readahead - swap in pages in hope we need them soon
452 * @entry: swap entry of this memory
453 * @gfp_mask: memory allocation flags
454 * @vma: user vma this address belongs to
455 * @addr: target address for mempolicy
456 *
457 * Returns the struct page for entry and addr, after queueing swapin.
458 *
459 * Primitive swap readahead code. We simply read an aligned block of
460 * (1 << page_cluster) entries in the swap area. This method is chosen
461 * because it doesn't cost us any seek time. We also make sure to queue
462 * the 'original' request together with the readahead ones...
463 *
464 * This has been extended to use the NUMA policies from the mm triggering
465 * the readahead.
466 *
467 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
468 */
469struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
470 struct vm_area_struct *vma, unsigned long addr)
471{
472 struct page *page;
473 unsigned long entry_offset = swp_offset(entry);
474 unsigned long offset = entry_offset;
475 unsigned long start_offset, end_offset;
476 unsigned long mask;
477 struct blk_plug plug;
478
479 mask = swapin_nr_pages(offset) - 1;
480 if (!mask)
481 goto skip;
482
483 /* Read a page_cluster sized and aligned cluster around offset. */
484 start_offset = offset & ~mask;
485 end_offset = offset | mask;
486 if (!start_offset) /* First page is swap header. */
487 start_offset++;
488
489 blk_start_plug(&plug);
490 for (offset = start_offset; offset <= end_offset ; offset++) {
491 /* Ok, do the async read-ahead now */
492 page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
493 gfp_mask, vma, addr);
494 if (!page)
495 continue;
496 if (offset != entry_offset)
497 SetPageReadahead(page);
498 put_page(page);
499 }
500 blk_finish_plug(&plug);
501
502 lru_add_drain(); /* Push any new pages onto the LRU now */
503skip:
504 return read_swap_cache_async(entry, gfp_mask, vma, addr);
505}
1/*
2 * linux/mm/swap_state.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 * Swap reorganised 29.12.95, Stephen Tweedie
6 *
7 * Rewritten to use page cache, (C) 1998 Stephen Tweedie
8 */
9#include <linux/module.h>
10#include <linux/mm.h>
11#include <linux/gfp.h>
12#include <linux/kernel_stat.h>
13#include <linux/swap.h>
14#include <linux/swapops.h>
15#include <linux/init.h>
16#include <linux/pagemap.h>
17#include <linux/buffer_head.h>
18#include <linux/backing-dev.h>
19#include <linux/pagevec.h>
20#include <linux/migrate.h>
21#include <linux/page_cgroup.h>
22
23#include <asm/pgtable.h>
24
25/*
26 * swapper_space is a fiction, retained to simplify the path through
27 * vmscan's shrink_page_list.
28 */
29static const struct address_space_operations swap_aops = {
30 .writepage = swap_writepage,
31 .set_page_dirty = __set_page_dirty_nobuffers,
32 .migratepage = migrate_page,
33};
34
35static struct backing_dev_info swap_backing_dev_info = {
36 .name = "swap",
37 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
38};
39
40struct address_space swapper_space = {
41 .page_tree = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
42 .tree_lock = __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
43 .a_ops = &swap_aops,
44 .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
45 .backing_dev_info = &swap_backing_dev_info,
46};
47
48#define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0)
49
50static struct {
51 unsigned long add_total;
52 unsigned long del_total;
53 unsigned long find_success;
54 unsigned long find_total;
55} swap_cache_info;
56
57void show_swap_cache_info(void)
58{
59 printk("%lu pages in swap cache\n", total_swapcache_pages);
60 printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
61 swap_cache_info.add_total, swap_cache_info.del_total,
62 swap_cache_info.find_success, swap_cache_info.find_total);
63 printk("Free swap = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
64 printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
65}
66
67/*
68 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
69 * but sets SwapCache flag and private instead of mapping and index.
70 */
71static int __add_to_swap_cache(struct page *page, swp_entry_t entry)
72{
73 int error;
74
75 VM_BUG_ON(!PageLocked(page));
76 VM_BUG_ON(PageSwapCache(page));
77 VM_BUG_ON(!PageSwapBacked(page));
78
79 page_cache_get(page);
80 SetPageSwapCache(page);
81 set_page_private(page, entry.val);
82
83 spin_lock_irq(&swapper_space.tree_lock);
84 error = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
85 if (likely(!error)) {
86 total_swapcache_pages++;
87 __inc_zone_page_state(page, NR_FILE_PAGES);
88 INC_CACHE_INFO(add_total);
89 }
90 spin_unlock_irq(&swapper_space.tree_lock);
91
92 if (unlikely(error)) {
93 /*
94 * Only the context which have set SWAP_HAS_CACHE flag
95 * would call add_to_swap_cache().
96 * So add_to_swap_cache() doesn't returns -EEXIST.
97 */
98 VM_BUG_ON(error == -EEXIST);
99 set_page_private(page, 0UL);
100 ClearPageSwapCache(page);
101 page_cache_release(page);
102 }
103
104 return error;
105}
106
107
108int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
109{
110 int error;
111
112 error = radix_tree_preload(gfp_mask);
113 if (!error) {
114 error = __add_to_swap_cache(page, entry);
115 radix_tree_preload_end();
116 }
117 return error;
118}
119
120/*
121 * This must be called only on pages that have
122 * been verified to be in the swap cache.
123 */
124void __delete_from_swap_cache(struct page *page)
125{
126 VM_BUG_ON(!PageLocked(page));
127 VM_BUG_ON(!PageSwapCache(page));
128 VM_BUG_ON(PageWriteback(page));
129
130 radix_tree_delete(&swapper_space.page_tree, page_private(page));
131 set_page_private(page, 0);
132 ClearPageSwapCache(page);
133 total_swapcache_pages--;
134 __dec_zone_page_state(page, NR_FILE_PAGES);
135 INC_CACHE_INFO(del_total);
136}
137
138/**
139 * add_to_swap - allocate swap space for a page
140 * @page: page we want to move to swap
141 *
142 * Allocate swap space for the page and add the page to the
143 * swap cache. Caller needs to hold the page lock.
144 */
145int add_to_swap(struct page *page)
146{
147 swp_entry_t entry;
148 int err;
149
150 VM_BUG_ON(!PageLocked(page));
151 VM_BUG_ON(!PageUptodate(page));
152
153 entry = get_swap_page();
154 if (!entry.val)
155 return 0;
156
157 if (unlikely(PageTransHuge(page)))
158 if (unlikely(split_huge_page(page))) {
159 swapcache_free(entry, NULL);
160 return 0;
161 }
162
163 /*
164 * Radix-tree node allocations from PF_MEMALLOC contexts could
165 * completely exhaust the page allocator. __GFP_NOMEMALLOC
166 * stops emergency reserves from being allocated.
167 *
168 * TODO: this could cause a theoretical memory reclaim
169 * deadlock in the swap out path.
170 */
171 /*
172 * Add it to the swap cache and mark it dirty
173 */
174 err = add_to_swap_cache(page, entry,
175 __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
176
177 if (!err) { /* Success */
178 SetPageDirty(page);
179 return 1;
180 } else { /* -ENOMEM radix-tree allocation failure */
181 /*
182 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
183 * clear SWAP_HAS_CACHE flag.
184 */
185 swapcache_free(entry, NULL);
186 return 0;
187 }
188}
189
190/*
191 * This must be called only on pages that have
192 * been verified to be in the swap cache and locked.
193 * It will never put the page into the free list,
194 * the caller has a reference on the page.
195 */
196void delete_from_swap_cache(struct page *page)
197{
198 swp_entry_t entry;
199
200 entry.val = page_private(page);
201
202 spin_lock_irq(&swapper_space.tree_lock);
203 __delete_from_swap_cache(page);
204 spin_unlock_irq(&swapper_space.tree_lock);
205
206 swapcache_free(entry, page);
207 page_cache_release(page);
208}
209
210/*
211 * If we are the only user, then try to free up the swap cache.
212 *
213 * Its ok to check for PageSwapCache without the page lock
214 * here because we are going to recheck again inside
215 * try_to_free_swap() _with_ the lock.
216 * - Marcelo
217 */
218static inline void free_swap_cache(struct page *page)
219{
220 if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
221 try_to_free_swap(page);
222 unlock_page(page);
223 }
224}
225
226/*
227 * Perform a free_page(), also freeing any swap cache associated with
228 * this page if it is the last user of the page.
229 */
230void free_page_and_swap_cache(struct page *page)
231{
232 free_swap_cache(page);
233 page_cache_release(page);
234}
235
236/*
237 * Passed an array of pages, drop them all from swapcache and then release
238 * them. They are removed from the LRU and freed if this is their last use.
239 */
240void free_pages_and_swap_cache(struct page **pages, int nr)
241{
242 struct page **pagep = pages;
243
244 lru_add_drain();
245 while (nr) {
246 int todo = min(nr, PAGEVEC_SIZE);
247 int i;
248
249 for (i = 0; i < todo; i++)
250 free_swap_cache(pagep[i]);
251 release_pages(pagep, todo, 0);
252 pagep += todo;
253 nr -= todo;
254 }
255}
256
257/*
258 * Lookup a swap entry in the swap cache. A found page will be returned
259 * unlocked and with its refcount incremented - we rely on the kernel
260 * lock getting page table operations atomic even if we drop the page
261 * lock before returning.
262 */
263struct page * lookup_swap_cache(swp_entry_t entry)
264{
265 struct page *page;
266
267 page = find_get_page(&swapper_space, entry.val);
268
269 if (page)
270 INC_CACHE_INFO(find_success);
271
272 INC_CACHE_INFO(find_total);
273 return page;
274}
275
276/*
277 * Locate a page of swap in physical memory, reserving swap cache space
278 * and reading the disk if it is not already cached.
279 * A failure return means that either the page allocation failed or that
280 * the swap entry is no longer in use.
281 */
282struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
283 struct vm_area_struct *vma, unsigned long addr)
284{
285 struct page *found_page, *new_page = NULL;
286 int err;
287
288 do {
289 /*
290 * First check the swap cache. Since this is normally
291 * called after lookup_swap_cache() failed, re-calling
292 * that would confuse statistics.
293 */
294 found_page = find_get_page(&swapper_space, entry.val);
295 if (found_page)
296 break;
297
298 /*
299 * Get a new page to read into from swap.
300 */
301 if (!new_page) {
302 new_page = alloc_page_vma(gfp_mask, vma, addr);
303 if (!new_page)
304 break; /* Out of memory */
305 }
306
307 /*
308 * call radix_tree_preload() while we can wait.
309 */
310 err = radix_tree_preload(gfp_mask & GFP_KERNEL);
311 if (err)
312 break;
313
314 /*
315 * Swap entry may have been freed since our caller observed it.
316 */
317 err = swapcache_prepare(entry);
318 if (err == -EEXIST) { /* seems racy */
319 radix_tree_preload_end();
320 continue;
321 }
322 if (err) { /* swp entry is obsolete ? */
323 radix_tree_preload_end();
324 break;
325 }
326
327 /* May fail (-ENOMEM) if radix-tree node allocation failed. */
328 __set_page_locked(new_page);
329 SetPageSwapBacked(new_page);
330 err = __add_to_swap_cache(new_page, entry);
331 if (likely(!err)) {
332 radix_tree_preload_end();
333 /*
334 * Initiate read into locked page and return.
335 */
336 lru_cache_add_anon(new_page);
337 swap_readpage(new_page);
338 return new_page;
339 }
340 radix_tree_preload_end();
341 ClearPageSwapBacked(new_page);
342 __clear_page_locked(new_page);
343 /*
344 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
345 * clear SWAP_HAS_CACHE flag.
346 */
347 swapcache_free(entry, NULL);
348 } while (err != -ENOMEM);
349
350 if (new_page)
351 page_cache_release(new_page);
352 return found_page;
353}
354
355/**
356 * swapin_readahead - swap in pages in hope we need them soon
357 * @entry: swap entry of this memory
358 * @gfp_mask: memory allocation flags
359 * @vma: user vma this address belongs to
360 * @addr: target address for mempolicy
361 *
362 * Returns the struct page for entry and addr, after queueing swapin.
363 *
364 * Primitive swap readahead code. We simply read an aligned block of
365 * (1 << page_cluster) entries in the swap area. This method is chosen
366 * because it doesn't cost us any seek time. We also make sure to queue
367 * the 'original' request together with the readahead ones...
368 *
369 * This has been extended to use the NUMA policies from the mm triggering
370 * the readahead.
371 *
372 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
373 */
374struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
375 struct vm_area_struct *vma, unsigned long addr)
376{
377 int nr_pages;
378 struct page *page;
379 unsigned long offset;
380 unsigned long end_offset;
381
382 /*
383 * Get starting offset for readaround, and number of pages to read.
384 * Adjust starting address by readbehind (for NUMA interleave case)?
385 * No, it's very unlikely that swap layout would follow vma layout,
386 * more likely that neighbouring swap pages came from the same node:
387 * so use the same "addr" to choose the same node for each swap read.
388 */
389 nr_pages = valid_swaphandles(entry, &offset);
390 for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
391 /* Ok, do the async read-ahead now */
392 page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
393 gfp_mask, vma, addr);
394 if (!page)
395 break;
396 page_cache_release(page);
397 }
398 lru_add_drain(); /* Push any new pages onto the LRU now */
399 return read_swap_cache_async(entry, gfp_mask, vma, addr);
400}