v3.1
  1/*
  2 *  linux/mm/swap.c
  3 *
  4 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  5 */
  6
  7/*
  8 * This file contains the default values for the operation of the
  9 * Linux VM subsystem. Fine-tuning documentation can be found in
 10 * Documentation/sysctl/vm.txt.
 11 * Started 18.12.91
 12 * Swap aging added 23.2.95, Stephen Tweedie.
 13 * Buffermem limits added 12.3.98, Rik van Riel.
 14 */
 15
 16#include <linux/mm.h>
 17#include <linux/sched.h>
 18#include <linux/kernel_stat.h>
 19#include <linux/swap.h>
 20#include <linux/mman.h>
 21#include <linux/pagemap.h>
 22#include <linux/pagevec.h>
 23#include <linux/init.h>
 24#include <linux/module.h>
 25#include <linux/mm_inline.h>
 26#include <linux/buffer_head.h>	/* for try_to_release_page() */
 27#include <linux/percpu_counter.h>
 28#include <linux/percpu.h>
 29#include <linux/cpu.h>
 30#include <linux/notifier.h>
 31#include <linux/backing-dev.h>
 32#include <linux/memcontrol.h>
 33#include <linux/gfp.h>
 34
 35#include "internal.h"
 36
 37/* How many pages do we try to swap or page in/out together? */
 38int page_cluster;
 39
 40static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
 41static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
 42static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
 43
 44/*
 45 * This path almost never happens for VM activity - pages are normally
 46 * freed via pagevecs.  But it gets used by networking.
 47 */
 48static void __page_cache_release(struct page *page)
 49{
 50	if (PageLRU(page)) {
 51		unsigned long flags;
 52		struct zone *zone = page_zone(page);
 53
 54		spin_lock_irqsave(&zone->lru_lock, flags);
 55		VM_BUG_ON(!PageLRU(page));
 56		__ClearPageLRU(page);
 57		del_page_from_lru(zone, page);
 58		spin_unlock_irqrestore(&zone->lru_lock, flags);
 59	}
 60}
 61
 62static void __put_single_page(struct page *page)
 63{
 64	__page_cache_release(page);
 65	free_hot_cold_page(page, 0);
 66}
 67
 68static void __put_compound_page(struct page *page)
 69{
 70	compound_page_dtor *dtor;
 71
 72	__page_cache_release(page);
 73	dtor = get_compound_page_dtor(page);
 74	(*dtor)(page);
 75}
 76
 77static void put_compound_page(struct page *page)
 78{
 79	if (unlikely(PageTail(page))) {
 80		/* __split_huge_page_refcount can run under us */
 81		struct page *page_head = page->first_page;
 82		smp_rmb();
 83		/*
 84		 * If PageTail is still set after smp_rmb() we can be sure
 85		 * that the page->first_page we read wasn't a dangling pointer.
 86		 * See __split_huge_page_refcount() smp_wmb().
 87		 */
 88		if (likely(PageTail(page) && get_page_unless_zero(page_head))) {
 89			unsigned long flags;
 90			/*
 91			 * Verify that our page_head wasn't converted
 92			 * to a regular page before we got a
 93			 * reference on it.
 94			 */
 95			if (unlikely(!PageHead(page_head))) {
 96				/* PageHead is cleared after PageTail */
 97				smp_rmb();
 98				VM_BUG_ON(PageTail(page));
 99				goto out_put_head;
100			}
101			/*
102			 * Only run compound_lock on a valid PageHead,
103			 * after having it pinned with
104			 * get_page_unless_zero() above.
105			 */
106			smp_mb();
107			/* page_head wasn't a dangling pointer */
108			flags = compound_lock_irqsave(page_head);
109			if (unlikely(!PageTail(page))) {
110				/* __split_huge_page_refcount run before us */
111				compound_unlock_irqrestore(page_head, flags);
112				VM_BUG_ON(PageHead(page_head));
113			out_put_head:
114				if (put_page_testzero(page_head))
115					__put_single_page(page_head);
116			out_put_single:
117				if (put_page_testzero(page))
118					__put_single_page(page);
119				return;
120			}
121			VM_BUG_ON(page_head != page->first_page);
122			/*
123			 * We can release the refcount taken by
124			 * get_page_unless_zero now that
125			 * split_huge_page_refcount is blocked on the
126			 * compound_lock.
127			 */
128			if (put_page_testzero(page_head))
129				VM_BUG_ON(1);
130			/* __split_huge_page_refcount will wait now */
131			VM_BUG_ON(atomic_read(&page->_count) <= 0);
132			atomic_dec(&page->_count);
133			VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
134			compound_unlock_irqrestore(page_head, flags);
135			if (put_page_testzero(page_head)) {
136				if (PageHead(page_head))
137					__put_compound_page(page_head);
138				else
139					__put_single_page(page_head);
140			}
141		} else {
142			/* page_head is a dangling pointer */
143			VM_BUG_ON(PageTail(page));
144			goto out_put_single;
145		}
146	} else if (put_page_testzero(page)) {
147		if (PageHead(page))
148			__put_compound_page(page);
149		else
150			__put_single_page(page);
151	}
152}
153
154void put_page(struct page *page)
155{
156	if (unlikely(PageCompound(page)))
157		put_compound_page(page);
158	else if (put_page_testzero(page))
159		__put_single_page(page);
160}
161EXPORT_SYMBOL(put_page);
162
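A minimal usage sketch (editor's illustration, not part of mm/swap.c; the helper name is hypothetical) of how a caller pairs get_page() with put_page(), relying on put_page() above to route compound/THP tail pages through put_compound_page():

static void example_use_page(struct page *page)
{
	get_page(page);		/* pin the page while we touch it */
	/* ... read or modify the page here ... */
	put_page(page);		/* frees the page if this was the last reference */
}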
163/**
164 * put_pages_list() - release a list of pages
165 * @pages: list of pages threaded on page->lru
166 *
167 * Release a list of pages which are strung together on page.lru.  Currently
168 * used by read_cache_pages() and related error recovery code.
169 */
170void put_pages_list(struct list_head *pages)
171{
172	while (!list_empty(pages)) {
173		struct page *victim;
174
175		victim = list_entry(pages->prev, struct page, lru);
176		list_del(&victim->lru);
177		page_cache_release(victim);
178	}
179}
180EXPORT_SYMBOL(put_pages_list);
181
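A hedged sketch of the put_pages_list() pattern: pages are threaded onto a private list through their page->lru links and released in one call (the surrounding function is hypothetical):

static void example_release_as_list(struct page **pages, int nr)
{
	LIST_HEAD(list);
	int i;

	for (i = 0; i < nr; i++)
		list_add(&pages[i]->lru, &list);	/* string pages together on page->lru */
	put_pages_list(&list);				/* drops one reference per page */
}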
182static void pagevec_lru_move_fn(struct pagevec *pvec,
183				void (*move_fn)(struct page *page, void *arg),
184				void *arg)
185{
186	int i;
187	struct zone *zone = NULL;
188	unsigned long flags = 0;
189
190	for (i = 0; i < pagevec_count(pvec); i++) {
191		struct page *page = pvec->pages[i];
192		struct zone *pagezone = page_zone(page);
193
194		if (pagezone != zone) {
195			if (zone)
196				spin_unlock_irqrestore(&zone->lru_lock, flags);
197			zone = pagezone;
198			spin_lock_irqsave(&zone->lru_lock, flags);
199		}
200
201		(*move_fn)(page, arg);
202	}
203	if (zone)
204		spin_unlock_irqrestore(&zone->lru_lock, flags);
205	release_pages(pvec->pages, pvec->nr, pvec->cold);
206	pagevec_reinit(pvec);
207}
208
209static void pagevec_move_tail_fn(struct page *page, void *arg)
210{
211	int *pgmoved = arg;
212	struct zone *zone = page_zone(page);
213
214	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
215		enum lru_list lru = page_lru_base_type(page);
216		list_move_tail(&page->lru, &zone->lru[lru].list);
217		mem_cgroup_rotate_reclaimable_page(page);
218		(*pgmoved)++;
219	}
220}
221
222/*
223 * pagevec_move_tail() must be called with IRQ disabled.
224 * Otherwise this may cause nasty races.
225 */
226static void pagevec_move_tail(struct pagevec *pvec)
227{
228	int pgmoved = 0;
229
230	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
231	__count_vm_events(PGROTATED, pgmoved);
232}
233
234/*
235 * Writeback is about to end against a page which has been marked for immediate
236 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
237 * inactive list.
238 */
239void rotate_reclaimable_page(struct page *page)
240{
241	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
242	    !PageUnevictable(page) && PageLRU(page)) {
243		struct pagevec *pvec;
244		unsigned long flags;
245
246		page_cache_get(page);
247		local_irq_save(flags);
248		pvec = &__get_cpu_var(lru_rotate_pvecs);
249		if (!pagevec_add(pvec, page))
250			pagevec_move_tail(pvec);
251		local_irq_restore(flags);
252	}
253}
254
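For context, the usual caller is end_page_writeback() in mm/filemap.c, which (roughly, quoting from memory) hands pages marked PG_reclaim back to this function once their writeback completes:

	/* sketch of the relevant lines of end_page_writeback() */
	if (TestClearPageReclaim(page))
		rotate_reclaimable_page(page);	/* writeback done: queue for the inactive tail */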
255static void update_page_reclaim_stat(struct zone *zone, struct page *page,
256				     int file, int rotated)
257{
258	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
259	struct zone_reclaim_stat *memcg_reclaim_stat;
260
261	memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
262
263	reclaim_stat->recent_scanned[file]++;
264	if (rotated)
265		reclaim_stat->recent_rotated[file]++;
266
267	if (!memcg_reclaim_stat)
268		return;
269
270	memcg_reclaim_stat->recent_scanned[file]++;
271	if (rotated)
272		memcg_reclaim_stat->recent_rotated[file]++;
273}
274
275static void __activate_page(struct page *page, void *arg)
276{
277	struct zone *zone = page_zone(page);
278
279	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
280		int file = page_is_file_cache(page);
281		int lru = page_lru_base_type(page);
282		del_page_from_lru_list(zone, page, lru);
283
284		SetPageActive(page);
285		lru += LRU_ACTIVE;
286		add_page_to_lru_list(zone, page, lru);
287		__count_vm_event(PGACTIVATE);
288
289		update_page_reclaim_stat(zone, page, file, 1);
290	}
291}
292
293#ifdef CONFIG_SMP
294static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
295
296static void activate_page_drain(int cpu)
297{
298	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
299
300	if (pagevec_count(pvec))
301		pagevec_lru_move_fn(pvec, __activate_page, NULL);
302}
303
304void activate_page(struct page *page)
305{
306	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
307		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
308
309		page_cache_get(page);
310		if (!pagevec_add(pvec, page))
311			pagevec_lru_move_fn(pvec, __activate_page, NULL);
312		put_cpu_var(activate_page_pvecs);
313	}
314}
315
316#else
317static inline void activate_page_drain(int cpu)
318{
319}
320
321void activate_page(struct page *page)
322{
323	struct zone *zone = page_zone(page);
324
325	spin_lock_irq(&zone->lru_lock);
326	__activate_page(page, NULL);
327	spin_unlock_irq(&zone->lru_lock);
328}
329#endif
330
331/*
332 * Mark a page as having seen activity.
333 *
334 * inactive,unreferenced	->	inactive,referenced
335 * inactive,referenced		->	active,unreferenced
336 * active,unreferenced		->	active,referenced
337 */
338void mark_page_accessed(struct page *page)
339{
340	if (!PageActive(page) && !PageUnevictable(page) &&
341			PageReferenced(page) && PageLRU(page)) {
342		activate_page(page);
343		ClearPageReferenced(page);
344	} else if (!PageReferenced(page)) {
345		SetPageReferenced(page);
346	}
347}
348
349EXPORT_SYMBOL(mark_page_accessed);
350
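A small illustration (hypothetical caller) of the two-touch promotion policy documented above:

static void example_touch_twice(struct page *page)
{
	mark_page_accessed(page);	/* inactive,unreferenced -> inactive,referenced */
	mark_page_accessed(page);	/* inactive,referenced   -> active,unreferenced */
}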
351void __lru_cache_add(struct page *page, enum lru_list lru)
352{
353	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
354
355	page_cache_get(page);
356	if (!pagevec_add(pvec, page))
357		____pagevec_lru_add(pvec, lru);
358	put_cpu_var(lru_add_pvecs);
359}
360EXPORT_SYMBOL(__lru_cache_add);
361
362/**
363 * lru_cache_add_lru - add a page to a page list
364 * @page: the page to be added to the LRU.
365 * @lru: the LRU list to which the page is added.
366 */
367void lru_cache_add_lru(struct page *page, enum lru_list lru)
368{
369	if (PageActive(page)) {
370		VM_BUG_ON(PageUnevictable(page));
371		ClearPageActive(page);
372	} else if (PageUnevictable(page)) {
373		VM_BUG_ON(PageActive(page));
374		ClearPageUnevictable(page);
375	}
376
377	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
378	__lru_cache_add(page, lru);
379}
380
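Most callers reach __lru_cache_add() through thin wrappers in include/linux/swap.h, which look approximately like this (reproduced from memory):

static inline void lru_cache_add_anon(struct page *page)
{
	__lru_cache_add(page, LRU_INACTIVE_ANON);
}

static inline void lru_cache_add_file(struct page *page)
{
	__lru_cache_add(page, LRU_INACTIVE_FILE);
}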
381/**
382 * add_page_to_unevictable_list - add a page to the unevictable list
383 * @page:  the page to be added to the unevictable list
384 *
385 * Add page directly to its zone's unevictable list.  To avoid races with
386 * tasks that might be making the page evictable, through eg. munlock,
387 * munmap or exit, while it's not on the lru, we want to add the page
388 * while it's locked or otherwise "invisible" to other tasks.  This is
389 * difficult to do when using the pagevec cache, so bypass that.
390 */
391void add_page_to_unevictable_list(struct page *page)
392{
393	struct zone *zone = page_zone(page);
394
395	spin_lock_irq(&zone->lru_lock);
396	SetPageUnevictable(page);
397	SetPageLRU(page);
398	add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
399	spin_unlock_irq(&zone->lru_lock);
400}
401
402/*
403 * If the page can not be invalidated, it is moved to the
404 * inactive list to speed up its reclaim.  It is moved to the
405 * head of the list, rather than the tail, to give the flusher
406 * threads some time to write it out, as this is much more
407 * effective than the single-page writeout from reclaim.
408 *
 409 * If the page isn't page_mapped but is dirty or under writeback, the
 410 * page can be reclaimed ASAP by setting PG_reclaim.
411 *
412 * 1. active, mapped page -> none
413 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
414 * 3. inactive, mapped page -> none
415 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
416 * 5. inactive, clean -> inactive, tail
417 * 6. Others -> none
418 *
 419 * In case 4, the page is moved to the head of the inactive list because
 420 * the VM expects the flusher threads to write it out, which is much more
 421 * effective than the single-page writeout from reclaim.
422 */
423static void lru_deactivate_fn(struct page *page, void *arg)
424{
425	int lru, file;
426	bool active;
427	struct zone *zone = page_zone(page);
428
429	if (!PageLRU(page))
430		return;
431
432	if (PageUnevictable(page))
433		return;
434
435	/* Some processes are using the page */
436	if (page_mapped(page))
437		return;
438
439	active = PageActive(page);
440
441	file = page_is_file_cache(page);
442	lru = page_lru_base_type(page);
443	del_page_from_lru_list(zone, page, lru + active);
444	ClearPageActive(page);
445	ClearPageReferenced(page);
446	add_page_to_lru_list(zone, page, lru);
447
448	if (PageWriteback(page) || PageDirty(page)) {
449		/*
 450		 * PG_reclaim can race with end_page_writeback(),
 451		 * which can confuse readahead.  But the race window
 452		 * is _really_ small and it's a non-critical problem.
453		 */
454		SetPageReclaim(page);
455	} else {
456		/*
 457		 * The page's writeback ended while it was in the pagevec.
 458		 * Move the page to the tail of the inactive list.
459		 */
460		list_move_tail(&page->lru, &zone->lru[lru].list);
461		mem_cgroup_rotate_reclaimable_page(page);
462		__count_vm_event(PGROTATED);
463	}
464
465	if (active)
466		__count_vm_event(PGDEACTIVATE);
467	update_page_reclaim_stat(zone, page, file, 0);
468}
469
470/*
471 * Drain pages out of the cpu's pagevecs.
472 * Either "cpu" is the current CPU, and preemption has already been
473 * disabled; or "cpu" is being hot-unplugged, and is already dead.
474 */
475static void drain_cpu_pagevecs(int cpu)
476{
477	struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
478	struct pagevec *pvec;
479	int lru;
480
481	for_each_lru(lru) {
482		pvec = &pvecs[lru - LRU_BASE];
483		if (pagevec_count(pvec))
484			____pagevec_lru_add(pvec, lru);
485	}
486
487	pvec = &per_cpu(lru_rotate_pvecs, cpu);
488	if (pagevec_count(pvec)) {
489		unsigned long flags;
490
491		/* No harm done if a racing interrupt already did this */
492		local_irq_save(flags);
493		pagevec_move_tail(pvec);
494		local_irq_restore(flags);
495	}
496
497	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
498	if (pagevec_count(pvec))
499		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
500
501	activate_page_drain(cpu);
502}
503
504/**
505 * deactivate_page - forcefully deactivate a page
506 * @page: page to deactivate
507 *
508 * This function hints the VM that @page is a good reclaim candidate,
509 * for example if its invalidation fails due to the page being dirty
510 * or under writeback.
511 */
512void deactivate_page(struct page *page)
513{
514	/*
 515	 * In a workload with many unevictable pages (e.g. heavy mprotect use),
 516	 * deactivating unevictable pages to accelerate reclaim is pointless.
517	 */
518	if (PageUnevictable(page))
519		return;
520
521	if (likely(get_page_unless_zero(page))) {
522		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
523
524		if (!pagevec_add(pvec, page))
525			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
526		put_cpu_var(lru_deactivate_pvecs);
527	}
528}
529
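The intended caller is invalidate_mapping_pages() in mm/truncate.c, which (roughly, from memory) deactivates pages whose invalidation failed:

	ret = invalidate_inode_page(page);
	/* invalidation failed (page dirty, under writeback, ...): hint reclaim instead */
	if (!ret)
		deactivate_page(page);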
530void lru_add_drain(void)
531{
532	drain_cpu_pagevecs(get_cpu());
533	put_cpu();
534}
535
536static void lru_add_drain_per_cpu(struct work_struct *dummy)
537{
538	lru_add_drain();
539}
540
541/*
542 * Returns 0 for success
543 */
544int lru_add_drain_all(void)
545{
546	return schedule_on_each_cpu(lru_add_drain_per_cpu);
547}
548
549/*
550 * Batched page_cache_release().  Decrement the reference count on all the
551 * passed pages.  If it fell to zero then remove the page from the LRU and
552 * free it.
553 *
554 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
555 * for the remainder of the operation.
556 *
557 * The locking in this function is against shrink_inactive_list(): we recheck
558 * the page count inside the lock to see whether shrink_inactive_list()
559 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
560 * will free it.
561 */
562void release_pages(struct page **pages, int nr, int cold)
563{
564	int i;
565	struct pagevec pages_to_free;
566	struct zone *zone = NULL;
567	unsigned long uninitialized_var(flags);
568
569	pagevec_init(&pages_to_free, cold);
570	for (i = 0; i < nr; i++) {
571		struct page *page = pages[i];
572
573		if (unlikely(PageCompound(page))) {
574			if (zone) {
575				spin_unlock_irqrestore(&zone->lru_lock, flags);
576				zone = NULL;
577			}
578			put_compound_page(page);
579			continue;
580		}
581
582		if (!put_page_testzero(page))
583			continue;
584
585		if (PageLRU(page)) {
586			struct zone *pagezone = page_zone(page);
587
588			if (pagezone != zone) {
589				if (zone)
590					spin_unlock_irqrestore(&zone->lru_lock,
591									flags);
592				zone = pagezone;
593				spin_lock_irqsave(&zone->lru_lock, flags);
594			}
595			VM_BUG_ON(!PageLRU(page));
596			__ClearPageLRU(page);
597			del_page_from_lru(zone, page);
598		}
599
600		if (!pagevec_add(&pages_to_free, page)) {
601			if (zone) {
602				spin_unlock_irqrestore(&zone->lru_lock, flags);
603				zone = NULL;
604			}
605			__pagevec_free(&pages_to_free);
606			pagevec_reinit(&pages_to_free);
607  		}
608	}
609	if (zone)
610		spin_unlock_irqrestore(&zone->lru_lock, flags);
611
612	pagevec_free(&pages_to_free);
613}
614EXPORT_SYMBOL(release_pages);
615
616/*
617 * The pages which we're about to release may be in the deferred lru-addition
618 * queues.  That would prevent them from really being freed right now.  That's
619 * OK from a correctness point of view but is inefficient - those pages may be
620 * cache-warm and we want to give them back to the page allocator ASAP.
621 *
622 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
623 * and __pagevec_lru_add_active() call release_pages() directly to avoid
624 * mutual recursion.
625 */
626void __pagevec_release(struct pagevec *pvec)
627{
628	lru_add_drain();
629	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
630	pagevec_reinit(pvec);
631}
632
633EXPORT_SYMBOL(__pagevec_release);
634
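A hedged sketch of the gather-and-release pattern built on the pagevec API; pagevec_release() is the usual entry point, a static inline in linux/pagevec.h that calls __pagevec_release() only when the pagevec is non-empty (the function below is hypothetical):

static void example_batch_release(struct page **pages, int nr)
{
	struct pagevec pvec;
	int i;

	pagevec_init(&pvec, 0);			/* second argument: cold == 0 */
	for (i = 0; i < nr; i++) {
		if (!pagevec_add(&pvec, pages[i]))	/* returns 0 once the pagevec is full */
			__pagevec_release(&pvec);	/* also reinitialises pvec */
	}
	pagevec_release(&pvec);			/* flush whatever is left */
}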
635/* used by __split_huge_page_refcount() */
636void lru_add_page_tail(struct zone* zone,
637		       struct page *page, struct page *page_tail)
638{
639	int active;
640	enum lru_list lru;
641	const int file = 0;
642	struct list_head *head;
643
644	VM_BUG_ON(!PageHead(page));
645	VM_BUG_ON(PageCompound(page_tail));
646	VM_BUG_ON(PageLRU(page_tail));
647	VM_BUG_ON(!spin_is_locked(&zone->lru_lock));
648
649	SetPageLRU(page_tail);
650
651	if (page_evictable(page_tail, NULL)) {
652		if (PageActive(page)) {
653			SetPageActive(page_tail);
654			active = 1;
655			lru = LRU_ACTIVE_ANON;
656		} else {
657			active = 0;
658			lru = LRU_INACTIVE_ANON;
659		}
660		update_page_reclaim_stat(zone, page_tail, file, active);
661		if (likely(PageLRU(page)))
662			head = page->lru.prev;
663		else
664			head = &zone->lru[lru].list;
665		__add_page_to_lru_list(zone, page_tail, lru, head);
666	} else {
667		SetPageUnevictable(page_tail);
668		add_page_to_lru_list(zone, page_tail, LRU_UNEVICTABLE);
669	}
670}
671
672static void ____pagevec_lru_add_fn(struct page *page, void *arg)
673{
674	enum lru_list lru = (enum lru_list)arg;
675	struct zone *zone = page_zone(page);
676	int file = is_file_lru(lru);
677	int active = is_active_lru(lru);
678
679	VM_BUG_ON(PageActive(page));
680	VM_BUG_ON(PageUnevictable(page));
681	VM_BUG_ON(PageLRU(page));
682
683	SetPageLRU(page);
684	if (active)
685		SetPageActive(page);
686	update_page_reclaim_stat(zone, page, file, active);
687	add_page_to_lru_list(zone, page, lru);
688}
689
690/*
691 * Add the passed pages to the LRU, then drop the caller's refcount
692 * on them.  Reinitialises the caller's pagevec.
693 */
694void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
695{
696	VM_BUG_ON(is_unevictable_lru(lru));
697
698	pagevec_lru_move_fn(pvec, ____pagevec_lru_add_fn, (void *)lru);
699}
700
701EXPORT_SYMBOL(____pagevec_lru_add);
702
703/*
704 * Try to drop buffers from the pages in a pagevec
705 */
706void pagevec_strip(struct pagevec *pvec)
707{
708	int i;
709
710	for (i = 0; i < pagevec_count(pvec); i++) {
711		struct page *page = pvec->pages[i];
712
713		if (page_has_private(page) && trylock_page(page)) {
714			if (page_has_private(page))
715				try_to_release_page(page, 0);
716			unlock_page(page);
717		}
718	}
719}
720
721/**
722 * pagevec_lookup - gang pagecache lookup
723 * @pvec:	Where the resulting pages are placed
724 * @mapping:	The address_space to search
725 * @start:	The starting page index
726 * @nr_pages:	The maximum number of pages
727 *
728 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
729 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
730 * reference against the pages in @pvec.
731 *
732 * The search returns a group of mapping-contiguous pages with ascending
733 * indexes.  There may be holes in the indices due to not-present pages.
734 *
735 * pagevec_lookup() returns the number of pages which were found.
736 */
737unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
738		pgoff_t start, unsigned nr_pages)
739{
740	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
741	return pagevec_count(pvec);
742}
743
744EXPORT_SYMBOL(pagevec_lookup);
745
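A sketch of the classic lookup loop driven by pagevec_lookup(); the mapping argument and the per-page work are placeholders, and note that the caller advances the index itself:

static void example_walk_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
		int i;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = page->index + 1;	/* resume after this page */
			/* ... per-page work goes here ... */
		}
		pagevec_release(&pvec);		/* drops the references taken by the lookup */
		cond_resched();
	}
}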
746unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
747		pgoff_t *index, int tag, unsigned nr_pages)
748{
749	pvec->nr = find_get_pages_tag(mapping, index, tag,
750					nr_pages, pvec->pages);
751	return pagevec_count(pvec);
752}
753
754EXPORT_SYMBOL(pagevec_lookup_tag);
755
756/*
757 * Perform any setup for the swap system
758 */
759void __init swap_setup(void)
760{
761	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
762
763#ifdef CONFIG_SWAP
764	bdi_init(swapper_space.backing_dev_info);
765#endif
766
767	/* Use a smaller cluster for small-memory machines */
768	if (megs < 16)
769		page_cluster = 2;
770	else
771		page_cluster = 3;
772	/*
 773	 * Right now other parts of the system mean that we
 774	 * _really_ don't want to cluster much more.
775	 */
776}
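For reference, page_cluster is exported as /proc/sys/vm/page-cluster and is interpreted logarithmically by swap-in readahead; a rough sketch of the resulting window (an assumption about the consumer, not code from this file):

static unsigned long example_swapin_window(void)
{
	/* logarithmic: 0 -> 1 page, 2 -> 4 pages, 3 (the default above) -> 8 pages */
	return 1UL << page_cluster;
}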
v3.5.6
  1/*
  2 *  linux/mm/swap.c
  3 *
  4 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  5 */
  6
  7/*
  8 * This file contains the default values for the operation of the
  9 * Linux VM subsystem. Fine-tuning documentation can be found in
 10 * Documentation/sysctl/vm.txt.
 11 * Started 18.12.91
 12 * Swap aging added 23.2.95, Stephen Tweedie.
 13 * Buffermem limits added 12.3.98, Rik van Riel.
 14 */
 15
 16#include <linux/mm.h>
 17#include <linux/sched.h>
 18#include <linux/kernel_stat.h>
 19#include <linux/swap.h>
 20#include <linux/mman.h>
 21#include <linux/pagemap.h>
 22#include <linux/pagevec.h>
 23#include <linux/init.h>
 24#include <linux/export.h>
 25#include <linux/mm_inline.h>
 26#include <linux/percpu_counter.h>
 27#include <linux/percpu.h>
 28#include <linux/cpu.h>
 29#include <linux/notifier.h>
 30#include <linux/backing-dev.h>
 31#include <linux/memcontrol.h>
 32#include <linux/gfp.h>
 33
 34#include "internal.h"
 35
 36/* How many pages do we try to swap or page in/out together? */
 37int page_cluster;
 38
 39static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
 40static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
 41static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
 42
 43/*
 44 * This path almost never happens for VM activity - pages are normally
 45 * freed via pagevecs.  But it gets used by networking.
 46 */
 47static void __page_cache_release(struct page *page)
 48{
 49	if (PageLRU(page)) {
 50		struct zone *zone = page_zone(page);
 51		struct lruvec *lruvec;
 52		unsigned long flags;
 53
 54		spin_lock_irqsave(&zone->lru_lock, flags);
 55		lruvec = mem_cgroup_page_lruvec(page, zone);
 56		VM_BUG_ON(!PageLRU(page));
 57		__ClearPageLRU(page);
 58		del_page_from_lru_list(page, lruvec, page_off_lru(page));
 59		spin_unlock_irqrestore(&zone->lru_lock, flags);
 60	}
 61}
 62
 63static void __put_single_page(struct page *page)
 64{
 65	__page_cache_release(page);
 66	free_hot_cold_page(page, 0);
 67}
 68
 69static void __put_compound_page(struct page *page)
 70{
 71	compound_page_dtor *dtor;
 72
 73	__page_cache_release(page);
 74	dtor = get_compound_page_dtor(page);
 75	(*dtor)(page);
 76}
 77
 78static void put_compound_page(struct page *page)
 79{
 80	if (unlikely(PageTail(page))) {
 81		/* __split_huge_page_refcount can run under us */
 82		struct page *page_head = compound_trans_head(page);
 83
 84		if (likely(page != page_head &&
 85			   get_page_unless_zero(page_head))) {
 86			unsigned long flags;
 87
 88			/*
 89			 * THP can not break up slab pages so avoid taking
 90			 * compound_lock().  Slab performs non-atomic bit ops
 91			 * on page->flags for better performance.  In particular
 92			 * slab_unlock() in slub used to be a hot path.  It is
 93			 * still hot on arches that do not support
 94			 * this_cpu_cmpxchg_double().
 95			 */
 96			if (PageSlab(page_head)) {
 97				if (PageTail(page)) {
 98					if (put_page_testzero(page_head))
 99						VM_BUG_ON(1);
100
101					atomic_dec(&page->_mapcount);
102					goto skip_lock_tail;
103				} else
104					goto skip_lock;
105			}
106			/*
107			 * page_head wasn't a dangling pointer but it
108			 * may not be a head page anymore by the time
109			 * we obtain the lock. That is ok as long as it
110			 * can't be freed from under us.
111			 */
112			flags = compound_lock_irqsave(page_head);
113			if (unlikely(!PageTail(page))) {
114				/* __split_huge_page_refcount run before us */
115				compound_unlock_irqrestore(page_head, flags);
116skip_lock:
117				if (put_page_testzero(page_head))
118					__put_single_page(page_head);
119out_put_single:
120				if (put_page_testzero(page))
121					__put_single_page(page);
122				return;
123			}
124			VM_BUG_ON(page_head != page->first_page);
125			/*
126			 * We can release the refcount taken by
127			 * get_page_unless_zero() now that
128			 * __split_huge_page_refcount() is blocked on
129			 * the compound_lock.
130			 */
131			if (put_page_testzero(page_head))
132				VM_BUG_ON(1);
133			/* __split_huge_page_refcount will wait now */
134			VM_BUG_ON(page_mapcount(page) <= 0);
135			atomic_dec(&page->_mapcount);
136			VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
137			VM_BUG_ON(atomic_read(&page->_count) != 0);
138			compound_unlock_irqrestore(page_head, flags);
139
140skip_lock_tail:
141			if (put_page_testzero(page_head)) {
142				if (PageHead(page_head))
143					__put_compound_page(page_head);
144				else
145					__put_single_page(page_head);
146			}
147		} else {
148			/* page_head is a dangling pointer */
149			VM_BUG_ON(PageTail(page));
150			goto out_put_single;
151		}
152	} else if (put_page_testzero(page)) {
153		if (PageHead(page))
154			__put_compound_page(page);
155		else
156			__put_single_page(page);
157	}
158}
159
160void put_page(struct page *page)
161{
162	if (unlikely(PageCompound(page)))
163		put_compound_page(page);
164	else if (put_page_testzero(page))
165		__put_single_page(page);
166}
167EXPORT_SYMBOL(put_page);
168
169/*
170 * This function is exported but must not be called by anything other
171 * than get_page(). It implements the slow path of get_page().
172 */
173bool __get_page_tail(struct page *page)
174{
175	/*
176	 * This takes care of get_page() if run on a tail page
177	 * returned by one of the get_user_pages/follow_page variants.
178	 * get_user_pages/follow_page itself doesn't need the compound
179	 * lock because it runs __get_page_tail_foll() under the
180	 * proper PT lock that already serializes against
181	 * split_huge_page().
182	 */
183	unsigned long flags;
184	bool got = false;
185	struct page *page_head = compound_trans_head(page);
186
187	if (likely(page != page_head && get_page_unless_zero(page_head))) {
188
189		/* See the corresponding comment in put_compound_page(). */
190		if (PageSlab(page_head)) {
191			if (likely(PageTail(page))) {
192				__get_page_tail_foll(page, false);
193				return true;
194			} else {
195				put_page(page_head);
196				return false;
197			}
198		}
199
200		/*
201		 * page_head wasn't a dangling pointer but it
202		 * may not be a head page anymore by the time
203		 * we obtain the lock. That is ok as long as it
204		 * can't be freed from under us.
205		 */
206		flags = compound_lock_irqsave(page_head);
207		/* here __split_huge_page_refcount won't run anymore */
208		if (likely(PageTail(page))) {
209			__get_page_tail_foll(page, false);
210			got = true;
211		}
212		compound_unlock_irqrestore(page_head, flags);
213		if (unlikely(!got))
214			put_page(page_head);
215	}
216	return got;
217}
218EXPORT_SYMBOL(__get_page_tail);
219
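For context, the get_page() fast path in include/linux/mm.h looks approximately like this (quoted from memory), falling into __get_page_tail() only for tail pages:

static inline void get_page(struct page *page)
{
	if (unlikely(PageTail(page)))
		if (likely(__get_page_tail(page)))
			return;
	/* a normal page, or a compound head: its _count must already be elevated */
	VM_BUG_ON(atomic_read(&page->_count) <= 0);
	atomic_inc(&page->_count);
}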
220/**
221 * put_pages_list() - release a list of pages
222 * @pages: list of pages threaded on page->lru
223 *
224 * Release a list of pages which are strung together on page.lru.  Currently
225 * used by read_cache_pages() and related error recovery code.
226 */
227void put_pages_list(struct list_head *pages)
228{
229	while (!list_empty(pages)) {
230		struct page *victim;
231
232		victim = list_entry(pages->prev, struct page, lru);
233		list_del(&victim->lru);
234		page_cache_release(victim);
235	}
236}
237EXPORT_SYMBOL(put_pages_list);
238
239static void pagevec_lru_move_fn(struct pagevec *pvec,
240	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
241	void *arg)
242{
243	int i;
244	struct zone *zone = NULL;
245	struct lruvec *lruvec;
246	unsigned long flags = 0;
247
248	for (i = 0; i < pagevec_count(pvec); i++) {
249		struct page *page = pvec->pages[i];
250		struct zone *pagezone = page_zone(page);
251
252		if (pagezone != zone) {
253			if (zone)
254				spin_unlock_irqrestore(&zone->lru_lock, flags);
255			zone = pagezone;
256			spin_lock_irqsave(&zone->lru_lock, flags);
257		}
258
259		lruvec = mem_cgroup_page_lruvec(page, zone);
260		(*move_fn)(page, lruvec, arg);
261	}
262	if (zone)
263		spin_unlock_irqrestore(&zone->lru_lock, flags);
264	release_pages(pvec->pages, pvec->nr, pvec->cold);
265	pagevec_reinit(pvec);
266}
267
268static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
269				 void *arg)
270{
271	int *pgmoved = arg;
272
273	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
274		enum lru_list lru = page_lru_base_type(page);
275		list_move_tail(&page->lru, &lruvec->lists[lru]);
276		(*pgmoved)++;
277	}
278}
279
280/*
281 * pagevec_move_tail() must be called with IRQ disabled.
282 * Otherwise this may cause nasty races.
283 */
284static void pagevec_move_tail(struct pagevec *pvec)
285{
286	int pgmoved = 0;
287
288	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
289	__count_vm_events(PGROTATED, pgmoved);
290}
291
292/*
293 * Writeback is about to end against a page which has been marked for immediate
294 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
295 * inactive list.
296 */
297void rotate_reclaimable_page(struct page *page)
298{
299	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
300	    !PageUnevictable(page) && PageLRU(page)) {
301		struct pagevec *pvec;
302		unsigned long flags;
303
304		page_cache_get(page);
305		local_irq_save(flags);
306		pvec = &__get_cpu_var(lru_rotate_pvecs);
307		if (!pagevec_add(pvec, page))
308			pagevec_move_tail(pvec);
309		local_irq_restore(flags);
310	}
311}
312
313static void update_page_reclaim_stat(struct lruvec *lruvec,
314				     int file, int rotated)
315{
316	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
317
318	reclaim_stat->recent_scanned[file]++;
319	if (rotated)
320		reclaim_stat->recent_rotated[file]++;
321}
322
323static void __activate_page(struct page *page, struct lruvec *lruvec,
324			    void *arg)
325{
326	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
327		int file = page_is_file_cache(page);
328		int lru = page_lru_base_type(page);
329
330		del_page_from_lru_list(page, lruvec, lru);
331		SetPageActive(page);
332		lru += LRU_ACTIVE;
333		add_page_to_lru_list(page, lruvec, lru);
334
335		__count_vm_event(PGACTIVATE);
336		update_page_reclaim_stat(lruvec, file, 1);
337	}
338}
339
340#ifdef CONFIG_SMP
341static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
342
343static void activate_page_drain(int cpu)
344{
345	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
346
347	if (pagevec_count(pvec))
348		pagevec_lru_move_fn(pvec, __activate_page, NULL);
349}
350
351void activate_page(struct page *page)
352{
353	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
354		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
355
356		page_cache_get(page);
357		if (!pagevec_add(pvec, page))
358			pagevec_lru_move_fn(pvec, __activate_page, NULL);
359		put_cpu_var(activate_page_pvecs);
360	}
361}
362
363#else
364static inline void activate_page_drain(int cpu)
365{
366}
367
368void activate_page(struct page *page)
369{
370	struct zone *zone = page_zone(page);
371
372	spin_lock_irq(&zone->lru_lock);
373	__activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
374	spin_unlock_irq(&zone->lru_lock);
375}
376#endif
377
378/*
379 * Mark a page as having seen activity.
380 *
381 * inactive,unreferenced	->	inactive,referenced
382 * inactive,referenced		->	active,unreferenced
383 * active,unreferenced		->	active,referenced
384 */
385void mark_page_accessed(struct page *page)
386{
387	if (!PageActive(page) && !PageUnevictable(page) &&
388			PageReferenced(page) && PageLRU(page)) {
389		activate_page(page);
390		ClearPageReferenced(page);
391	} else if (!PageReferenced(page)) {
392		SetPageReferenced(page);
393	}
394}
395EXPORT_SYMBOL(mark_page_accessed);
396
397void __lru_cache_add(struct page *page, enum lru_list lru)
398{
399	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
400
401	page_cache_get(page);
402	if (!pagevec_add(pvec, page))
403		__pagevec_lru_add(pvec, lru);
404	put_cpu_var(lru_add_pvecs);
405}
406EXPORT_SYMBOL(__lru_cache_add);
407
408/**
409 * lru_cache_add_lru - add a page to a page list
410 * @page: the page to be added to the LRU.
411 * @lru: the LRU list to which the page is added.
412 */
413void lru_cache_add_lru(struct page *page, enum lru_list lru)
414{
415	if (PageActive(page)) {
416		VM_BUG_ON(PageUnevictable(page));
417		ClearPageActive(page);
418	} else if (PageUnevictable(page)) {
419		VM_BUG_ON(PageActive(page));
420		ClearPageUnevictable(page);
421	}
422
423	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
424	__lru_cache_add(page, lru);
425}
426
427/**
428 * add_page_to_unevictable_list - add a page to the unevictable list
429 * @page:  the page to be added to the unevictable list
430 *
431 * Add page directly to its zone's unevictable list.  To avoid races with
432 * tasks that might be making the page evictable, through eg. munlock,
433 * munmap or exit, while it's not on the lru, we want to add the page
434 * while it's locked or otherwise "invisible" to other tasks.  This is
435 * difficult to do when using the pagevec cache, so bypass that.
436 */
437void add_page_to_unevictable_list(struct page *page)
438{
439	struct zone *zone = page_zone(page);
440	struct lruvec *lruvec;
441
442	spin_lock_irq(&zone->lru_lock);
443	lruvec = mem_cgroup_page_lruvec(page, zone);
444	SetPageUnevictable(page);
445	SetPageLRU(page);
446	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
447	spin_unlock_irq(&zone->lru_lock);
448}
449
450/*
451 * If the page can not be invalidated, it is moved to the
452 * inactive list to speed up its reclaim.  It is moved to the
453 * head of the list, rather than the tail, to give the flusher
454 * threads some time to write it out, as this is much more
455 * effective than the single-page writeout from reclaim.
456 *
457 * If the page isn't page_mapped but is dirty or under writeback, the
458 * page can be reclaimed ASAP by setting PG_reclaim.
459 *
460 * 1. active, mapped page -> none
461 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
462 * 3. inactive, mapped page -> none
463 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
464 * 5. inactive, clean -> inactive, tail
465 * 6. Others -> none
466 *
467 * In case 4, the page is moved to the head of the inactive list because
468 * the VM expects the flusher threads to write it out, which is much more
469 * effective than the single-page writeout from reclaim.
470 */
471static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
472			      void *arg)
473{
474	int lru, file;
475	bool active;
476
477	if (!PageLRU(page))
478		return;
479
480	if (PageUnevictable(page))
481		return;
482
483	/* Some processes are using the page */
484	if (page_mapped(page))
485		return;
486
487	active = PageActive(page);
488	file = page_is_file_cache(page);
489	lru = page_lru_base_type(page);
490
491	del_page_from_lru_list(page, lruvec, lru + active);
492	ClearPageActive(page);
493	ClearPageReferenced(page);
494	add_page_to_lru_list(page, lruvec, lru);
495
496	if (PageWriteback(page) || PageDirty(page)) {
497		/*
498		 * PG_reclaim can race with end_page_writeback(),
499		 * which can confuse readahead.  But the race window
500		 * is _really_ small and it's a non-critical problem.
501		 */
502		SetPageReclaim(page);
503	} else {
504		/*
505		 * The page's writeback ended while it was in the pagevec.
506		 * Move the page to the tail of the inactive list.
507		 */
508		list_move_tail(&page->lru, &lruvec->lists[lru]);
509		__count_vm_event(PGROTATED);
510	}
511
512	if (active)
513		__count_vm_event(PGDEACTIVATE);
514	update_page_reclaim_stat(lruvec, file, 0);
515}
516
517/*
518 * Drain pages out of the cpu's pagevecs.
519 * Either "cpu" is the current CPU, and preemption has already been
520 * disabled; or "cpu" is being hot-unplugged, and is already dead.
521 */
522void lru_add_drain_cpu(int cpu)
523{
524	struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
525	struct pagevec *pvec;
526	int lru;
527
528	for_each_lru(lru) {
529		pvec = &pvecs[lru - LRU_BASE];
530		if (pagevec_count(pvec))
531			__pagevec_lru_add(pvec, lru);
532	}
533
534	pvec = &per_cpu(lru_rotate_pvecs, cpu);
535	if (pagevec_count(pvec)) {
536		unsigned long flags;
537
538		/* No harm done if a racing interrupt already did this */
539		local_irq_save(flags);
540		pagevec_move_tail(pvec);
541		local_irq_restore(flags);
542	}
543
544	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
545	if (pagevec_count(pvec))
546		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
547
548	activate_page_drain(cpu);
549}
550
551/**
552 * deactivate_page - forcefully deactivate a page
553 * @page: page to deactivate
554 *
555 * This function hints the VM that @page is a good reclaim candidate,
556 * for example if its invalidation fails due to the page being dirty
557 * or under writeback.
558 */
559void deactivate_page(struct page *page)
560{
561	/*
562	 * In a workload with many unevictable pages (e.g. heavy mprotect use),
563	 * deactivating unevictable pages to accelerate reclaim is pointless.
564	 */
565	if (PageUnevictable(page))
566		return;
567
568	if (likely(get_page_unless_zero(page))) {
569		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
570
571		if (!pagevec_add(pvec, page))
572			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
573		put_cpu_var(lru_deactivate_pvecs);
574	}
575}
576
577void lru_add_drain(void)
578{
579	lru_add_drain_cpu(get_cpu());
580	put_cpu();
581}
582
583static void lru_add_drain_per_cpu(struct work_struct *dummy)
584{
585	lru_add_drain();
586}
587
588/*
589 * Returns 0 for success
590 */
591int lru_add_drain_all(void)
592{
593	return schedule_on_each_cpu(lru_add_drain_per_cpu);
594}
595
596/*
597 * Batched page_cache_release().  Decrement the reference count on all the
598 * passed pages.  If it fell to zero then remove the page from the LRU and
599 * free it.
600 *
601 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
602 * for the remainder of the operation.
603 *
604 * The locking in this function is against shrink_inactive_list(): we recheck
605 * the page count inside the lock to see whether shrink_inactive_list()
606 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
607 * will free it.
608 */
609void release_pages(struct page **pages, int nr, int cold)
610{
611	int i;
612	LIST_HEAD(pages_to_free);
613	struct zone *zone = NULL;
614	struct lruvec *lruvec;
615	unsigned long uninitialized_var(flags);
616
617	for (i = 0; i < nr; i++) {
618		struct page *page = pages[i];
619
620		if (unlikely(PageCompound(page))) {
621			if (zone) {
622				spin_unlock_irqrestore(&zone->lru_lock, flags);
623				zone = NULL;
624			}
625			put_compound_page(page);
626			continue;
627		}
628
629		if (!put_page_testzero(page))
630			continue;
631
632		if (PageLRU(page)) {
633			struct zone *pagezone = page_zone(page);
634
635			if (pagezone != zone) {
636				if (zone)
637					spin_unlock_irqrestore(&zone->lru_lock,
638									flags);
639				zone = pagezone;
640				spin_lock_irqsave(&zone->lru_lock, flags);
641			}
642
643			lruvec = mem_cgroup_page_lruvec(page, zone);
644			VM_BUG_ON(!PageLRU(page));
645			__ClearPageLRU(page);
646			del_page_from_lru_list(page, lruvec, page_off_lru(page));
647		}
648
649		list_add(&page->lru, &pages_to_free);
650	}
651	if (zone)
652		spin_unlock_irqrestore(&zone->lru_lock, flags);
653
654	free_hot_cold_page_list(&pages_to_free, cold);
655}
656EXPORT_SYMBOL(release_pages);
657
658/*
659 * The pages which we're about to release may be in the deferred lru-addition
660 * queues.  That would prevent them from really being freed right now.  That's
661 * OK from a correctness point of view but is inefficient - those pages may be
662 * cache-warm and we want to give them back to the page allocator ASAP.
663 *
664 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
665 * and __pagevec_lru_add_active() call release_pages() directly to avoid
666 * mutual recursion.
667 */
668void __pagevec_release(struct pagevec *pvec)
669{
670	lru_add_drain();
671	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
672	pagevec_reinit(pvec);
673}
674EXPORT_SYMBOL(__pagevec_release);
675
676#ifdef CONFIG_TRANSPARENT_HUGEPAGE
677/* used by __split_huge_page_refcount() */
678void lru_add_page_tail(struct page *page, struct page *page_tail,
679		       struct lruvec *lruvec)
680{
681	int uninitialized_var(active);
682	enum lru_list lru;
683	const int file = 0;
684
685	VM_BUG_ON(!PageHead(page));
686	VM_BUG_ON(PageCompound(page_tail));
687	VM_BUG_ON(PageLRU(page_tail));
688	VM_BUG_ON(NR_CPUS != 1 &&
689		  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));
690
691	SetPageLRU(page_tail);
692
693	if (page_evictable(page_tail, NULL)) {
694		if (PageActive(page)) {
695			SetPageActive(page_tail);
696			active = 1;
697			lru = LRU_ACTIVE_ANON;
698		} else {
699			active = 0;
700			lru = LRU_INACTIVE_ANON;
701		}
702	} else {
703		SetPageUnevictable(page_tail);
704		lru = LRU_UNEVICTABLE;
705	}
706
707	if (likely(PageLRU(page)))
708		list_add_tail(&page_tail->lru, &page->lru);
709	else {
710		struct list_head *list_head;
711		/*
712		 * Head page has not yet been counted, as an hpage,
713		 * so we must account for each subpage individually.
714		 *
715		 * Use the standard add function to put page_tail on the list,
716		 * but then correct its position so they all end up in order.
717		 */
718		add_page_to_lru_list(page_tail, lruvec, lru);
719		list_head = page_tail->lru.prev;
720		list_move_tail(&page_tail->lru, list_head);
721	}
722
723	if (!PageUnevictable(page))
724		update_page_reclaim_stat(lruvec, file, active);
725}
726#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
727
728static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
729				 void *arg)
730{
731	enum lru_list lru = (enum lru_list)arg;
732	int file = is_file_lru(lru);
733	int active = is_active_lru(lru);
734
735	VM_BUG_ON(PageActive(page));
736	VM_BUG_ON(PageUnevictable(page));
737	VM_BUG_ON(PageLRU(page));
738
739	SetPageLRU(page);
740	if (active)
741		SetPageActive(page);
742	add_page_to_lru_list(page, lruvec, lru);
743	update_page_reclaim_stat(lruvec, file, active);
744}
745
746/*
747 * Add the passed pages to the LRU, then drop the caller's refcount
748 * on them.  Reinitialises the caller's pagevec.
749 */
750void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
751{
752	VM_BUG_ON(is_unevictable_lru(lru));
753
754	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, (void *)lru);
755}
756EXPORT_SYMBOL(__pagevec_lru_add);
757
758/**
759 * pagevec_lookup - gang pagecache lookup
760 * @pvec:	Where the resulting pages are placed
761 * @mapping:	The address_space to search
762 * @start:	The starting page index
763 * @nr_pages:	The maximum number of pages
764 *
765 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
766 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
767 * reference against the pages in @pvec.
768 *
769 * The search returns a group of mapping-contiguous pages with ascending
770 * indexes.  There may be holes in the indices due to not-present pages.
771 *
772 * pagevec_lookup() returns the number of pages which were found.
773 */
774unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
775		pgoff_t start, unsigned nr_pages)
776{
777	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
778	return pagevec_count(pvec);
779}
780EXPORT_SYMBOL(pagevec_lookup);
781
782unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
783		pgoff_t *index, int tag, unsigned nr_pages)
784{
785	pvec->nr = find_get_pages_tag(mapping, index, tag,
786					nr_pages, pvec->pages);
787	return pagevec_count(pvec);
788}
789EXPORT_SYMBOL(pagevec_lookup_tag);
790
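A hedged sketch of how pagevec_lookup_tag() is typically driven from a writeback-style loop; the mapping argument and the per-page work are placeholders, and unlike pagevec_lookup() the index is advanced by the lookup itself:

static void example_walk_dirty_pages(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup_tag(&pvec, mapping, &index,
				  PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE)) {
		int i;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			lock_page(page);
			if (PageDirty(page)) {
				/* ... write the page out here ... */
			}
			unlock_page(page);
		}
		pagevec_release(&pvec);		/* drops the references taken by the lookup */
		cond_resched();
	}
}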
791/*
792 * Perform any setup for the swap system
793 */
794void __init swap_setup(void)
795{
796	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
797
798#ifdef CONFIG_SWAP
799	bdi_init(swapper_space.backing_dev_info);
800#endif
801
802	/* Use a smaller cluster for small-memory machines */
803	if (megs < 16)
804		page_cluster = 2;
805	else
806		page_cluster = 3;
807	/*
808	 * Right now other parts of the system mean that we
809	 * _really_ don't want to cluster much more.
810	 */
811}