v4.6
  1/*
  2 *  linux/mm/swap.c
  3 *
  4 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  5 */
  6
  7/*
  8 * This file contains the default values for the operation of the
  9 * Linux VM subsystem. Fine-tuning documentation can be found in
 10 * Documentation/sysctl/vm.txt.
 11 * Started 18.12.91
 12 * Swap aging added 23.2.95, Stephen Tweedie.
 13 * Buffermem limits added 12.3.98, Rik van Riel.
 14 */
 15
 16#include <linux/mm.h>
 17#include <linux/sched.h>
 18#include <linux/kernel_stat.h>
 19#include <linux/swap.h>
 20#include <linux/mman.h>
 21#include <linux/pagemap.h>
 22#include <linux/pagevec.h>
 23#include <linux/init.h>
 24#include <linux/export.h>
 25#include <linux/mm_inline.h>
 26#include <linux/percpu_counter.h>
 27#include <linux/memremap.h>
 28#include <linux/percpu.h>
 29#include <linux/cpu.h>
 30#include <linux/notifier.h>
 31#include <linux/backing-dev.h>
 32#include <linux/memcontrol.h>
 33#include <linux/gfp.h>
 34#include <linux/uio.h>
 35#include <linux/hugetlb.h>
 36#include <linux/page_idle.h>
 37
 38#include "internal.h"
 39
 40#define CREATE_TRACE_POINTS
 41#include <trace/events/pagemap.h>
 42
 43/* How many pages do we try to swap or page in/out together? */
 44int page_cluster;
 45
 46static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
 47static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
 48static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
 49static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
 50
 51/*
 52 * This path almost never happens for VM activity - pages are normally
 53 * freed via pagevecs.  But it gets used by networking.
 54 */
 55static void __page_cache_release(struct page *page)
 56{
 57	if (PageLRU(page)) {
 58		struct zone *zone = page_zone(page);
 59		struct lruvec *lruvec;
 60		unsigned long flags;
 61
 62		spin_lock_irqsave(&zone->lru_lock, flags);
 63		lruvec = mem_cgroup_page_lruvec(page, zone);
 64		VM_BUG_ON_PAGE(!PageLRU(page), page);
 65		__ClearPageLRU(page);
 66		del_page_from_lru_list(page, lruvec, page_off_lru(page));
 67		spin_unlock_irqrestore(&zone->lru_lock, flags);
 68	}
 69	mem_cgroup_uncharge(page);
 70}
 71
 72static void __put_single_page(struct page *page)
 73{
 74	__page_cache_release(page);
 75	free_hot_cold_page(page, false);
 76}
 77
 78static void __put_compound_page(struct page *page)
 79{
 80	compound_page_dtor *dtor;
 81
 82	/*
 83	 * __page_cache_release() is supposed to be called for thp, not for
  84	 * hugetlb. This is because a hugetlb page never has PageLRU set
  85	 * (it is never added to any LRU list) and no memcg routines should
  86	 * be called for hugetlb (it has a separate hugetlb_cgroup.)
 87	 */
 88	if (!PageHuge(page))
 89		__page_cache_release(page);
 90	dtor = get_compound_page_dtor(page);
 91	(*dtor)(page);
 92}
 93
  94void __put_page(struct page *page)
 95{
 96	if (unlikely(PageCompound(page)))
 97		__put_compound_page(page);
 98	else
 99		__put_single_page(page);
100}
101EXPORT_SYMBOL(__put_page);
102
103/**
104 * put_pages_list() - release a list of pages
105 * @pages: list of pages threaded on page->lru
106 *
107 * Release a list of pages which are strung together on page->lru.  Currently
108 * used by read_cache_pages() and related error recovery code.
109 */
110void put_pages_list(struct list_head *pages)
111{
112	while (!list_empty(pages)) {
113		struct page *victim;
114
115		victim = list_entry(pages->prev, struct page, lru);
116		list_del(&victim->lru);
117		put_page(victim);
118	}
119}
120EXPORT_SYMBOL(put_pages_list);
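/*
 * Illustrative sketch (not part of the kernel tree; example_put_pages()
 * is a made-up name): a hypothetical caller threads pages onto a local
 * list via page->lru and drops one reference per page in a single call.
 */
static void example_put_pages(struct page **pages, int nr)
{
	LIST_HEAD(list);
	int i;

	for (i = 0; i < nr; i++)
		list_add(&pages[i]->lru, &list);
	put_pages_list(&list);		/* list_del()s and put_page()s each page */
}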
121
122/*
123 * get_kernel_pages() - pin kernel pages in memory
124 * @kiov:	An array of struct kvec structures
125 * @nr_segs:	number of segments to pin
126 * @write:	pinning for read/write, currently ignored
127 * @pages:	array that receives pointers to the pages pinned.
128 *		Should be at least nr_segs long.
129 *
130 * Returns number of pages pinned. This may be fewer than the number
131 * requested if a segment's iov_len is not PAGE_SIZE, in which case
132 * the pages pinned so far are returned. Each page returned must be
133 * released with a put_page() call when it is finished with.
134 */
135int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
136		struct page **pages)
137{
138	int seg;
139
140	for (seg = 0; seg < nr_segs; seg++) {
141		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
142			return seg;
143
144		pages[seg] = kmap_to_page(kiov[seg].iov_base);
145		get_page(pages[seg]);
146	}
147
148	return seg;
149}
150EXPORT_SYMBOL_GPL(get_kernel_pages);
151
152/*
153 * get_kernel_page() - pin a kernel page in memory
154 * @start:	starting kernel address
155 * @write:	pinning for read/write, currently ignored
156 * @pages:	array that receives pointer to the page pinned.
157 *		Must be at least nr_segs long.
158 *
159 * Returns 1 if page is pinned. If the page was not pinned, returns
160 * -errno. The page returned must be released with a put_page() call
161 * when it is finished with.
162 */
163int get_kernel_page(unsigned long start, int write, struct page **pages)
164{
165	const struct kvec kiov = {
166		.iov_base = (void *)start,
167		.iov_len = PAGE_SIZE
168	};
169
170	return get_kernel_pages(&kiov, 1, write, pages);
171}
172EXPORT_SYMBOL_GPL(get_kernel_page);
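/*
 * Illustrative sketch (hypothetical caller, not from the kernel tree):
 * pin the single page backing a page-aligned kernel virtual address;
 * get_kernel_page() returns 1 on success and the pin is dropped with
 * put_page().
 */
static int example_pin_one_page(void *kaddr)
{
	struct page *page;

	if (get_kernel_page((unsigned long)kaddr, 0, &page) != 1)
		return -EFAULT;
	/* ... access the pinned page ... */
	put_page(page);			/* release the pin */
	return 0;
}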
173
174static void pagevec_lru_move_fn(struct pagevec *pvec,
175	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
176	void *arg)
177{
178	int i;
179	struct zone *zone = NULL;
180	struct lruvec *lruvec;
181	unsigned long flags = 0;
182
183	for (i = 0; i < pagevec_count(pvec); i++) {
184		struct page *page = pvec->pages[i];
185		struct zone *pagezone = page_zone(page);
186
187		if (pagezone != zone) {
188			if (zone)
189				spin_unlock_irqrestore(&zone->lru_lock, flags);
190			zone = pagezone;
191			spin_lock_irqsave(&zone->lru_lock, flags);
192		}
193
194		lruvec = mem_cgroup_page_lruvec(page, zone);
195		(*move_fn)(page, lruvec, arg);
196	}
197	if (zone)
198		spin_unlock_irqrestore(&zone->lru_lock, flags);
199	release_pages(pvec->pages, pvec->nr, pvec->cold);
200	pagevec_reinit(pvec);
201}
202
203static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
204				 void *arg)
205{
206	int *pgmoved = arg;
207
208	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
209		enum lru_list lru = page_lru_base_type(page);
210		list_move_tail(&page->lru, &lruvec->lists[lru]);
211		(*pgmoved)++;
212	}
213}
214
215/*
216 * pagevec_move_tail() must be called with IRQ disabled.
217 * Otherwise this may cause nasty races.
218 */
219static void pagevec_move_tail(struct pagevec *pvec)
220{
221	int pgmoved = 0;
222
223	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
224	__count_vm_events(PGROTATED, pgmoved);
225}
226
227/*
228 * Writeback is about to end against a page which has been marked for immediate
229 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
230 * inactive list.
231 */
232void rotate_reclaimable_page(struct page *page)
233{
234	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
235	    !PageUnevictable(page) && PageLRU(page)) {
236		struct pagevec *pvec;
237		unsigned long flags;
238
239		get_page(page);
240		local_irq_save(flags);
241		pvec = this_cpu_ptr(&lru_rotate_pvecs);
242		if (!pagevec_add(pvec, page))
243			pagevec_move_tail(pvec);
244		local_irq_restore(flags);
245	}
246}
247
248static void update_page_reclaim_stat(struct lruvec *lruvec,
249				     int file, int rotated)
250{
251	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
252
253	reclaim_stat->recent_scanned[file]++;
254	if (rotated)
255		reclaim_stat->recent_rotated[file]++;
256}
257
258static void __activate_page(struct page *page, struct lruvec *lruvec,
259			    void *arg)
260{
261	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
262		int file = page_is_file_cache(page);
263		int lru = page_lru_base_type(page);
264
265		del_page_from_lru_list(page, lruvec, lru);
266		SetPageActive(page);
267		lru += LRU_ACTIVE;
268		add_page_to_lru_list(page, lruvec, lru);
269		trace_mm_lru_activate(page);
270
271		__count_vm_event(PGACTIVATE);
272		update_page_reclaim_stat(lruvec, file, 1);
273	}
274}
275
276#ifdef CONFIG_SMP
277static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
278
279static void activate_page_drain(int cpu)
280{
281	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
282
283	if (pagevec_count(pvec))
284		pagevec_lru_move_fn(pvec, __activate_page, NULL);
285}
286
287static bool need_activate_page_drain(int cpu)
288{
289	return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
290}
291
292void activate_page(struct page *page)
293{
294	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
295		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
296
297		get_page(page);
298		if (!pagevec_add(pvec, page))
299			pagevec_lru_move_fn(pvec, __activate_page, NULL);
300		put_cpu_var(activate_page_pvecs);
301	}
302}
303
304#else
305static inline void activate_page_drain(int cpu)
306{
307}
308
309static bool need_activate_page_drain(int cpu)
310{
311	return false;
312}
313
314void activate_page(struct page *page)
315{
316	struct zone *zone = page_zone(page);
317
318	spin_lock_irq(&zone->lru_lock);
319	__activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
320	spin_unlock_irq(&zone->lru_lock);
321}
322#endif
323
324static void __lru_cache_activate_page(struct page *page)
325{
326	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
327	int i;
328
329	/*
330	 * Search backwards on the optimistic assumption that the page being
331	 * activated has just been added to this pagevec. Note that only
332	 * the local pagevec is examined as a !PageLRU page could be in the
333	 * process of being released, reclaimed, migrated or on a remote
334	 * pagevec that is currently being drained. Furthermore, marking
335	 * a remote pagevec's page PageActive potentially hits a race where
336	 * a page is marked PageActive just after it is added to the inactive
337	 * list causing accounting errors and BUG_ON checks to trigger.
338	 */
339	for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
340		struct page *pagevec_page = pvec->pages[i];
341
342		if (pagevec_page == page) {
343			SetPageActive(page);
344			break;
345		}
346	}
347
348	put_cpu_var(lru_add_pvec);
349}
350
351/*
352 * Mark a page as having seen activity.
353 *
354 * inactive,unreferenced	->	inactive,referenced
355 * inactive,referenced		->	active,unreferenced
356 * active,unreferenced		->	active,referenced
357 *
358 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
359 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
360 */
361void mark_page_accessed(struct page *page)
362{
363	page = compound_head(page);
364	if (!PageActive(page) && !PageUnevictable(page) &&
365			PageReferenced(page)) {
366
367		/*
368		 * If the page is on the LRU, queue it for activation via
369		 * activate_page_pvecs. Otherwise, assume the page is on a
370		 * pagevec, mark it active and it'll be moved to the active
371		 * LRU on the next drain.
372		 */
373		if (PageLRU(page))
374			activate_page(page);
375		else
376			__lru_cache_activate_page(page);
377		ClearPageReferenced(page);
378		if (page_is_file_cache(page))
379			workingset_activation(page);
380	} else if (!PageReferenced(page)) {
381		SetPageReferenced(page);
382	}
383	if (page_is_idle(page))
384		clear_page_idle(page);
385}
386EXPORT_SYMBOL(mark_page_accessed);
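/*
 * Illustrative sketch (hypothetical caller, not from the kernel tree):
 * the state ladder above in action. A page on the inactive list is
 * promoted towards the active list only on its second recorded access.
 */
static void example_touch_twice(struct page *page)
{
	mark_page_accessed(page);	/* inactive,unreferenced -> inactive,referenced */
	mark_page_accessed(page);	/* inactive,referenced -> active,unreferenced */
}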
387
 388static void __lru_cache_add(struct page *page)
 389{
390	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
391
392	get_page(page);
393	if (!pagevec_space(pvec))
394		__pagevec_lru_add(pvec);
395	pagevec_add(pvec, page);
396	put_cpu_var(lru_add_pvec);
397}
398
399/**
 400 * lru_cache_add_anon - add a page to the page lists
401 * @page: the page to add
402 */
403void lru_cache_add_anon(struct page *page)
404{
405	if (PageActive(page))
406		ClearPageActive(page);
407	__lru_cache_add(page);
408}
409
410void lru_cache_add_file(struct page *page)
411{
412	if (PageActive(page))
413		ClearPageActive(page);
414	__lru_cache_add(page);
415}
416EXPORT_SYMBOL(lru_cache_add_file);
417
418/**
419 * lru_cache_add - add a page to a page list
420 * @page: the page to be added to the LRU.
421 *
422 * Queue the page for addition to the LRU via pagevec. The decision on whether
423 * to add the page to the [in]active [file|anon] list is deferred until the
 424 * pagevec is drained. This gives a chance for the caller of lru_cache_add()
 425 * to have the page added to the active list using mark_page_accessed().
426 */
427void lru_cache_add(struct page *page)
428{
429	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
430	VM_BUG_ON_PAGE(PageLRU(page), page);
431	__lru_cache_add(page);
432}
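/*
 * Illustrative sketch (hypothetical caller, not from the kernel tree):
 * a freshly allocated page that is not yet visible to other tasks can
 * pre-set the referenced bit non-atomically (see the comment above
 * mark_page_accessed()) before being queued for the LRU; the actual
 * list placement is deferred until the pagevec is drained.
 */
static void example_add_new_page(struct page *page)
{
	__SetPageReferenced(page);	/* page not yet visible: non-atomic is safe */
	lru_cache_add(page);
}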
433
434/**
435 * add_page_to_unevictable_list - add a page to the unevictable list
436 * @page:  the page to be added to the unevictable list
437 *
438 * Add page directly to its zone's unevictable list.  To avoid races with
 439 * tasks that might be making the page evictable, through e.g. munlock,
440 * munmap or exit, while it's not on the lru, we want to add the page
441 * while it's locked or otherwise "invisible" to other tasks.  This is
442 * difficult to do when using the pagevec cache, so bypass that.
443 */
444void add_page_to_unevictable_list(struct page *page)
445{
446	struct zone *zone = page_zone(page);
447	struct lruvec *lruvec;
448
449	spin_lock_irq(&zone->lru_lock);
450	lruvec = mem_cgroup_page_lruvec(page, zone);
451	ClearPageActive(page);
452	SetPageUnevictable(page);
453	SetPageLRU(page);
454	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
455	spin_unlock_irq(&zone->lru_lock);
456}
457
458/**
459 * lru_cache_add_active_or_unevictable
460 * @page:  the page to be added to LRU
461 * @vma:   vma in which page is mapped for determining reclaimability
462 *
463 * Place @page on the active or unevictable LRU list, depending on its
 464 * evictability.  Note that if the page is not evictable, it goes
 465 * directly back onto its zone's unevictable list; it does NOT use a
 466 * per-cpu pagevec.
467 */
468void lru_cache_add_active_or_unevictable(struct page *page,
469					 struct vm_area_struct *vma)
470{
471	VM_BUG_ON_PAGE(PageLRU(page), page);
472
473	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
474		SetPageActive(page);
475		lru_cache_add(page);
476		return;
477	}
478
479	if (!TestSetPageMlocked(page)) {
480		/*
 481		 * We use the irq-unsafe __mod_zone_page_state() because this
 482		 * counter is not modified from interrupt context, and the pte
 483		 * lock is held (a spinlock), which implies preemption is disabled.
484		 */
485		__mod_zone_page_state(page_zone(page), NR_MLOCK,
486				    hpage_nr_pages(page));
487		count_vm_event(UNEVICTABLE_PGMLOCKED);
488	}
489	add_page_to_unevictable_list(page);
490}
491
492/*
 493 * If the page cannot be invalidated, it is moved to the
 494 * inactive list to speed up its reclaim.  It is moved to the
 495 * head of the list, rather than the tail, to give the flusher
 496 * threads some time to write it out, as this is much more
 497 * effective than the single-page writeout from reclaim.
 498 *
 499 * If the page isn't mapped and is dirty or under writeback, it
 500 * can be reclaimed ASAP using PG_reclaim.
 501 *
 502 * 1. active, mapped page -> none
 503 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 504 * 3. inactive, mapped page -> none
 505 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 506 * 5. inactive, clean -> inactive, tail
 507 * 6. Others -> none
 508 *
 509 * In case 4, the page is moved to the head of the inactive list
 510 * because the VM expects the flusher threads to write it out, as
 511 * this is much more effective than the single-page writeout from reclaim.
512 */
513static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
514			      void *arg)
515{
516	int lru, file;
517	bool active;
518
519	if (!PageLRU(page))
520		return;
521
522	if (PageUnevictable(page))
523		return;
524
525	/* Some processes are using the page */
526	if (page_mapped(page))
527		return;
528
529	active = PageActive(page);
530	file = page_is_file_cache(page);
531	lru = page_lru_base_type(page);
532
533	del_page_from_lru_list(page, lruvec, lru + active);
534	ClearPageActive(page);
535	ClearPageReferenced(page);
536	add_page_to_lru_list(page, lruvec, lru);
537
538	if (PageWriteback(page) || PageDirty(page)) {
539		/*
 540		 * PG_reclaim could race with end_page_writeback(),
 541		 * which can confuse readahead.  But the race window
 542		 * is _really_ small, so it's a non-critical problem.
543		 */
544		SetPageReclaim(page);
545	} else {
546		/*
 547		 * The page's writeback ended while it was in the pagevec:
 548		 * move the page to the tail of the inactive list.
549		 */
550		list_move_tail(&page->lru, &lruvec->lists[lru]);
551		__count_vm_event(PGROTATED);
552	}
553
554	if (active)
555		__count_vm_event(PGDEACTIVATE);
556	update_page_reclaim_stat(lruvec, file, 0);
557}
558
559
560static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
561			    void *arg)
562{
563	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
564		int file = page_is_file_cache(page);
565		int lru = page_lru_base_type(page);
566
567		del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
568		ClearPageActive(page);
569		ClearPageReferenced(page);
570		add_page_to_lru_list(page, lruvec, lru);
571
572		__count_vm_event(PGDEACTIVATE);
573		update_page_reclaim_stat(lruvec, file, 0);
574	}
575}
576
577/*
578 * Drain pages out of the cpu's pagevecs.
579 * Either "cpu" is the current CPU, and preemption has already been
580 * disabled; or "cpu" is being hot-unplugged, and is already dead.
581 */
582void lru_add_drain_cpu(int cpu)
583{
584	struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);
585
586	if (pagevec_count(pvec))
587		__pagevec_lru_add(pvec);
588
589	pvec = &per_cpu(lru_rotate_pvecs, cpu);
590	if (pagevec_count(pvec)) {
591		unsigned long flags;
592
593		/* No harm done if a racing interrupt already did this */
594		local_irq_save(flags);
595		pagevec_move_tail(pvec);
596		local_irq_restore(flags);
597	}
598
599	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
600	if (pagevec_count(pvec))
601		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
602
603	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
604	if (pagevec_count(pvec))
605		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
606
607	activate_page_drain(cpu);
608}
609
610/**
611 * deactivate_file_page - forcefully deactivate a file page
612 * @page: page to deactivate
613 *
614 * This function hints the VM that @page is a good reclaim candidate,
615 * for example if its invalidation fails due to the page being dirty
616 * or under writeback.
617 */
618void deactivate_file_page(struct page *page)
619{
620	/*
 621	 * In a workload with many unevictable pages (such as one using
 622	 * mprotect), deactivating unevictable pages to accelerate reclaim is pointless.
623	 */
624	if (PageUnevictable(page))
625		return;
626
627	if (likely(get_page_unless_zero(page))) {
628		struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
629
630		if (!pagevec_add(pvec, page))
631			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
632		put_cpu_var(lru_deactivate_file_pvecs);
633	}
634}
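/*
 * Illustrative sketch of the intended caller pattern (compare
 * invalidate_mapping_pages() in mm/truncate.c; example_try_invalidate()
 * is a made-up name): when invalidation of a page fails, hint that it
 * should be reclaimed soon.
 */
static void example_try_invalidate(struct page *page)
{
	if (!invalidate_inode_page(page))	/* 0: invalidation failed */
		deactivate_file_page(page);
}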
635
636/**
637 * deactivate_page - deactivate a page
638 * @page: page to deactivate
639 *
640 * deactivate_page() moves @page to the inactive list if @page was on the active
641 * list and was not an unevictable page.  This is done to accelerate the reclaim
642 * of @page.
643 */
644void deactivate_page(struct page *page)
645{
646	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
647		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
648
649		get_page(page);
650		if (!pagevec_add(pvec, page))
651			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
652		put_cpu_var(lru_deactivate_pvecs);
653	}
654}
655
656void lru_add_drain(void)
657{
658	lru_add_drain_cpu(get_cpu());
659	put_cpu();
660}
661
662static void lru_add_drain_per_cpu(struct work_struct *dummy)
663{
664	lru_add_drain();
665}
666
667static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
668
669void lru_add_drain_all(void)
670{
671	static DEFINE_MUTEX(lock);
672	static struct cpumask has_work;
673	int cpu;
674
675	mutex_lock(&lock);
676	get_online_cpus();
677	cpumask_clear(&has_work);
678
679	for_each_online_cpu(cpu) {
680		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
681
682		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
683		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
684		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
685		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
686		    need_activate_page_drain(cpu)) {
687			INIT_WORK(work, lru_add_drain_per_cpu);
688			schedule_work_on(cpu, work);
689			cpumask_set_cpu(cpu, &has_work);
690		}
691	}
692
693	for_each_cpu(cpu, &has_work)
694		flush_work(&per_cpu(lru_add_drain_work, cpu));
695
696	put_online_cpus();
697	mutex_unlock(&lock);
698}
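/*
 * Illustrative sketch (hypothetical caller, not from the kernel tree):
 * lru_add_drain_all() may sleep in flush_work(), so it is for process
 * context only. Callers such as page migration flush the per-cpu
 * pagevecs first so that pages are actually on an LRU list before
 * being isolated.
 */
static void example_drain_then_isolate(void)
{
	lru_add_drain_all();		/* empty every CPU's pagevecs */
	/* ... isolate_lru_page() now has a chance to succeed ... */
}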
699
700/**
701 * release_pages - batched put_page()
702 * @pages: array of pages to release
703 * @nr: number of pages
704 * @cold: whether the pages are cache cold
705 *
706 * Decrement the reference count on all the pages in @pages.  If it
 707 * fell to zero, remove the page from the LRU and free it.
 708 */
709void release_pages(struct page **pages, int nr, bool cold)
710{
711	int i;
712	LIST_HEAD(pages_to_free);
713	struct zone *zone = NULL;
714	struct lruvec *lruvec;
715	unsigned long uninitialized_var(flags);
716	unsigned int uninitialized_var(lock_batch);
717
718	for (i = 0; i < nr; i++) {
719		struct page *page = pages[i];
720
721		/*
722		 * Make sure the IRQ-safe lock-holding time does not get
723		 * excessive with a continuous string of pages from the
724		 * same zone. The lock is held only if zone != NULL.
725		 */
726		if (zone && ++lock_batch == SWAP_CLUSTER_MAX) {
727			spin_unlock_irqrestore(&zone->lru_lock, flags);
728			zone = NULL;
729		}
730
731		if (is_huge_zero_page(page)) {
732			put_huge_zero_page();
733			continue;
734		}
735
736		page = compound_head(page);
737		if (!put_page_testzero(page))
738			continue;
739
740		if (PageCompound(page)) {
741			if (zone) {
742				spin_unlock_irqrestore(&zone->lru_lock, flags);
743				zone = NULL;
744			}
745			__put_compound_page(page);
746			continue;
747		}
 748
 749		if (PageLRU(page)) {
750			struct zone *pagezone = page_zone(page);
751
752			if (pagezone != zone) {
753				if (zone)
754					spin_unlock_irqrestore(&zone->lru_lock,
755									flags);
756				lock_batch = 0;
757				zone = pagezone;
758				spin_lock_irqsave(&zone->lru_lock, flags);
759			}
760
761			lruvec = mem_cgroup_page_lruvec(page, zone);
762			VM_BUG_ON_PAGE(!PageLRU(page), page);
763			__ClearPageLRU(page);
764			del_page_from_lru_list(page, lruvec, page_off_lru(page));
765		}
766
767		/* Clear Active bit in case of parallel mark_page_accessed */
768		__ClearPageActive(page);
769
770		list_add(&page->lru, &pages_to_free);
771	}
772	if (zone)
773		spin_unlock_irqrestore(&zone->lru_lock, flags);
774
775	mem_cgroup_uncharge_list(&pages_to_free);
776	free_hot_cold_page_list(&pages_to_free, cold);
777}
778EXPORT_SYMBOL(release_pages);
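/*
 * Illustrative sketch (hypothetical caller, not from the kernel tree):
 * batched release of an array of page references; equivalent to one
 * put_page() per page, but the LRU lock is taken at most once per run
 * of pages from the same zone.
 */
static void example_release_array(struct page **pages, int nr)
{
	release_pages(pages, nr, false);	/* false: pages may be cache-warm */
}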
779
780/*
781 * The pages which we're about to release may be in the deferred lru-addition
782 * queues.  That would prevent them from really being freed right now.  That's
783 * OK from a correctness point of view but is inefficient - those pages may be
784 * cache-warm and we want to give them back to the page allocator ASAP.
785 *
786 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
787 * and __pagevec_lru_add_active() call release_pages() directly to avoid
788 * mutual recursion.
789 */
790void __pagevec_release(struct pagevec *pvec)
791{
792	lru_add_drain();
793	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
794	pagevec_reinit(pvec);
795}
796EXPORT_SYMBOL(__pagevec_release);
797
798#ifdef CONFIG_TRANSPARENT_HUGEPAGE
799/* used by __split_huge_page_refcount() */
800void lru_add_page_tail(struct page *page, struct page *page_tail,
801		       struct lruvec *lruvec, struct list_head *list)
802{
803	const int file = 0;
804
805	VM_BUG_ON_PAGE(!PageHead(page), page);
806	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
807	VM_BUG_ON_PAGE(PageLRU(page_tail), page);
808	VM_BUG_ON(NR_CPUS != 1 &&
809		  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));
810
811	if (!list)
812		SetPageLRU(page_tail);
813
814	if (likely(PageLRU(page)))
815		list_add_tail(&page_tail->lru, &page->lru);
816	else if (list) {
817		/* page reclaim is reclaiming a huge page */
818		get_page(page_tail);
819		list_add_tail(&page_tail->lru, list);
820	} else {
821		struct list_head *list_head;
822		/*
 823		 * The head page has not yet been counted as an hpage,
 824		 * so we must account for each subpage individually.
825		 *
826		 * Use the standard add function to put page_tail on the list,
827		 * but then correct its position so they all end up in order.
828		 */
829		add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
830		list_head = page_tail->lru.prev;
831		list_move_tail(&page_tail->lru, list_head);
832	}
833
834	if (!PageUnevictable(page))
835		update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
836}
837#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
838
839static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
840				 void *arg)
841{
842	int file = page_is_file_cache(page);
843	int active = PageActive(page);
844	enum lru_list lru = page_lru(page);
845
846	VM_BUG_ON_PAGE(PageLRU(page), page);
847
848	SetPageLRU(page);
849	add_page_to_lru_list(page, lruvec, lru);
850	update_page_reclaim_stat(lruvec, file, active);
851	trace_mm_lru_insertion(page, lru);
852}
853
854/*
855 * Add the passed pages to the LRU, then drop the caller's refcount
856 * on them.  Reinitialises the caller's pagevec.
857 */
858void __pagevec_lru_add(struct pagevec *pvec)
859{
860	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
861}
862EXPORT_SYMBOL(__pagevec_lru_add);
863
864/**
865 * pagevec_lookup_entries - gang pagecache lookup
866 * @pvec:	Where the resulting entries are placed
867 * @mapping:	The address_space to search
868 * @start:	The starting entry index
869 * @nr_entries:	The maximum number of entries
870 * @indices:	The cache indices corresponding to the entries in @pvec
871 *
872 * pagevec_lookup_entries() will search for and return a group of up
873 * to @nr_entries pages and shadow entries in the mapping.  All
874 * entries are placed in @pvec.  pagevec_lookup_entries() takes a
875 * reference against actual pages in @pvec.
876 *
877 * The search returns a group of mapping-contiguous entries with
878 * ascending indexes.  There may be holes in the indices due to
879 * not-present entries.
880 *
881 * pagevec_lookup_entries() returns the number of entries which were
882 * found.
883 */
884unsigned pagevec_lookup_entries(struct pagevec *pvec,
885				struct address_space *mapping,
 886				pgoff_t start, unsigned nr_entries,
887				pgoff_t *indices)
888{
 889	pvec->nr = find_get_entries(mapping, start, nr_entries,
890				    pvec->pages, indices);
891	return pagevec_count(pvec);
892}
893
894/**
895 * pagevec_remove_exceptionals - pagevec exceptionals pruning
896 * @pvec:	The pagevec to prune
897 *
898 * pagevec_lookup_entries() fills both pages and exceptional radix
899 * tree entries into the pagevec.  This function prunes all
900 * exceptionals from @pvec without leaving holes, so that it can be
901 * passed on to page-only pagevec operations.
902 */
903void pagevec_remove_exceptionals(struct pagevec *pvec)
904{
905	int i, j;
906
907	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
908		struct page *page = pvec->pages[i];
909		if (!radix_tree_exceptional_entry(page))
910			pvec->pages[j++] = page;
911	}
912	pvec->nr = j;
913}
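/*
 * Illustrative sketch (hypothetical helper; example_lookup_pages_only()
 * is a made-up name): combine the two functions above to look up pages
 * and shadow entries, then prune the exceptional entries so that only
 * real pages remain (cf. the usage in mm/truncate.c).
 */
static unsigned example_lookup_pages_only(struct address_space *mapping,
					  pgoff_t start, struct pagevec *pvec,
					  pgoff_t *indices)
{
	unsigned nr;

	nr = pagevec_lookup_entries(pvec, mapping, start, PAGEVEC_SIZE, indices);
	pagevec_remove_exceptionals(pvec);
	return nr;
}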
914
915/**
916 * pagevec_lookup - gang pagecache lookup
917 * @pvec:	Where the resulting pages are placed
918 * @mapping:	The address_space to search
919 * @start:	The starting page index
920 * @nr_pages:	The maximum number of pages
921 *
922 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
923 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
924 * reference against the pages in @pvec.
925 *
926 * The search returns a group of mapping-contiguous pages with ascending
927 * indexes.  There may be holes in the indices due to not-present pages.
928 *
929 * pagevec_lookup() returns the number of pages which were found.
930 */
931unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
932		pgoff_t start, unsigned nr_pages)
933{
934	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
935	return pagevec_count(pvec);
936}
937EXPORT_SYMBOL(pagevec_lookup);
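/*
 * Illustrative sketch (hypothetical caller, not from the kernel tree):
 * the classic gang-lookup loop. Walk a mapping in PAGEVEC_SIZE batches,
 * dropping each batch's page references via pagevec_release() before
 * the next lookup.
 */
static void example_walk_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = page->index + 1;	/* advance past this page */
			/* ... inspect page ... */
		}
		pagevec_release(&pvec);
	}
}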
938
939unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
940		pgoff_t *index, int tag, unsigned nr_pages)
941{
942	pvec->nr = find_get_pages_tag(mapping, index, tag,
943					nr_pages, pvec->pages);
944	return pagevec_count(pvec);
945}
946EXPORT_SYMBOL(pagevec_lookup_tag);
947
948/*
949 * Perform any setup for the swap system
950 */
951void __init swap_setup(void)
952{
953	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
954#ifdef CONFIG_SWAP
955	int i;
956
 957	for (i = 0; i < MAX_SWAPFILES; i++)
 958		spin_lock_init(&swapper_spaces[i].tree_lock);
 959#endif
960
961	/* Use a smaller cluster for small-memory machines */
962	if (megs < 16)
963		page_cluster = 2;
964	else
965		page_cluster = 3;
966	/*
 967	 * Right now other parts of the system mean that we
 968	 * _really_ don't want to cluster much more.
969	 */
970}
v3.15
   1/*
   2 *  linux/mm/swap.c
   3 *
   4 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   5 */
   6
   7/*
   8 * This file contains the default values for the operation of the
   9 * Linux VM subsystem. Fine-tuning documentation can be found in
  10 * Documentation/sysctl/vm.txt.
  11 * Started 18.12.91
  12 * Swap aging added 23.2.95, Stephen Tweedie.
  13 * Buffermem limits added 12.3.98, Rik van Riel.
  14 */
  15
  16#include <linux/mm.h>
  17#include <linux/sched.h>
  18#include <linux/kernel_stat.h>
  19#include <linux/swap.h>
  20#include <linux/mman.h>
  21#include <linux/pagemap.h>
  22#include <linux/pagevec.h>
  23#include <linux/init.h>
  24#include <linux/export.h>
  25#include <linux/mm_inline.h>
  26#include <linux/percpu_counter.h>
  27#include <linux/percpu.h>
  28#include <linux/cpu.h>
  29#include <linux/notifier.h>
  30#include <linux/backing-dev.h>
  31#include <linux/memcontrol.h>
  32#include <linux/gfp.h>
  33#include <linux/uio.h>
  34
  35#include "internal.h"
  36
  37#define CREATE_TRACE_POINTS
  38#include <trace/events/pagemap.h>
  39
  40/* How many pages do we try to swap or page in/out together? */
  41int page_cluster;
  42
  43static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
  44static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
  45static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
  46
  47/*
  48 * This path almost never happens for VM activity - pages are normally
  49 * freed via pagevecs.  But it gets used by networking.
  50 */
  51static void __page_cache_release(struct page *page)
  52{
  53	if (PageLRU(page)) {
  54		struct zone *zone = page_zone(page);
  55		struct lruvec *lruvec;
  56		unsigned long flags;
  57
  58		spin_lock_irqsave(&zone->lru_lock, flags);
  59		lruvec = mem_cgroup_page_lruvec(page, zone);
  60		VM_BUG_ON_PAGE(!PageLRU(page), page);
  61		__ClearPageLRU(page);
  62		del_page_from_lru_list(page, lruvec, page_off_lru(page));
  63		spin_unlock_irqrestore(&zone->lru_lock, flags);
  64	}
  65}
  66
  67static void __put_single_page(struct page *page)
  68{
  69	__page_cache_release(page);
  70	free_hot_cold_page(page, 0);
  71}
  72
  73static void __put_compound_page(struct page *page)
  74{
  75	compound_page_dtor *dtor;
  76
  77	__page_cache_release(page);
  78	dtor = get_compound_page_dtor(page);
  79	(*dtor)(page);
  80}
  81
  82static void put_compound_page(struct page *page)
  83{
  84	struct page *page_head;
  85
  86	if (likely(!PageTail(page))) {
  87		if (put_page_testzero(page)) {
  88			/*
  89			 * By the time all refcounts have been released
  90			 * split_huge_page cannot run anymore from under us.
  91			 */
  92			if (PageHead(page))
  93				__put_compound_page(page);
  94			else
  95				__put_single_page(page);
  96		}
  97		return;
  98	}
  99
 100	/* __split_huge_page_refcount can run under us */
 101	page_head = compound_head(page);
 102
 103	/*
 104	 * THP can not break up slab pages so avoid taking
 105	 * compound_lock() and skip the tail page refcounting (in
 106	 * _mapcount) too. Slab performs non-atomic bit ops on
 107	 * page->flags for better performance. In particular
 108	 * slab_unlock() in slub used to be a hot path. It is still
 109	 * hot on arches that do not support
 110	 * this_cpu_cmpxchg_double().
 111	 *
 112	 * If "page" is part of a slab or hugetlbfs page it cannot be
 113	 * split and the head page cannot change from under us. And
 114	 * if "page" is part of a THP page under splitting, if the
 115	 * head page pointed by the THP tail isn't a THP head anymore,
 116	 * we'll find PageTail clear after smp_rmb() and we'll treat
 117	 * it as a single page.
 118	 */
 119	if (!__compound_tail_refcounted(page_head)) {
 120		/*
 121		 * If "page" is a THP tail, we must read the tail page
 122		 * flags after the head page flags. The
 123		 * split_huge_page side enforces write memory barriers
 124		 * between clearing PageTail and before the head page
 125		 * can be freed and reallocated.
 126		 */
 127		smp_rmb();
 128		if (likely(PageTail(page))) {
 129			/*
 130			 * __split_huge_page_refcount cannot race
 131			 * here.
 132			 */
 133			VM_BUG_ON_PAGE(!PageHead(page_head), page_head);
 134			VM_BUG_ON_PAGE(page_mapcount(page) != 0, page);
 135			if (put_page_testzero(page_head)) {
 136				/*
 137				 * If this is the tail of a slab
 138				 * compound page, the tail pin must
 139				 * not be the last reference held on
 140				 * the page, because the PG_slab
 141				 * cannot be cleared before all tail
 142				 * pins (which skips the _mapcount
 143				 * tail refcounting) have been
 144				 * released. For hugetlbfs the tail
 145				 * pin may be the last reference on
 146				 * the page instead, because
 147				 * PageHeadHuge will not go away until
 148				 * the compound page enters the buddy
 149				 * allocator.
 150				 */
 151				VM_BUG_ON_PAGE(PageSlab(page_head), page_head);
 152				__put_compound_page(page_head);
 153			}
 154			return;
 155		} else
 156			/*
 157			 * __split_huge_page_refcount ran before us,
 158			 * "page" was a THP tail. The split page_head
 159			 * has been freed and reallocated as slab or
 160			 * hugetlbfs page of smaller order (only
 161			 * possible if reallocated as slab on x86).
 162			 */
 163			goto out_put_single;
 164	}
 165
 166	if (likely(page != page_head && get_page_unless_zero(page_head))) {
 167		unsigned long flags;
 168
 169		/*
 170		 * page_head wasn't a dangling pointer but it may not
 171		 * be a head page anymore by the time we obtain the
 172		 * lock. That is ok as long as it can't be freed from
 173		 * under us.
 174		 */
 175		flags = compound_lock_irqsave(page_head);
 176		if (unlikely(!PageTail(page))) {
 177			/* __split_huge_page_refcount ran before us */
 178			compound_unlock_irqrestore(page_head, flags);
 179			if (put_page_testzero(page_head)) {
 180				/*
 181				 * The head page may have been freed
 182				 * and reallocated as a compound page
 183				 * of smaller order and then freed
 184				 * again.  All we know is that it
 185				 * cannot have become: a THP page, a
 186				 * compound page of higher order, a
 187				 * tail page.  That is because we
 188				 * still hold the refcount of the
 189				 * split THP tail and page_head was
 190				 * the THP head before the split.
 191				 */
 192				if (PageHead(page_head))
 193					__put_compound_page(page_head);
 194				else
 195					__put_single_page(page_head);
 196			}
 197out_put_single:
 198			if (put_page_testzero(page))
 199				__put_single_page(page);
 200			return;
 201		}
 202		VM_BUG_ON_PAGE(page_head != page->first_page, page);
 203		/*
 204		 * We can release the refcount taken by
 205		 * get_page_unless_zero() now that
 206		 * __split_huge_page_refcount() is blocked on the
 207		 * compound_lock.
 208		 */
 209		if (put_page_testzero(page_head))
 210			VM_BUG_ON_PAGE(1, page_head);
 211		/* __split_huge_page_refcount will wait now */
 212		VM_BUG_ON_PAGE(page_mapcount(page) <= 0, page);
 213		atomic_dec(&page->_mapcount);
 214		VM_BUG_ON_PAGE(atomic_read(&page_head->_count) <= 0, page_head);
 215		VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
 216		compound_unlock_irqrestore(page_head, flags);
 217
 218		if (put_page_testzero(page_head)) {
 219			if (PageHead(page_head))
 220				__put_compound_page(page_head);
 221			else
 222				__put_single_page(page_head);
 223		}
 224	} else {
 225		/* page_head is a dangling pointer */
 226		VM_BUG_ON_PAGE(PageTail(page), page);
 227		goto out_put_single;
 228	}
 229}
 230
 231void put_page(struct page *page)
 232{
 233	if (unlikely(PageCompound(page)))
 234		put_compound_page(page);
 235	else if (put_page_testzero(page))
 236		__put_single_page(page);
 237}
 238EXPORT_SYMBOL(put_page);
 239
 240/*
 241 * This function is exported but must not be called by anything other
 242 * than get_page(). It implements the slow path of get_page().
 243 */
 244bool __get_page_tail(struct page *page)
 245{
 246	/*
 247	 * This takes care of get_page() if run on a tail page
 248	 * returned by one of the get_user_pages/follow_page variants.
 249	 * get_user_pages/follow_page itself doesn't need the compound
 250	 * lock because it runs __get_page_tail_foll() under the
 251	 * proper PT lock that already serializes against
 252	 * split_huge_page().
 253	 */
 254	unsigned long flags;
 255	bool got;
 256	struct page *page_head = compound_head(page);
 257
 258	/* Ref to put_compound_page() comment. */
 259	if (!__compound_tail_refcounted(page_head)) {
 260		smp_rmb();
 261		if (likely(PageTail(page))) {
 262			/*
 263			 * This is a hugetlbfs page or a slab
 264			 * page. __split_huge_page_refcount
 265			 * cannot race here.
 266			 */
 267			VM_BUG_ON_PAGE(!PageHead(page_head), page_head);
 268			__get_page_tail_foll(page, true);
 269			return true;
 270		} else {
 271			/*
 272			 * __split_huge_page_refcount ran
 273			 * before us, "page" was a THP
 274			 * tail. The split page_head has been
 275			 * freed and reallocated as slab or
 276			 * hugetlbfs page of smaller order
 277			 * (only possible if reallocated as
 278			 * slab on x86).
 279			 */
 280			return false;
 281		}
 282	}
 283
 284	got = false;
 285	if (likely(page != page_head && get_page_unless_zero(page_head))) {
 286		/*
 287		 * page_head wasn't a dangling pointer but it
 288		 * may not be a head page anymore by the time
 289		 * we obtain the lock. That is ok as long as it
 290		 * can't be freed from under us.
 291		 */
 292		flags = compound_lock_irqsave(page_head);
 293		/* here __split_huge_page_refcount won't run anymore */
 294		if (likely(PageTail(page))) {
 295			__get_page_tail_foll(page, false);
 296			got = true;
 297		}
 298		compound_unlock_irqrestore(page_head, flags);
 299		if (unlikely(!got))
 300			put_page(page_head);
 301	}
 302	return got;
 303}
 304EXPORT_SYMBOL(__get_page_tail);
 305
 306/**
 307 * put_pages_list() - release a list of pages
 308 * @pages: list of pages threaded on page->lru
 309 *
 310 * Release a list of pages which are strung together on page->lru.  Currently
 311 * used by read_cache_pages() and related error recovery code.
 312 */
 313void put_pages_list(struct list_head *pages)
 314{
 315	while (!list_empty(pages)) {
 316		struct page *victim;
 317
 318		victim = list_entry(pages->prev, struct page, lru);
 319		list_del(&victim->lru);
 320		page_cache_release(victim);
 321	}
 322}
 323EXPORT_SYMBOL(put_pages_list);
 324
 325/*
 326 * get_kernel_pages() - pin kernel pages in memory
 327 * @kiov:	An array of struct kvec structures
 328 * @nr_segs:	number of segments to pin
 329 * @write:	pinning for read/write, currently ignored
 330 * @pages:	array that receives pointers to the pages pinned.
 331 *		Should be at least nr_segs long.
 332 *
 333 * Returns number of pages pinned. This may be fewer than the number
 334 * requested if a segment's iov_len is not PAGE_SIZE, in which case
 335 * the pages pinned so far are returned. Each page returned must be
 336 * released with a put_page() call when it is finished with.
 337 */
 338int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
 339		struct page **pages)
 340{
 341	int seg;
 342
 343	for (seg = 0; seg < nr_segs; seg++) {
 344		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
 345			return seg;
 346
 347		pages[seg] = kmap_to_page(kiov[seg].iov_base);
 348		page_cache_get(pages[seg]);
 349	}
 350
 351	return seg;
 352}
 353EXPORT_SYMBOL_GPL(get_kernel_pages);
 354
 355/*
 356 * get_kernel_page() - pin a kernel page in memory
 357 * @start:	starting kernel address
 358 * @write:	pinning for read/write, currently ignored
 359 * @pages:	array that receives pointer to the page pinned.
 360 *		Must be at least nr_segs long.
 361 *
 362 * Returns 1 if page is pinned. If the page was not pinned, returns
 363 * -errno. The page returned must be released with a put_page() call
 364 * when it is finished with.
 365 */
 366int get_kernel_page(unsigned long start, int write, struct page **pages)
 367{
 368	const struct kvec kiov = {
 369		.iov_base = (void *)start,
 370		.iov_len = PAGE_SIZE
 371	};
 372
 373	return get_kernel_pages(&kiov, 1, write, pages);
 374}
 375EXPORT_SYMBOL_GPL(get_kernel_page);
 376
 377static void pagevec_lru_move_fn(struct pagevec *pvec,
 378	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
 379	void *arg)
 380{
 381	int i;
 382	struct zone *zone = NULL;
 383	struct lruvec *lruvec;
 384	unsigned long flags = 0;
 385
 386	for (i = 0; i < pagevec_count(pvec); i++) {
 387		struct page *page = pvec->pages[i];
 388		struct zone *pagezone = page_zone(page);
 389
 390		if (pagezone != zone) {
 391			if (zone)
 392				spin_unlock_irqrestore(&zone->lru_lock, flags);
 393			zone = pagezone;
 394			spin_lock_irqsave(&zone->lru_lock, flags);
 395		}
 396
 397		lruvec = mem_cgroup_page_lruvec(page, zone);
 398		(*move_fn)(page, lruvec, arg);
 399	}
 400	if (zone)
 401		spin_unlock_irqrestore(&zone->lru_lock, flags);
 402	release_pages(pvec->pages, pvec->nr, pvec->cold);
 403	pagevec_reinit(pvec);
 404}
 405
 406static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
 407				 void *arg)
 408{
 409	int *pgmoved = arg;
 410
 411	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 412		enum lru_list lru = page_lru_base_type(page);
 413		list_move_tail(&page->lru, &lruvec->lists[lru]);
 414		(*pgmoved)++;
 415	}
 416}
 417
 418/*
 419 * pagevec_move_tail() must be called with IRQ disabled.
 420 * Otherwise this may cause nasty races.
 421 */
 422static void pagevec_move_tail(struct pagevec *pvec)
 423{
 424	int pgmoved = 0;
 425
 426	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
 427	__count_vm_events(PGROTATED, pgmoved);
 428}
 429
 430/*
 431 * Writeback is about to end against a page which has been marked for immediate
 432 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 433 * inactive list.
 434 */
 435void rotate_reclaimable_page(struct page *page)
 436{
 437	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
 438	    !PageUnevictable(page) && PageLRU(page)) {
 439		struct pagevec *pvec;
 440		unsigned long flags;
 441
 442		page_cache_get(page);
 443		local_irq_save(flags);
 444		pvec = &__get_cpu_var(lru_rotate_pvecs);
 445		if (!pagevec_add(pvec, page))
 446			pagevec_move_tail(pvec);
 447		local_irq_restore(flags);
 448	}
 449}
 450
 451static void update_page_reclaim_stat(struct lruvec *lruvec,
 452				     int file, int rotated)
 453{
 454	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
 455
 456	reclaim_stat->recent_scanned[file]++;
 457	if (rotated)
 458		reclaim_stat->recent_rotated[file]++;
 459}
 460
 461static void __activate_page(struct page *page, struct lruvec *lruvec,
 462			    void *arg)
 463{
 464	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 465		int file = page_is_file_cache(page);
 466		int lru = page_lru_base_type(page);
 467
 468		del_page_from_lru_list(page, lruvec, lru);
 469		SetPageActive(page);
 470		lru += LRU_ACTIVE;
 471		add_page_to_lru_list(page, lruvec, lru);
 472		trace_mm_lru_activate(page, page_to_pfn(page));
 473
 474		__count_vm_event(PGACTIVATE);
 475		update_page_reclaim_stat(lruvec, file, 1);
 476	}
 477}
 478
 479#ifdef CONFIG_SMP
 480static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
 481
 482static void activate_page_drain(int cpu)
 483{
 484	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
 485
 486	if (pagevec_count(pvec))
 487		pagevec_lru_move_fn(pvec, __activate_page, NULL);
 488}
 489
 490static bool need_activate_page_drain(int cpu)
 491{
 492	return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
 493}
 494
 495void activate_page(struct page *page)
 496{
 497	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 498		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
 499
 500		page_cache_get(page);
 501		if (!pagevec_add(pvec, page))
 502			pagevec_lru_move_fn(pvec, __activate_page, NULL);
 503		put_cpu_var(activate_page_pvecs);
 504	}
 505}
 506
 507#else
 508static inline void activate_page_drain(int cpu)
 509{
 510}
 511
 512static bool need_activate_page_drain(int cpu)
 513{
 514	return false;
 515}
 516
 517void activate_page(struct page *page)
 518{
 519	struct zone *zone = page_zone(page);
 520
 521	spin_lock_irq(&zone->lru_lock);
 522	__activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
 523	spin_unlock_irq(&zone->lru_lock);
 524}
 525#endif
 526
 527static void __lru_cache_activate_page(struct page *page)
 528{
 529	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
 530	int i;
 531
 532	/*
 533	 * Search backwards on the optimistic assumption that the page being
 534	 * activated has just been added to this pagevec. Note that only
 535	 * the local pagevec is examined as a !PageLRU page could be in the
 536	 * process of being released, reclaimed, migrated or on a remote
 537	 * pagevec that is currently being drained. Furthermore, marking
 538	 * a remote pagevec's page PageActive potentially hits a race where
 539	 * a page is marked PageActive just after it is added to the inactive
 540	 * list causing accounting errors and BUG_ON checks to trigger.
 541	 */
 542	for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
 543		struct page *pagevec_page = pvec->pages[i];
 544
 545		if (pagevec_page == page) {
 546			SetPageActive(page);
 547			break;
 548		}
 549	}
 550
 551	put_cpu_var(lru_add_pvec);
 552}
 553
 554/*
 555 * Mark a page as having seen activity.
 556 *
 557 * inactive,unreferenced	->	inactive,referenced
 558 * inactive,referenced		->	active,unreferenced
 559 * active,unreferenced		->	active,referenced
 560 */
 561void mark_page_accessed(struct page *page)
 562{
 563	if (!PageActive(page) && !PageUnevictable(page) &&
 564			PageReferenced(page)) {
 565
 566		/*
 567		 * If the page is on the LRU, queue it for activation via
 568		 * activate_page_pvecs. Otherwise, assume the page is on a
 569		 * pagevec, mark it active and it'll be moved to the active
 570		 * LRU on the next drain.
 571		 */
 572		if (PageLRU(page))
 573			activate_page(page);
 574		else
 575			__lru_cache_activate_page(page);
 576		ClearPageReferenced(page);
 577		if (page_is_file_cache(page))
 578			workingset_activation(page);
 579	} else if (!PageReferenced(page)) {
 580		SetPageReferenced(page);
 581	}
 582}
 583EXPORT_SYMBOL(mark_page_accessed);
 584
 585/*
 586 * Queue the page for addition to the LRU via pagevec. The decision on whether
 587 * to add the page to the [in]active [file|anon] list is deferred until the
 588 * pagevec is drained. This gives a chance for the caller of __lru_cache_add()
 589 * to have the page added to the active list using mark_page_accessed().
 590 */
 591void __lru_cache_add(struct page *page)
 592{
 593	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
 594
 595	page_cache_get(page);
 596	if (!pagevec_space(pvec))
 597		__pagevec_lru_add(pvec);
 598	pagevec_add(pvec, page);
 599	put_cpu_var(lru_add_pvec);
 600}
 601EXPORT_SYMBOL(__lru_cache_add);
 602
 603/**
 604 * lru_cache_add - add a page to a page list
 605 * @page: the page to be added to the LRU.
 606 */
 607void lru_cache_add(struct page *page)
 608{
 609	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
 610	VM_BUG_ON_PAGE(PageLRU(page), page);
 611	__lru_cache_add(page);
 612}
 613
 614/**
 615 * add_page_to_unevictable_list - add a page to the unevictable list
 616 * @page:  the page to be added to the unevictable list
 617 *
 618 * Add page directly to its zone's unevictable list.  To avoid races with
 619 * tasks that might be making the page evictable, through e.g. munlock,
 620 * munmap or exit, while it's not on the lru, we want to add the page
 621 * while it's locked or otherwise "invisible" to other tasks.  This is
 622 * difficult to do when using the pagevec cache, so bypass that.
 623 */
 624void add_page_to_unevictable_list(struct page *page)
 625{
 626	struct zone *zone = page_zone(page);
 627	struct lruvec *lruvec;
 628
 629	spin_lock_irq(&zone->lru_lock);
 630	lruvec = mem_cgroup_page_lruvec(page, zone);
 631	ClearPageActive(page);
 632	SetPageUnevictable(page);
 633	SetPageLRU(page);
 634	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
 635	spin_unlock_irq(&zone->lru_lock);
 636}
 637
 638/*
 639 * If the page cannot be invalidated, it is moved to the
 640 * inactive list to speed up its reclaim.  It is moved to the
 641 * head of the list, rather than the tail, to give the flusher
 642 * threads some time to write it out, as this is much more
 643 * effective than the single-page writeout from reclaim.
 644 *
 645 * If the page isn't mapped and is dirty or under writeback, it
 646 * can be reclaimed ASAP using PG_reclaim.
 647 *
 648 * 1. active, mapped page -> none
 649 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 650 * 3. inactive, mapped page -> none
 651 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 652 * 5. inactive, clean -> inactive, tail
 653 * 6. Others -> none
 654 *
 655 * In case 4, the page is moved to the head of the inactive list
 656 * because the VM expects the flusher threads to write it out, as
 657 * this is much more effective than the single-page writeout from reclaim.
 658 */
 659static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
 660			      void *arg)
 661{
 662	int lru, file;
 663	bool active;
 664
 665	if (!PageLRU(page))
 666		return;
 667
 668	if (PageUnevictable(page))
 669		return;
 670
 671	/* Some processes are using the page */
 672	if (page_mapped(page))
 673		return;
 674
 675	active = PageActive(page);
 676	file = page_is_file_cache(page);
 677	lru = page_lru_base_type(page);
 678
 679	del_page_from_lru_list(page, lruvec, lru + active);
 680	ClearPageActive(page);
 681	ClearPageReferenced(page);
 682	add_page_to_lru_list(page, lruvec, lru);
 683
 684	if (PageWriteback(page) || PageDirty(page)) {
 685		/*
 686		 * PG_reclaim could race with end_page_writeback(),
 687		 * which can confuse readahead.  But the race window
 688		 * is _really_ small, so it's a non-critical problem.
 689		 */
 690		SetPageReclaim(page);
 691	} else {
 692		/*
 693		 * The page's writeback ended while it was in the pagevec:
 694		 * move the page to the tail of the inactive list.
 695		 */
 696		list_move_tail(&page->lru, &lruvec->lists[lru]);
 697		__count_vm_event(PGROTATED);
 698	}
 699
 700	if (active)
 701		__count_vm_event(PGDEACTIVATE);
 702	update_page_reclaim_stat(lruvec, file, 0);
 703}
 704
 705/*
 706 * Drain pages out of the cpu's pagevecs.
 707 * Either "cpu" is the current CPU, and preemption has already been
 708 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 709 */
 710void lru_add_drain_cpu(int cpu)
 711{
 712	struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);
 713
 714	if (pagevec_count(pvec))
 715		__pagevec_lru_add(pvec);
 716
 717	pvec = &per_cpu(lru_rotate_pvecs, cpu);
 718	if (pagevec_count(pvec)) {
 719		unsigned long flags;
 720
 721		/* No harm done if a racing interrupt already did this */
 722		local_irq_save(flags);
 723		pagevec_move_tail(pvec);
 724		local_irq_restore(flags);
 725	}
 726
 727	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
 728	if (pagevec_count(pvec))
 729		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
 730
 731	activate_page_drain(cpu);
 732}
 733
 734/**
 735 * deactivate_page - forcefully deactivate a page
 736 * @page: page to deactivate
 737 *
 738 * This function hints the VM that @page is a good reclaim candidate,
 739 * for example if its invalidation fails due to the page being dirty
 740 * or under writeback.
 741 */
 742void deactivate_page(struct page *page)
 743{
 744	/*
 745	 * In a workload with many unevictable pages (such as one using mprotect),
 746	 * deactivating unevictable pages to accelerate reclaim is pointless.
 747	 */
 748	if (PageUnevictable(page))
 749		return;
 750
 751	if (likely(get_page_unless_zero(page))) {
 752		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
 753
 754		if (!pagevec_add(pvec, page))
 755			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
 756		put_cpu_var(lru_deactivate_pvecs);
 757	}
 758}
 759
 760void lru_add_drain(void)
 761{
 762	lru_add_drain_cpu(get_cpu());
 763	put_cpu();
 764}
 765
 766static void lru_add_drain_per_cpu(struct work_struct *dummy)
 767{
 768	lru_add_drain();
 769}
 770
 771static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 772
 773void lru_add_drain_all(void)
 774{
 775	static DEFINE_MUTEX(lock);
 776	static struct cpumask has_work;
 777	int cpu;
 778
 779	mutex_lock(&lock);
 780	get_online_cpus();
 781	cpumask_clear(&has_work);
 782
 783	for_each_online_cpu(cpu) {
 784		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
 785
 786		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
 787	    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
 788		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
 789		    need_activate_page_drain(cpu)) {
 790			INIT_WORK(work, lru_add_drain_per_cpu);
 791			schedule_work_on(cpu, work);
 792			cpumask_set_cpu(cpu, &has_work);
 793		}
 794	}
 795
 796	for_each_cpu(cpu, &has_work)
 797		flush_work(&per_cpu(lru_add_drain_work, cpu));
 798
 799	put_online_cpus();
 800	mutex_unlock(&lock);
 801}
 802
 803/*
 804 * Batched page_cache_release().  Decrement the reference count on all the
 805 * passed pages.  If it fell to zero then remove the page from the LRU and
 806 * free it.
 807 *
 808 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 809 * for the remainder of the operation.
 810 *
 811 * The locking in this function is against shrink_inactive_list(): we recheck
 812 * the page count inside the lock to see whether shrink_inactive_list()
 813 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
 814 * will free it.
 815 */
 816void release_pages(struct page **pages, int nr, int cold)
 817{
 818	int i;
 819	LIST_HEAD(pages_to_free);
 820	struct zone *zone = NULL;
 821	struct lruvec *lruvec;
 822	unsigned long uninitialized_var(flags);
 823
 824	for (i = 0; i < nr; i++) {
 825		struct page *page = pages[i];
 826
 827		if (unlikely(PageCompound(page))) {
 828			if (zone) {
 829				spin_unlock_irqrestore(&zone->lru_lock, flags);
 830				zone = NULL;
 831			}
 832			put_compound_page(page);
 833			continue;
 834		}
 835
 836		if (!put_page_testzero(page))
 837			continue;
 838
 839		if (PageLRU(page)) {
 840			struct zone *pagezone = page_zone(page);
 841
 842			if (pagezone != zone) {
 843				if (zone)
 844					spin_unlock_irqrestore(&zone->lru_lock,
 845									flags);
 
 846				zone = pagezone;
 847				spin_lock_irqsave(&zone->lru_lock, flags);
 848			}
 849
 850			lruvec = mem_cgroup_page_lruvec(page, zone);
 851			VM_BUG_ON_PAGE(!PageLRU(page), page);
 852			__ClearPageLRU(page);
 853			del_page_from_lru_list(page, lruvec, page_off_lru(page));
 854		}
 855
 856		/* Clear Active bit in case of parallel mark_page_accessed */
 857		ClearPageActive(page);
 858
 859		list_add(&page->lru, &pages_to_free);
 860	}
 861	if (zone)
 862		spin_unlock_irqrestore(&zone->lru_lock, flags);
 863
 
 864	free_hot_cold_page_list(&pages_to_free, cold);
 865}
 866EXPORT_SYMBOL(release_pages);
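
/*
 * Illustrative sketch (not part of mm/swap.c): dropping the references
 * on a caller-private array of pages in one batch rather than putting
 * them one at a time.  example_put_batch() is a hypothetical helper.
 */
static void example_put_batch(struct page **pages, int nr)
{
	/* Not cache-cold: the caller may have touched these recently */
	release_pages(pages, nr, 0);
}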

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * (via pagevec_lru_move_fn()) calls release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);
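
/*
 * Illustrative sketch (not part of mm/swap.c): batching puts through a
 * pagevec.  When the vector fills up, __pagevec_release() drops every
 * reference at once and reinitialises the vector for reuse.
 * example_put_one() is a hypothetical helper; the caller owns @pvec
 * and must release any remainder when it is done.
 */
static void example_put_one(struct pagevec *pvec, struct page *page)
{
	/* pagevec_add() returns the slots left; 0 means the vector is full */
	if (!pagevec_add(pvec, page))
		__pagevec_release(pvec);
}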

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct page *page, struct page *page_tail,
		       struct lruvec *lruvec, struct list_head *list)
{
	const int file = 0;

	VM_BUG_ON_PAGE(!PageHead(page), page);
	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
	VM_BUG_ON_PAGE(PageLRU(page_tail), page);
	VM_BUG_ON(NR_CPUS != 1 &&
		  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));

	if (!list)
		SetPageLRU(page_tail);

	if (likely(PageLRU(page)))
		list_add_tail(&page_tail->lru, &page->lru);
	else if (list) {
		/* page reclaim is reclaiming a huge page */
		get_page(page_tail);
		list_add_tail(&page_tail->lru, list);
	} else {
		struct list_head *list_head;
		/*
		 * The head page has not yet been counted as an hpage, so we
		 * must account for each subpage individually.
		 *
		 * Use the standard add function to put page_tail on the list,
		 * but then correct its position so they all end up in order.
		 */
		add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
		list_head = page_tail->lru.prev;
		list_move_tail(&page_tail->lru, list_head);
	}

	if (!PageUnevictable(page))
		update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
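
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Illustrative sketch (not part of mm/swap.c): how a split path walks
 * the tail pages of a huge page, splicing each one onto the LRU right
 * after the head.  This is a simplification of what
 * __split_huge_page_refcount() does; example_splice_tails() is a
 * hypothetical helper, and zone->lru_lock must already be held.
 */
static void example_splice_tails(struct page *head, struct lruvec *lruvec)
{
	int i;

	/* Walk tails in reverse so they end up in ascending order */
	for (i = HPAGE_PMD_NR - 1; i >= 1; i--)
		lru_add_page_tail(head, head + i, lruvec, NULL);
}
#endif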

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int file = page_is_file_cache(page);
	int active = PageActive(page);
	enum lru_list lru = page_lru(page);

	VM_BUG_ON_PAGE(PageLRU(page), page);

	SetPageLRU(page);
	add_page_to_lru_list(page, lruvec, lru);
	update_page_reclaim_stat(lruvec, file, active);
	trace_mm_lru_insertion(page, page_to_pfn(page), lru, trace_pagemap_flags(page));
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
}
EXPORT_SYMBOL(__pagevec_lru_add);
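
/*
 * Illustrative sketch (not part of mm/swap.c): a deferred LRU-addition
 * path in the style of lru_cache_add(), batching new pages in the
 * per-CPU lru_add_pvec and flushing through __pagevec_lru_add() when
 * the vector is full.  example_lru_add() is a hypothetical stand-in,
 * not the real API.
 */
static void example_lru_add(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);

	get_page(page);		/* the pagevec holds a reference */
	if (!pagevec_space(pvec))
		__pagevec_lru_add(pvec);
	pagevec_add(pvec, page);
	put_cpu_var(lru_add_pvec);
}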

/**
 * pagevec_lookup_entries - gang pagecache lookup
 * @pvec:	Where the resulting entries are placed
 * @mapping:	The address_space to search
 * @start:	The starting entry index
 * @nr_entries:	The maximum number of entries
 * @indices:	The cache indices corresponding to the entries in @pvec
 *
 * pagevec_lookup_entries() will search for and return a group of up
 * to @nr_entries pages and shadow entries in the mapping.  All
 * entries are placed in @pvec.  pagevec_lookup_entries() takes a
 * reference against actual pages in @pvec.
 *
 * The search returns a group of mapping-contiguous entries with
 * ascending indexes.  There may be holes in the indices due to
 * not-present entries.
 *
 * pagevec_lookup_entries() returns the number of entries which were
 * found.
 */
unsigned pagevec_lookup_entries(struct pagevec *pvec,
				struct address_space *mapping,
				pgoff_t start, unsigned nr_entries,
				pgoff_t *indices)
{
	pvec->nr = find_get_entries(mapping, start, nr_entries,
				    pvec->pages, indices);
	return pagevec_count(pvec);
}
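
/*
 * Illustrative sketch (not part of mm/swap.c): counting the shadow
 * entries of a mapping with pagevec_lookup_entries().  Shadow entries
 * carry no page reference, so only real pages are put back.
 * example_count_shadows() is a hypothetical helper.
 */
static unsigned long example_count_shadows(struct address_space *mapping)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = 0;
	unsigned long shadows = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup_entries(&pvec, mapping, index,
				      PAGEVEC_SIZE, indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (radix_tree_exceptional_entry(page))
				shadows++;
			else
				put_page(page);	/* drop the lookup ref */
		}
		pagevec_reinit(&pvec);
		index++;
		cond_resched();
	}
	return shadows;
}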

/**
 * pagevec_remove_exceptionals - pagevec exceptionals pruning
 * @pvec:	The pagevec to prune
 *
 * pagevec_lookup_entries() fills both pages and exceptional radix
 * tree entries into the pagevec.  This function prunes all
 * exceptionals from @pvec without leaving holes, so that it can be
 * passed on to page-only pagevec operations.
 */
void pagevec_remove_exceptionals(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!radix_tree_exceptional_entry(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}
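
/*
 * Illustrative sketch (not part of mm/swap.c): the usual pairing, as
 * in the truncate code - look up pages and shadow entries together,
 * prune the exceptionals, then hand the page-only vector to
 * pagevec_release().  example_prune_and_release() is a hypothetical
 * helper.
 */
static void example_prune_and_release(struct pagevec *pvec)
{
	/* Shadow entries hold no reference, so drop them first */
	pagevec_remove_exceptionals(pvec);
	pagevec_release(pvec);
}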

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);
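
/*
 * Illustrative sketch (not part of mm/swap.c): the canonical
 * pagevec_lookup() loop for visiting every page of a mapping one
 * batch at a time.  example_walk_mapping() is a hypothetical helper.
 */
static void example_walk_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* ... operate on page here ... */
			index = page->index;
		}
		index++;	/* resume after the last page seen */
		pagevec_release(&pvec);
		cond_resched();
	}
}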

unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
					nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);
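
/*
 * Illustrative sketch (not part of mm/swap.c): using
 * pagevec_lookup_tag() to visit the dirty pages of a mapping, the way
 * writeback does.  Unlike pagevec_lookup(), this lookup advances
 * @index itself.  example_walk_dirty() is a hypothetical helper.
 */
static void example_walk_dirty(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup_tag(&pvec, mapping, &index,
				  PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			/* ... queue pvec.pages[i] for writeback here ... */
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}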

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
#ifdef CONFIG_SWAP
	int i;

	if (bdi_init(swapper_spaces[0].backing_dev_info))
		panic("Failed to init swap bdi");
	for (i = 0; i < MAX_SWAPFILES; i++) {
		spin_lock_init(&swapper_spaces[i].tree_lock);
		INIT_LIST_HEAD(&swapper_spaces[i].i_mmap_nonlinear);
	}
#endif

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}
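
/*
 * Illustrative arithmetic (not part of mm/swap.c): with 4 KiB pages,
 * PAGE_SHIFT is 12, so totalram_pages >> (20 - 12) converts a page
 * count into MiB of RAM.  A 12 MiB machine gets page_cluster = 2,
 * i.e. swap readahead clusters of 1 << 2 = 4 pages; 16 MiB and up get
 * page_cluster = 3, i.e. clusters of 1 << 3 = 8 pages.
 * example_swap_cluster_pages() is a hypothetical helper making that
 * relationship explicit.
 */
static inline unsigned long example_swap_cluster_pages(void)
{
	return 1UL << page_cluster;
}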