v3.1
  1/*
  2 *  linux/mm/swap.c
  3 *
  4 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  5 */
  6
  7/*
  8 * This file contains the default values for the operation of the
  9 * Linux VM subsystem. Fine-tuning documentation can be found in
 10 * Documentation/sysctl/vm.txt.
 11 * Started 18.12.91
 12 * Swap aging added 23.2.95, Stephen Tweedie.
 13 * Buffermem limits added 12.3.98, Rik van Riel.
 14 */
 15
 16#include <linux/mm.h>
 17#include <linux/sched.h>
 18#include <linux/kernel_stat.h>
 19#include <linux/swap.h>
 20#include <linux/mman.h>
 21#include <linux/pagemap.h>
 22#include <linux/pagevec.h>
 23#include <linux/init.h>
 24#include <linux/module.h>
 25#include <linux/mm_inline.h>
 26#include <linux/buffer_head.h>	/* for try_to_release_page() */
 27#include <linux/percpu_counter.h>
 28#include <linux/percpu.h>
 29#include <linux/cpu.h>
 30#include <linux/notifier.h>
 31#include <linux/backing-dev.h>
 32#include <linux/memcontrol.h>
 33#include <linux/gfp.h>
 34
 35#include "internal.h"
 36
 37/* How many pages do we try to swap or page in/out together? */
 38int page_cluster;
 39
 40static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
 41static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
 42static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
 43
 44/*
 45 * This path almost never happens for VM activity - pages are normally
 46 * freed via pagevecs.  But it gets used by networking.
 47 */
 48static void __page_cache_release(struct page *page)
 49{
 50	if (PageLRU(page)) {
 51		unsigned long flags;
 52		struct zone *zone = page_zone(page);
 53
 54		spin_lock_irqsave(&zone->lru_lock, flags);
 55		VM_BUG_ON(!PageLRU(page));
 56		__ClearPageLRU(page);
 57		del_page_from_lru(zone, page);
 58		spin_unlock_irqrestore(&zone->lru_lock, flags);
 59	}
 60}
 61
 62static void __put_single_page(struct page *page)
 63{
 64	__page_cache_release(page);
 65	free_hot_cold_page(page, 0);
 66}
 67
 68static void __put_compound_page(struct page *page)
 69{
 70	compound_page_dtor *dtor;
 71
 72	__page_cache_release(page);
 73	dtor = get_compound_page_dtor(page);
 74	(*dtor)(page);
 75}
 76
 77static void put_compound_page(struct page *page)
 78{
 79	if (unlikely(PageTail(page))) {
 80		/* __split_huge_page_refcount can run under us */
 81		struct page *page_head = page->first_page;
 82		smp_rmb();
 83		/*
 84		 * If PageTail is still set after smp_rmb() we can be sure
 85		 * that the page->first_page we read wasn't a dangling pointer.
 86		 * See __split_huge_page_refcount() smp_wmb().
 87		 */
 88		if (likely(PageTail(page) && get_page_unless_zero(page_head))) {
 89			unsigned long flags;
 90			/*
 91			 * Verify that our page_head wasn't converted
 92			 * to a regular page before we got a
 93			 * reference on it.
 94			 */
 95			if (unlikely(!PageHead(page_head))) {
 96				/* PageHead is cleared after PageTail */
 97				smp_rmb();
 98				VM_BUG_ON(PageTail(page));
 99				goto out_put_head;
100			}
101			/*
102			 * Only run compound_lock on a valid PageHead,
103			 * after having it pinned with
104			 * get_page_unless_zero() above.
105			 */
106			smp_mb();
107			/* page_head wasn't a dangling pointer */
108			flags = compound_lock_irqsave(page_head);
109			if (unlikely(!PageTail(page))) {
110				/* __split_huge_page_refcount run before us */
111				compound_unlock_irqrestore(page_head, flags);
112				VM_BUG_ON(PageHead(page_head));
113			out_put_head:
114				if (put_page_testzero(page_head))
115					__put_single_page(page_head);
116			out_put_single:
117				if (put_page_testzero(page))
118					__put_single_page(page);
119				return;
120			}
121			VM_BUG_ON(page_head != page->first_page);
122			/*
123			 * We can release the refcount taken by
124			 * get_page_unless_zero now that
125			 * split_huge_page_refcount is blocked on the
126			 * compound_lock.
127			 */
128			if (put_page_testzero(page_head))
129				VM_BUG_ON(1);
130			/* __split_huge_page_refcount will wait now */
131			VM_BUG_ON(atomic_read(&page->_count) <= 0);
132			atomic_dec(&page->_count);
133			VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
134			compound_unlock_irqrestore(page_head, flags);
135			if (put_page_testzero(page_head)) {
136				if (PageHead(page_head))
137					__put_compound_page(page_head);
138				else
139					__put_single_page(page_head);
140			}
141		} else {
142			/* page_head is a dangling pointer */
143			VM_BUG_ON(PageTail(page));
144			goto out_put_single;
145		}
146	} else if (put_page_testzero(page)) {
147		if (PageHead(page))
148			__put_compound_page(page);
149		else
150			__put_single_page(page);
151	}
152}
153
154void put_page(struct page *page)
155{
156	if (unlikely(PageCompound(page)))
157		put_compound_page(page);
158	else if (put_page_testzero(page))
159		__put_single_page(page);
160}
161EXPORT_SYMBOL(put_page);
162
163/**
164 * put_pages_list() - release a list of pages
165 * @pages: list of pages threaded on page->lru
166 *
167 * Release a list of pages which are strung together on page.lru.  Currently
168 * used by read_cache_pages() and related error recovery code.
169 */
170void put_pages_list(struct list_head *pages)
171{
172	while (!list_empty(pages)) {
173		struct page *victim;
174
175		victim = list_entry(pages->prev, struct page, lru);
176		list_del(&victim->lru);
177		page_cache_release(victim);
178	}
179}
180EXPORT_SYMBOL(put_pages_list);
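/*
 * Hedged usage sketch (annotation, not part of the original file):
 * building a short list of pages threaded on page->lru and releasing
 * them in one call. put_pages_list_demo() is a hypothetical helper;
 * error handling is minimal for illustration.
 */
static void put_pages_list_demo(void)
{
	LIST_HEAD(pages);
	int i;

	for (i = 0; i < 4; i++) {
		struct page *page = alloc_page(GFP_KERNEL);

		if (!page)
			break;
		/* thread the page on its lru list_head */
		list_add(&page->lru, &pages);
	}

	/* drops the reference taken by alloc_page() on each page */
	put_pages_list(&pages);
}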
181
182static void pagevec_lru_move_fn(struct pagevec *pvec,
183				void (*move_fn)(struct page *page, void *arg),
184				void *arg)
185{
186	int i;
187	struct zone *zone = NULL;
188	unsigned long flags = 0;
189
190	for (i = 0; i < pagevec_count(pvec); i++) {
191		struct page *page = pvec->pages[i];
192		struct zone *pagezone = page_zone(page);
193
194		if (pagezone != zone) {
195			if (zone)
196				spin_unlock_irqrestore(&zone->lru_lock, flags);
197			zone = pagezone;
198			spin_lock_irqsave(&zone->lru_lock, flags);
199		}
200
201		(*move_fn)(page, arg);
202	}
203	if (zone)
204		spin_unlock_irqrestore(&zone->lru_lock, flags);
205	release_pages(pvec->pages, pvec->nr, pvec->cold);
206	pagevec_reinit(pvec);
207}
208
209static void pagevec_move_tail_fn(struct page *page, void *arg)
210{
211	int *pgmoved = arg;
212	struct zone *zone = page_zone(page);
213
214	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
215		enum lru_list lru = page_lru_base_type(page);
216		list_move_tail(&page->lru, &zone->lru[lru].list);
217		mem_cgroup_rotate_reclaimable_page(page);
218		(*pgmoved)++;
219	}
220}
221
222/*
223 * pagevec_move_tail() must be called with IRQ disabled.
224 * Otherwise this may cause nasty races.
225 */
226static void pagevec_move_tail(struct pagevec *pvec)
227{
228	int pgmoved = 0;
229
230	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
231	__count_vm_events(PGROTATED, pgmoved);
232}
233
234/*
235 * Writeback is about to end against a page which has been marked for immediate
236 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
237 * inactive list.
238 */
239void rotate_reclaimable_page(struct page *page)
240{
241	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
242	    !PageUnevictable(page) && PageLRU(page)) {
243		struct pagevec *pvec;
244		unsigned long flags;
245
246		page_cache_get(page);
247		local_irq_save(flags);
248		pvec = &__get_cpu_var(lru_rotate_pvecs);
249		if (!pagevec_add(pvec, page))
250			pagevec_move_tail(pvec);
251		local_irq_restore(flags);
252	}
253}
254
255static void update_page_reclaim_stat(struct zone *zone, struct page *page,
256				     int file, int rotated)
257{
258	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
259	struct zone_reclaim_stat *memcg_reclaim_stat;
260
261	memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
262
263	reclaim_stat->recent_scanned[file]++;
264	if (rotated)
265		reclaim_stat->recent_rotated[file]++;
266
267	if (!memcg_reclaim_stat)
268		return;
269
270	memcg_reclaim_stat->recent_scanned[file]++;
271	if (rotated)
272		memcg_reclaim_stat->recent_rotated[file]++;
273}
274
275static void __activate_page(struct page *page, void *arg)
276{
277	struct zone *zone = page_zone(page);
278
279	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
280		int file = page_is_file_cache(page);
281		int lru = page_lru_base_type(page);
282		del_page_from_lru_list(zone, page, lru);
283
284		SetPageActive(page);
285		lru += LRU_ACTIVE;
286		add_page_to_lru_list(zone, page, lru);
287		__count_vm_event(PGACTIVATE);
288
289		update_page_reclaim_stat(zone, page, file, 1);
290	}
291}
292
293#ifdef CONFIG_SMP
294static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
295
296static void activate_page_drain(int cpu)
297{
298	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
299
300	if (pagevec_count(pvec))
301		pagevec_lru_move_fn(pvec, __activate_page, NULL);
302}
303
304void activate_page(struct page *page)
305{
306	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
307		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
308
309		page_cache_get(page);
310		if (!pagevec_add(pvec, page))
311			pagevec_lru_move_fn(pvec, __activate_page, NULL);
312		put_cpu_var(activate_page_pvecs);
313	}
314}
315
316#else
317static inline void activate_page_drain(int cpu)
318{
319}
320
321void activate_page(struct page *page)
322{
323	struct zone *zone = page_zone(page);
324
325	spin_lock_irq(&zone->lru_lock);
326	__activate_page(page, NULL);
327	spin_unlock_irq(&zone->lru_lock);
328}
329#endif
330
331/*
332 * Mark a page as having seen activity.
333 *
334 * inactive,unreferenced	->	inactive,referenced
335 * inactive,referenced		->	active,unreferenced
336 * active,unreferenced		->	active,referenced
337 */
338void mark_page_accessed(struct page *page)
339{
340	if (!PageActive(page) && !PageUnevictable(page) &&
341			PageReferenced(page) && PageLRU(page)) {
342		activate_page(page);
343		ClearPageReferenced(page);
344	} else if (!PageReferenced(page)) {
345		SetPageReferenced(page);
346	}
347}
348
349EXPORT_SYMBOL(mark_page_accessed);
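/*
 * Hedged illustration (annotation, not part of the original file):
 * repeated calls to mark_page_accessed() walk a page through the
 * state transitions documented above. mark_accessed_demo() is a
 * hypothetical helper operating on a page already on the LRU.
 */
static void mark_accessed_demo(struct page *page)
{
	/* inactive,unreferenced -> inactive,referenced */
	mark_page_accessed(page);

	/* inactive,referenced -> active,unreferenced */
	mark_page_accessed(page);
}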
350
351void __lru_cache_add(struct page *page, enum lru_list lru)
352{
353	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
354
355	page_cache_get(page);
356	if (!pagevec_add(pvec, page))
357		____pagevec_lru_add(pvec, lru);
358	put_cpu_var(lru_add_pvecs);
359}
360EXPORT_SYMBOL(__lru_cache_add);
361
362/**
363 * lru_cache_add_lru - add a page to a page list
364 * @page: the page to be added to the LRU.
365 * @lru: the LRU list to which the page is added.
366 */
367void lru_cache_add_lru(struct page *page, enum lru_list lru)
368{
369	if (PageActive(page)) {
370		VM_BUG_ON(PageUnevictable(page));
371		ClearPageActive(page);
372	} else if (PageUnevictable(page)) {
373		VM_BUG_ON(PageActive(page));
374		ClearPageUnevictable(page);
375	}
376
377	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
378	__lru_cache_add(page, lru);
 379}
380
381/**
382 * add_page_to_unevictable_list - add a page to the unevictable list
383 * @page:  the page to be added to the unevictable list
384 *
385 * Add page directly to its zone's unevictable list.  To avoid races with
386 * tasks that might be making the page evictable, through eg. munlock,
387 * munmap or exit, while it's not on the lru, we want to add the page
388 * while it's locked or otherwise "invisible" to other tasks.  This is
389 * difficult to do when using the pagevec cache, so bypass that.
390 */
391void add_page_to_unevictable_list(struct page *page)
392{
393	struct zone *zone = page_zone(page);
394
395	spin_lock_irq(&zone->lru_lock);
396	SetPageUnevictable(page);
397	SetPageLRU(page);
398	add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
399	spin_unlock_irq(&zone->lru_lock);
400}
401
402/*
403 * If the page can not be invalidated, it is moved to the
404 * inactive list to speed up its reclaim.  It is moved to the
405 * head of the list, rather than the tail, to give the flusher
406 * threads some time to write it out, as this is much more
407 * effective than the single-page writeout from reclaim.
408 *
 409 * If the page isn't mapped and is dirty or under writeback, the page
 410 * can be reclaimed ASAP using PG_reclaim.
411 *
412 * 1. active, mapped page -> none
413 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
414 * 3. inactive, mapped page -> none
415 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
416 * 5. inactive, clean -> inactive, tail
417 * 6. Others -> none
418 *
 419 * In case 4, the page is moved to the head of the inactive list
 420 * because the VM expects flusher threads to write it out, which is
 421 * much more effective than the single-page writeout from reclaim.
422 */
423static void lru_deactivate_fn(struct page *page, void *arg)
424{
425	int lru, file;
426	bool active;
427	struct zone *zone = page_zone(page);
428
429	if (!PageLRU(page))
430		return;
431
432	if (PageUnevictable(page))
433		return;
434
435	/* Some processes are using the page */
436	if (page_mapped(page))
437		return;
438
439	active = PageActive(page);
440
441	file = page_is_file_cache(page);
442	lru = page_lru_base_type(page);
443	del_page_from_lru_list(zone, page, lru + active);
444	ClearPageActive(page);
445	ClearPageReferenced(page);
446	add_page_to_lru_list(zone, page, lru);
447
448	if (PageWriteback(page) || PageDirty(page)) {
449		/*
450		 * PG_reclaim could be raced with end_page_writeback
451		 * It can make readahead confusing.  But race window
452		 * is _really_ small and  it's non-critical problem.
453		 */
454		SetPageReclaim(page);
455	} else {
456		/*
 457		 * The page's writeback ended while the page was on the pagevec.
 458		 * Move the page to the tail of the inactive list.
459		 */
460		list_move_tail(&page->lru, &zone->lru[lru].list);
461		mem_cgroup_rotate_reclaimable_page(page);
462		__count_vm_event(PGROTATED);
463	}
464
465	if (active)
466		__count_vm_event(PGDEACTIVATE);
467	update_page_reclaim_stat(zone, page, file, 0);
468}
469
470/*
471 * Drain pages out of the cpu's pagevecs.
472 * Either "cpu" is the current CPU, and preemption has already been
473 * disabled; or "cpu" is being hot-unplugged, and is already dead.
474 */
475static void drain_cpu_pagevecs(int cpu)
476{
477	struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
478	struct pagevec *pvec;
479	int lru;
480
481	for_each_lru(lru) {
482		pvec = &pvecs[lru - LRU_BASE];
483		if (pagevec_count(pvec))
484			____pagevec_lru_add(pvec, lru);
485	}
486
487	pvec = &per_cpu(lru_rotate_pvecs, cpu);
488	if (pagevec_count(pvec)) {
489		unsigned long flags;
490
491		/* No harm done if a racing interrupt already did this */
492		local_irq_save(flags);
493		pagevec_move_tail(pvec);
494		local_irq_restore(flags);
495	}
496
497	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
498	if (pagevec_count(pvec))
499		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
500
501	activate_page_drain(cpu);
502}
503
504/**
505 * deactivate_page - forcefully deactivate a page
506 * @page: page to deactivate
507 *
508 * This function hints the VM that @page is a good reclaim candidate,
509 * for example if its invalidation fails due to the page being dirty
510 * or under writeback.
511 */
512void deactivate_page(struct page *page)
513{
514	/*
 515	 * In a workload with many unevictable pages, such as with mprotect,
 516	 * deactivating unevictable pages to accelerate reclaim is pointless.
517	 */
518	if (PageUnevictable(page))
519		return;
520
521	if (likely(get_page_unless_zero(page))) {
522		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
523
524		if (!pagevec_add(pvec, page))
525			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
526		put_cpu_var(lru_deactivate_pvecs);
527	}
528}
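/*
 * Hedged usage sketch (annotation, not part of the original file):
 * the pattern used by invalidation paths such as
 * invalidate_mapping_pages() - if a page cannot be invalidated
 * (e.g. it is dirty or under writeback), hint reclaim instead.
 * try_drop_or_deactivate() is a hypothetical helper.
 */
static void try_drop_or_deactivate(struct page *page)
{
	if (!invalidate_inode_page(page))
		deactivate_page(page);	/* could not drop it: demote instead */
}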
529
530void lru_add_drain(void)
531{
532	drain_cpu_pagevecs(get_cpu());
533	put_cpu();
534}
535
536static void lru_add_drain_per_cpu(struct work_struct *dummy)
537{
538	lru_add_drain();
539}
540
541/*
542 * Returns 0 for success
543 */
544int lru_add_drain_all(void)
545{
546	return schedule_on_each_cpu(lru_add_drain_per_cpu);
 547}
548
549/*
550 * Batched page_cache_release().  Decrement the reference count on all the
551 * passed pages.  If it fell to zero then remove the page from the LRU and
552 * free it.
553 *
554 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
555 * for the remainder of the operation.
556 *
557 * The locking in this function is against shrink_inactive_list(): we recheck
558 * the page count inside the lock to see whether shrink_inactive_list()
559 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
560 * will free it.
561 */
562void release_pages(struct page **pages, int nr, int cold)
563{
564	int i;
565	struct pagevec pages_to_free;
566	struct zone *zone = NULL;
 567	unsigned long uninitialized_var(flags);
568
569	pagevec_init(&pages_to_free, cold);
570	for (i = 0; i < nr; i++) {
571		struct page *page = pages[i];
572
573		if (unlikely(PageCompound(page))) {
574			if (zone) {
575				spin_unlock_irqrestore(&zone->lru_lock, flags);
576				zone = NULL;
577			}
578			put_compound_page(page);
 579			continue;
580		}
581
 582		if (!put_page_testzero(page))
 583			continue;
 584
585		if (PageLRU(page)) {
586			struct zone *pagezone = page_zone(page);
587
588			if (pagezone != zone) {
589				if (zone)
590					spin_unlock_irqrestore(&zone->lru_lock,
591									flags);
592				zone = pagezone;
593				spin_lock_irqsave(&zone->lru_lock, flags);
 594			}
 595			VM_BUG_ON(!PageLRU(page));
596			__ClearPageLRU(page);
597			del_page_from_lru(zone, page);
598		}
599
600		if (!pagevec_add(&pages_to_free, page)) {
601			if (zone) {
602				spin_unlock_irqrestore(&zone->lru_lock, flags);
603				zone = NULL;
604			}
605			__pagevec_free(&pages_to_free);
606			pagevec_reinit(&pages_to_free);
 607		}
608	}
609	if (zone)
610		spin_unlock_irqrestore(&zone->lru_lock, flags);
611
612	pagevec_free(&pages_to_free);
613}
614EXPORT_SYMBOL(release_pages);
615
616/*
617 * The pages which we're about to release may be in the deferred lru-addition
618 * queues.  That would prevent them from really being freed right now.  That's
619 * OK from a correctness point of view but is inefficient - those pages may be
620 * cache-warm and we want to give them back to the page allocator ASAP.
621 *
622 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
623 * and __pagevec_lru_add_active() call release_pages() directly to avoid
624 * mutual recursion.
625 */
626void __pagevec_release(struct pagevec *pvec)
627{
628	lru_add_drain();
629	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
630	pagevec_reinit(pvec);
631}
632
633EXPORT_SYMBOL(__pagevec_release);
634
635/* used by __split_huge_page_refcount() */
636void lru_add_page_tail(struct zone* zone,
637		       struct page *page, struct page *page_tail)
638{
639	int active;
640	enum lru_list lru;
641	const int file = 0;
642	struct list_head *head;
643
644	VM_BUG_ON(!PageHead(page));
645	VM_BUG_ON(PageCompound(page_tail));
646	VM_BUG_ON(PageLRU(page_tail));
647	VM_BUG_ON(!spin_is_locked(&zone->lru_lock));
648
649	SetPageLRU(page_tail);
650
651	if (page_evictable(page_tail, NULL)) {
652		if (PageActive(page)) {
653			SetPageActive(page_tail);
654			active = 1;
655			lru = LRU_ACTIVE_ANON;
656		} else {
657			active = 0;
658			lru = LRU_INACTIVE_ANON;
659		}
660		update_page_reclaim_stat(zone, page_tail, file, active);
661		if (likely(PageLRU(page)))
662			head = page->lru.prev;
663		else
664			head = &zone->lru[lru].list;
665		__add_page_to_lru_list(zone, page_tail, lru, head);
666	} else {
667		SetPageUnevictable(page_tail);
668		add_page_to_lru_list(zone, page_tail, LRU_UNEVICTABLE);
 669	}
 670}
671
672static void ____pagevec_lru_add_fn(struct page *page, void *arg)
673{
674	enum lru_list lru = (enum lru_list)arg;
675	struct zone *zone = page_zone(page);
676	int file = is_file_lru(lru);
677	int active = is_active_lru(lru);
678
679	VM_BUG_ON(PageActive(page));
680	VM_BUG_ON(PageUnevictable(page));
681	VM_BUG_ON(PageLRU(page));
682
683	SetPageLRU(page);
684	if (active)
685		SetPageActive(page);
686	update_page_reclaim_stat(zone, page, file, active);
687	add_page_to_lru_list(zone, page, lru);
688}
689
690/*
691 * Add the passed pages to the LRU, then drop the caller's refcount
692 * on them.  Reinitialises the caller's pagevec.
693 */
694void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
695{
696	VM_BUG_ON(is_unevictable_lru(lru));
697
698	pagevec_lru_move_fn(pvec, ____pagevec_lru_add_fn, (void *)lru);
699}
 700
 701EXPORT_SYMBOL(____pagevec_lru_add);
702
703/*
704 * Try to drop buffers from the pages in a pagevec
705 */
706void pagevec_strip(struct pagevec *pvec)
707{
708	int i;
709
710	for (i = 0; i < pagevec_count(pvec); i++) {
711		struct page *page = pvec->pages[i];
712
713		if (page_has_private(page) && trylock_page(page)) {
714			if (page_has_private(page))
715				try_to_release_page(page, 0);
716			unlock_page(page);
717		}
718	}
719}
720
721/**
722 * pagevec_lookup - gang pagecache lookup
723 * @pvec:	Where the resulting pages are placed
724 * @mapping:	The address_space to search
725 * @start:	The starting page index
726 * @nr_pages:	The maximum number of pages
727 *
728 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
729 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
730 * reference against the pages in @pvec.
731 *
732 * The search returns a group of mapping-contiguous pages with ascending
733 * indexes.  There may be holes in the indices due to not-present pages.
734 *
735 * pagevec_lookup() returns the number of pages which were found.
736 */
737unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
738		pgoff_t start, unsigned nr_pages)
739{
740	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
741	return pagevec_count(pvec);
742}
743
744EXPORT_SYMBOL(pagevec_lookup);
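/*
 * Hedged usage sketch (annotation, not part of the original file):
 * the common pattern for walking an address_space PAGEVEC_SIZE pages
 * at a time. pagevec_lookup_demo() and the per-page work are
 * illustrative.
 */
static void pagevec_lookup_demo(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
		int i;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* remember where to resume: holes are possible */
			index = page->index + 1;
			/* per-page processing goes here */
		}
		/* drop the references taken by pagevec_lookup() */
		pagevec_release(&pvec);
	}
}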
745
746unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
747		pgoff_t *index, int tag, unsigned nr_pages)
748{
749	pvec->nr = find_get_pages_tag(mapping, index, tag,
750					nr_pages, pvec->pages);
751	return pagevec_count(pvec);
752}
 753
 754EXPORT_SYMBOL(pagevec_lookup_tag);
 755
756/*
757 * Perform any setup for the swap system
758 */
759void __init swap_setup(void)
760{
761	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
762
763#ifdef CONFIG_SWAP
764	bdi_init(swapper_space.backing_dev_info);
765#endif
766
767	/* Use a smaller cluster for small-memory machines */
768	if (megs < 16)
769		page_cluster = 2;
770	else
771		page_cluster = 3;
772	/*
 773	 * Right now other parts of the system mean that we
774	 * _really_ don't want to cluster much more
775	 */
776}
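/*
 * Worked example (annotation, not part of the original file): with
 * 4KiB pages PAGE_SHIFT == 12, so "megs = totalram_pages >> 8"
 * converts pages to MiB - e.g. 2048 pages (8MiB) gives megs == 8,
 * hence page_cluster == 2 and swap readahead works in 2^2 == 4 page
 * (16KiB) clusters.
 */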
v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/mm/swap.c
   4 *
   5 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   6 */
   7
   8/*
   9 * This file contains the default values for the operation of the
  10 * Linux VM subsystem. Fine-tuning documentation can be found in
  11 * Documentation/admin-guide/sysctl/vm.rst.
  12 * Started 18.12.91
  13 * Swap aging added 23.2.95, Stephen Tweedie.
  14 * Buffermem limits added 12.3.98, Rik van Riel.
  15 */
  16
  17#include <linux/mm.h>
  18#include <linux/sched.h>
  19#include <linux/kernel_stat.h>
  20#include <linux/swap.h>
  21#include <linux/mman.h>
  22#include <linux/pagemap.h>
  23#include <linux/pagevec.h>
  24#include <linux/init.h>
  25#include <linux/export.h>
  26#include <linux/mm_inline.h>
  27#include <linux/percpu_counter.h>
  28#include <linux/memremap.h>
  29#include <linux/percpu.h>
  30#include <linux/cpu.h>
  31#include <linux/notifier.h>
  32#include <linux/backing-dev.h>
  33#include <linux/memcontrol.h>
  34#include <linux/gfp.h>
  35#include <linux/uio.h>
  36#include <linux/hugetlb.h>
  37#include <linux/page_idle.h>
  38
  39#include "internal.h"
  40
  41#define CREATE_TRACE_POINTS
  42#include <trace/events/pagemap.h>
  43
  44/* How many pages do we try to swap or page in/out together? */
  45int page_cluster;
  46
  47static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
  48static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
  49static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
  50static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
  51static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs);
  52#ifdef CONFIG_SMP
  53static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
  54#endif
  55
  56/*
  57 * This path almost never happens for VM activity - pages are normally
  58 * freed via pagevecs.  But it gets used by networking.
  59 */
  60static void __page_cache_release(struct page *page)
  61{
  62	if (PageLRU(page)) {
  63		pg_data_t *pgdat = page_pgdat(page);
  64		struct lruvec *lruvec;
  65		unsigned long flags;
  66
  67		spin_lock_irqsave(&pgdat->lru_lock, flags);
  68		lruvec = mem_cgroup_page_lruvec(page, pgdat);
  69		VM_BUG_ON_PAGE(!PageLRU(page), page);
  70		__ClearPageLRU(page);
  71		del_page_from_lru_list(page, lruvec, page_off_lru(page));
  72		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
  73	}
  74	__ClearPageWaiters(page);
  75}
  76
  77static void __put_single_page(struct page *page)
  78{
  79	__page_cache_release(page);
  80	mem_cgroup_uncharge(page);
  81	free_unref_page(page);
  82}
  83
  84static void __put_compound_page(struct page *page)
  85{
  86	compound_page_dtor *dtor;
  87
  88	/*
  89	 * __page_cache_release() is supposed to be called for thp, not for
   90	 * hugetlb. This is because a hugetlb page never has PageLRU set
   91	 * (it is never put on any LRU list) and no memcg routines should
  92	 * be called for hugetlb (it has a separate hugetlb_cgroup.)
  93	 */
  94	if (!PageHuge(page))
  95		__page_cache_release(page);
  96	dtor = get_compound_page_dtor(page);
  97	(*dtor)(page);
  98}
  99
 100void __put_page(struct page *page)
 101{
 102	if (is_zone_device_page(page)) {
 103		put_dev_pagemap(page->pgmap);
 104
  105		/*
  106		 * The page belongs to the device that created pgmap. Do
  107		 * not return it to page allocator.
  108		 */
  109		return;
  110	}
  111
 112	if (unlikely(PageCompound(page)))
 113		__put_compound_page(page);
 114	else
 115		__put_single_page(page);
 116}
 117EXPORT_SYMBOL(__put_page);
 118
 119/**
 120 * put_pages_list() - release a list of pages
 121 * @pages: list of pages threaded on page->lru
 122 *
 123 * Release a list of pages which are strung together on page.lru.  Currently
 124 * used by read_cache_pages() and related error recovery code.
 125 */
 126void put_pages_list(struct list_head *pages)
 127{
 128	while (!list_empty(pages)) {
 129		struct page *victim;
 130
 131		victim = lru_to_page(pages);
 132		list_del(&victim->lru);
 133		put_page(victim);
 134	}
 135}
 136EXPORT_SYMBOL(put_pages_list);
 137
 138/*
 139 * get_kernel_pages() - pin kernel pages in memory
 140 * @kiov:	An array of struct kvec structures
 141 * @nr_segs:	number of segments to pin
 142 * @write:	pinning for read/write, currently ignored
 143 * @pages:	array that receives pointers to the pages pinned.
 144 *		Should be at least nr_segs long.
 145 *
 146 * Returns number of pages pinned. This may be fewer than the number
  147 * requested. If nr_segs is 0 or negative, returns 0. If no pages
 148 * were pinned, returns -errno. Each page returned must be released
 149 * with a put_page() call when it is finished with.
 150 */
 151int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
 152		struct page **pages)
 153{
 154	int seg;
 155
 156	for (seg = 0; seg < nr_segs; seg++) {
 157		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
 158			return seg;
 159
 160		pages[seg] = kmap_to_page(kiov[seg].iov_base);
 161		get_page(pages[seg]);
 162	}
 163
 164	return seg;
 165}
 166EXPORT_SYMBOL_GPL(get_kernel_pages);
 167
 168/*
 169 * get_kernel_page() - pin a kernel page in memory
 170 * @start:	starting kernel address
 171 * @write:	pinning for read/write, currently ignored
 172 * @pages:	array that receives pointer to the page pinned.
 173 *		Must be at least nr_segs long.
 174 *
 175 * Returns 1 if page is pinned. If the page was not pinned, returns
 176 * -errno. The page returned must be released with a put_page() call
 177 * when it is finished with.
 178 */
 179int get_kernel_page(unsigned long start, int write, struct page **pages)
 180{
 181	const struct kvec kiov = {
 182		.iov_base = (void *)start,
 183		.iov_len = PAGE_SIZE
 184	};
 185
 186	return get_kernel_pages(&kiov, 1, write, pages);
 187}
 188EXPORT_SYMBOL_GPL(get_kernel_page);
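/*
 * Hedged usage sketch (annotation, not part of the original file):
 * pinning two page-sized kernel buffers. pin_two_kernel_buffers(),
 * buf0 and buf1 are illustrative; each kvec must cover exactly one
 * page or get_kernel_pages() stops at that segment.
 */
static int pin_two_kernel_buffers(void *buf0, void *buf1,
				  struct page **pages)
{
	struct kvec kiov[2] = {
		{ .iov_base = buf0, .iov_len = PAGE_SIZE },
		{ .iov_base = buf1, .iov_len = PAGE_SIZE },
	};

	/* each returned page must later be released with put_page() */
	return get_kernel_pages(kiov, 2, 0, pages);
}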
 189
 190static void pagevec_lru_move_fn(struct pagevec *pvec,
 191	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
 192	void *arg)
 193{
 194	int i;
 195	struct pglist_data *pgdat = NULL;
 196	struct lruvec *lruvec;
 197	unsigned long flags = 0;
 198
 199	for (i = 0; i < pagevec_count(pvec); i++) {
 200		struct page *page = pvec->pages[i];
 201		struct pglist_data *pagepgdat = page_pgdat(page);
 202
 203		if (pagepgdat != pgdat) {
 204			if (pgdat)
 205				spin_unlock_irqrestore(&pgdat->lru_lock, flags);
 206			pgdat = pagepgdat;
 207			spin_lock_irqsave(&pgdat->lru_lock, flags);
 208		}
 209
 210		lruvec = mem_cgroup_page_lruvec(page, pgdat);
 211		(*move_fn)(page, lruvec, arg);
 212	}
 213	if (pgdat)
 214		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
 215	release_pages(pvec->pages, pvec->nr);
 216	pagevec_reinit(pvec);
 217}
 218
 219static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
 220				 void *arg)
 221{
 222	int *pgmoved = arg;
 223
 224	if (PageLRU(page) && !PageUnevictable(page)) {
 225		del_page_from_lru_list(page, lruvec, page_lru(page));
 226		ClearPageActive(page);
 227		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
 228		(*pgmoved)++;
 229	}
 230}
 231
 232/*
 233 * pagevec_move_tail() must be called with IRQ disabled.
 234 * Otherwise this may cause nasty races.
 235 */
 236static void pagevec_move_tail(struct pagevec *pvec)
 237{
 238	int pgmoved = 0;
 239
 240	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
 241	__count_vm_events(PGROTATED, pgmoved);
 242}
 243
 244/*
 245 * Writeback is about to end against a page which has been marked for immediate
 246 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 247 * inactive list.
 248 */
 249void rotate_reclaimable_page(struct page *page)
 250{
 251	if (!PageLocked(page) && !PageDirty(page) &&
 252	    !PageUnevictable(page) && PageLRU(page)) {
 253		struct pagevec *pvec;
 254		unsigned long flags;
 255
 256		get_page(page);
 257		local_irq_save(flags);
 258		pvec = this_cpu_ptr(&lru_rotate_pvecs);
 259		if (!pagevec_add(pvec, page) || PageCompound(page))
 260			pagevec_move_tail(pvec);
 261		local_irq_restore(flags);
 262	}
 263}
 264
 265static void update_page_reclaim_stat(struct lruvec *lruvec,
 266				     int file, int rotated)
 267{
 268	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
  269
  270	reclaim_stat->recent_scanned[file]++;
  271	if (rotated)
  272		reclaim_stat->recent_rotated[file]++;
 273}
 274
 275static void __activate_page(struct page *page, struct lruvec *lruvec,
 276			    void *arg)
 277{
  278	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
  279		int file = page_is_file_cache(page);
  280		int lru = page_lru_base_type(page);
 281
 282		del_page_from_lru_list(page, lruvec, lru);
 283		SetPageActive(page);
 284		lru += LRU_ACTIVE;
 285		add_page_to_lru_list(page, lruvec, lru);
 286		trace_mm_lru_activate(page);
 287
 288		__count_vm_event(PGACTIVATE);
 289		update_page_reclaim_stat(lruvec, file, 1);
 290	}
 291}
 292
 293#ifdef CONFIG_SMP
 294static void activate_page_drain(int cpu)
 295{
 296	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
 297
 298	if (pagevec_count(pvec))
 299		pagevec_lru_move_fn(pvec, __activate_page, NULL);
 300}
 301
 302static bool need_activate_page_drain(int cpu)
 303{
 304	return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
 305}
 306
 307void activate_page(struct page *page)
 308{
 309	page = compound_head(page);
 310	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 311		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
 312
 313		get_page(page);
 314		if (!pagevec_add(pvec, page) || PageCompound(page))
 315			pagevec_lru_move_fn(pvec, __activate_page, NULL);
 316		put_cpu_var(activate_page_pvecs);
 317	}
 318}
 319
 320#else
 321static inline void activate_page_drain(int cpu)
 322{
 323}
 324
 325void activate_page(struct page *page)
 326{
 327	pg_data_t *pgdat = page_pgdat(page);
 328
 329	page = compound_head(page);
 330	spin_lock_irq(&pgdat->lru_lock);
 331	__activate_page(page, mem_cgroup_page_lruvec(page, pgdat), NULL);
 332	spin_unlock_irq(&pgdat->lru_lock);
 333}
 334#endif
 335
 336static void __lru_cache_activate_page(struct page *page)
 337{
 338	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
 339	int i;
 340
 341	/*
 342	 * Search backwards on the optimistic assumption that the page being
 343	 * activated has just been added to this pagevec. Note that only
 344	 * the local pagevec is examined as a !PageLRU page could be in the
 345	 * process of being released, reclaimed, migrated or on a remote
 346	 * pagevec that is currently being drained. Furthermore, marking
 347	 * a remote pagevec's page PageActive potentially hits a race where
 348	 * a page is marked PageActive just after it is added to the inactive
 349	 * list causing accounting errors and BUG_ON checks to trigger.
 350	 */
 351	for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
 352		struct page *pagevec_page = pvec->pages[i];
 353
 354		if (pagevec_page == page) {
 355			SetPageActive(page);
 356			break;
 357		}
 358	}
 359
 360	put_cpu_var(lru_add_pvec);
 361}
 362
 363/*
 364 * Mark a page as having seen activity.
 365 *
 366 * inactive,unreferenced	->	inactive,referenced
 367 * inactive,referenced		->	active,unreferenced
 368 * active,unreferenced		->	active,referenced
 369 *
 370 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 371 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
 372 */
 373void mark_page_accessed(struct page *page)
 374{
 375	page = compound_head(page);
 376	if (!PageActive(page) && !PageUnevictable(page) &&
 377			PageReferenced(page)) {
 378
 379		/*
 380		 * If the page is on the LRU, queue it for activation via
 381		 * activate_page_pvecs. Otherwise, assume the page is on a
 382		 * pagevec, mark it active and it'll be moved to the active
 383		 * LRU on the next drain.
 384		 */
 385		if (PageLRU(page))
 386			activate_page(page);
 387		else
 388			__lru_cache_activate_page(page);
 389		ClearPageReferenced(page);
 390		if (page_is_file_cache(page))
 391			workingset_activation(page);
 392	} else if (!PageReferenced(page)) {
 393		SetPageReferenced(page);
 394	}
 395	if (page_is_idle(page))
 396		clear_page_idle(page);
 397}
 398EXPORT_SYMBOL(mark_page_accessed);
 399
 400static void __lru_cache_add(struct page *page)
 401{
 402	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
 403
 404	get_page(page);
 405	if (!pagevec_add(pvec, page) || PageCompound(page))
 406		__pagevec_lru_add(pvec);
 407	put_cpu_var(lru_add_pvec);
 408}
 409
 410/**
 411 * lru_cache_add_anon - add a page to the page lists
 412 * @page: the page to add
 413 */
 414void lru_cache_add_anon(struct page *page)
 415{
 416	if (PageActive(page))
 417		ClearPageActive(page);
 418	__lru_cache_add(page);
 419}
 420
 421void lru_cache_add_file(struct page *page)
 422{
 423	if (PageActive(page))
 424		ClearPageActive(page);
 425	__lru_cache_add(page);
 426}
 427EXPORT_SYMBOL(lru_cache_add_file);
 428
 429/**
 430 * lru_cache_add - add a page to a page list
 431 * @page: the page to be added to the LRU.
 432 *
 433 * Queue the page for addition to the LRU via pagevec. The decision on whether
 434 * to add the page to the [in]active [file|anon] list is deferred until the
 435 * pagevec is drained. This gives a chance for the caller of lru_cache_add()
  436 * to have the page added to the active list using mark_page_accessed().
 437 */
 438void lru_cache_add(struct page *page)
 439{
 440	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
 441	VM_BUG_ON_PAGE(PageLRU(page), page);
 442	__lru_cache_add(page);
 443}
 444
 445/**
 446 * lru_cache_add_active_or_unevictable
 447 * @page:  the page to be added to LRU
 448 * @vma:   vma in which page is mapped for determining reclaimability
 449 *
 450 * Place @page on the active or unevictable LRU list, depending on its
 451 * evictability.  Note that if the page is not evictable, it goes
  452 * directly back onto its zone's unevictable list; it does NOT use a
 453 * per cpu pagevec.
 454 */
 455void lru_cache_add_active_or_unevictable(struct page *page,
 456					 struct vm_area_struct *vma)
 457{
 458	VM_BUG_ON_PAGE(PageLRU(page), page);
 459
 460	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
 461		SetPageActive(page);
 462	else if (!TestSetPageMlocked(page)) {
 463		/*
 464		 * We use the irq-unsafe __mod_zone_page_stat because this
 465		 * counter is not modified from interrupt context, and the pte
 466		 * lock is held(spinlock), which implies preemption disabled.
 467		 */
 468		__mod_zone_page_state(page_zone(page), NR_MLOCK,
 469				    hpage_nr_pages(page));
 470		count_vm_event(UNEVICTABLE_PGMLOCKED);
 471	}
 472	lru_cache_add(page);
 473}
 474
 475/*
 476 * If the page can not be invalidated, it is moved to the
 477 * inactive list to speed up its reclaim.  It is moved to the
 478 * head of the list, rather than the tail, to give the flusher
 479 * threads some time to write it out, as this is much more
 480 * effective than the single-page writeout from reclaim.
 481 *
  482 * If the page isn't mapped and is dirty or under writeback, the page
  483 * can be reclaimed ASAP using PG_reclaim.
 484 *
 485 * 1. active, mapped page -> none
 486 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 487 * 3. inactive, mapped page -> none
 488 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 489 * 5. inactive, clean -> inactive, tail
 490 * 6. Others -> none
 491 *
  492 * In case 4, the page is moved to the head of the inactive list
  493 * because the VM expects flusher threads to write it out, which is
  494 * much more effective than the single-page writeout from reclaim.
 495 */
 496static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
 497			      void *arg)
 498{
 499	int lru, file;
 500	bool active;
 501
 502	if (!PageLRU(page))
 503		return;
 504
 505	if (PageUnevictable(page))
 506		return;
 507
 508	/* Some processes are using the page */
 509	if (page_mapped(page))
 510		return;
 511
 512	active = PageActive(page);
 513	file = page_is_file_cache(page);
 514	lru = page_lru_base_type(page);
 515
 516	del_page_from_lru_list(page, lruvec, lru + active);
 517	ClearPageActive(page);
 518	ClearPageReferenced(page);
 519
 520	if (PageWriteback(page) || PageDirty(page)) {
 521		/*
 522		 * PG_reclaim could be raced with end_page_writeback
 523		 * It can make readahead confusing.  But race window
 524		 * is _really_ small and  it's non-critical problem.
 525		 */
 526		add_page_to_lru_list(page, lruvec, lru);
 527		SetPageReclaim(page);
 528	} else {
 529		/*
  530		 * The page's writeback ended while the page was on the pagevec.
  531		 * Move the page to the tail of the inactive list.
 532		 */
 533		add_page_to_lru_list_tail(page, lruvec, lru);
 534		__count_vm_event(PGROTATED);
 535	}
 536
 537	if (active)
 538		__count_vm_event(PGDEACTIVATE);
 539	update_page_reclaim_stat(lruvec, file, 0);
 540}
 541
 542static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
 543			    void *arg)
 544{
 545	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
 546		int file = page_is_file_cache(page);
 547		int lru = page_lru_base_type(page);
 548
 549		del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
 550		ClearPageActive(page);
 551		ClearPageReferenced(page);
 552		add_page_to_lru_list(page, lruvec, lru);
 553
 554		__count_vm_events(PGDEACTIVATE, hpage_nr_pages(page));
 555		update_page_reclaim_stat(lruvec, file, 0);
 556	}
 557}
 558
 559static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
 560			    void *arg)
 561{
 562	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
 563	    !PageSwapCache(page) && !PageUnevictable(page)) {
 564		bool active = PageActive(page);
 565
 566		del_page_from_lru_list(page, lruvec,
 567				       LRU_INACTIVE_ANON + active);
 568		ClearPageActive(page);
 569		ClearPageReferenced(page);
 570		/*
  571		 * lazyfree pages are clean anonymous pages. They have their
  572		 * SwapBacked flag cleared, to distinguish them from normal
  573		 * anonymous pages.
 574		 */
 575		ClearPageSwapBacked(page);
 576		add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
 577
 578		__count_vm_events(PGLAZYFREE, hpage_nr_pages(page));
 579		count_memcg_page_event(page, PGLAZYFREE);
 580		update_page_reclaim_stat(lruvec, 1, 0);
 581	}
 582}
 583
 584/*
 585 * Drain pages out of the cpu's pagevecs.
 586 * Either "cpu" is the current CPU, and preemption has already been
 587 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 588 */
 589void lru_add_drain_cpu(int cpu)
 590{
 591	struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);
 592
 593	if (pagevec_count(pvec))
 594		__pagevec_lru_add(pvec);
 595
 596	pvec = &per_cpu(lru_rotate_pvecs, cpu);
 597	if (pagevec_count(pvec)) {
 598		unsigned long flags;
 599
 600		/* No harm done if a racing interrupt already did this */
 601		local_irq_save(flags);
 602		pagevec_move_tail(pvec);
 603		local_irq_restore(flags);
 604	}
 605
 606	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
 607	if (pagevec_count(pvec))
 608		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
 609
 610	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
 611	if (pagevec_count(pvec))
 612		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
 613
 614	pvec = &per_cpu(lru_lazyfree_pvecs, cpu);
 615	if (pagevec_count(pvec))
 616		pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
 617
 618	activate_page_drain(cpu);
 619}
 620
 621/**
 622 * deactivate_file_page - forcefully deactivate a file page
 623 * @page: page to deactivate
 624 *
 625 * This function hints the VM that @page is a good reclaim candidate,
 626 * for example if its invalidation fails due to the page being dirty
 627 * or under writeback.
 628 */
 629void deactivate_file_page(struct page *page)
 630{
 631	/*
  632	 * In a workload with many unevictable pages, such as with mprotect,
  633	 * deactivating unevictable pages to accelerate reclaim is pointless.
 634	 */
 635	if (PageUnevictable(page))
 636		return;
 637
 638	if (likely(get_page_unless_zero(page))) {
 639		struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
 640
 641		if (!pagevec_add(pvec, page) || PageCompound(page))
 642			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
 643		put_cpu_var(lru_deactivate_file_pvecs);
 644	}
 645}
 646
 647/*
 648 * deactivate_page - deactivate a page
 649 * @page: page to deactivate
 650 *
 651 * deactivate_page() moves @page to the inactive list if @page was on the active
 652 * list and was not an unevictable page.  This is done to accelerate the reclaim
 653 * of @page.
 654 */
 655void deactivate_page(struct page *page)
 656{
 657	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
 658		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
 659
 660		get_page(page);
 661		if (!pagevec_add(pvec, page) || PageCompound(page))
 662			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
 663		put_cpu_var(lru_deactivate_pvecs);
 664	}
 665}
 666
 667/**
 668 * mark_page_lazyfree - make an anon page lazyfree
 669 * @page: page to deactivate
 670 *
 671 * mark_page_lazyfree() moves @page to the inactive file list.
 672 * This is done to accelerate the reclaim of @page.
 673 */
 674void mark_page_lazyfree(struct page *page)
 675{
 676	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
 677	    !PageSwapCache(page) && !PageUnevictable(page)) {
 678		struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);
 679
 680		get_page(page);
 681		if (!pagevec_add(pvec, page) || PageCompound(page))
 682			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
 683		put_cpu_var(lru_lazyfree_pvecs);
 684	}
 685}
 686
 687void lru_add_drain(void)
 688{
 689	lru_add_drain_cpu(get_cpu());
 690	put_cpu();
 691}
 692
 693#ifdef CONFIG_SMP
 694
 695static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 696
 697static void lru_add_drain_per_cpu(struct work_struct *dummy)
 698{
 699	lru_add_drain();
 700}
 701
 702/*
 703 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
 704 * kworkers being shut down before our page_alloc_cpu_dead callback is
 705 * executed on the offlined cpu.
 706 * Calling this function with cpu hotplug locks held can actually lead
 707 * to obscure indirect dependencies via WQ context.
 708 */
 709void lru_add_drain_all(void)
 710{
 711	static DEFINE_MUTEX(lock);
 712	static struct cpumask has_work;
 713	int cpu;
 714
 715	/*
 716	 * Make sure nobody triggers this path before mm_percpu_wq is fully
 717	 * initialized.
 718	 */
 719	if (WARN_ON(!mm_percpu_wq))
 720		return;
 721
 722	mutex_lock(&lock);
 723	cpumask_clear(&has_work);
 724
 725	for_each_online_cpu(cpu) {
 726		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
 727
 728		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
 729		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
 730		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
 731		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
 732		    pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
 733		    need_activate_page_drain(cpu)) {
 734			INIT_WORK(work, lru_add_drain_per_cpu);
 735			queue_work_on(cpu, mm_percpu_wq, work);
 736			cpumask_set_cpu(cpu, &has_work);
 737		}
 738	}
 739
 740	for_each_cpu(cpu, &has_work)
 741		flush_work(&per_cpu(lru_add_drain_work, cpu));
 742
 743	mutex_unlock(&lock);
 744}
 745#else
 746void lru_add_drain_all(void)
 747{
 748	lru_add_drain();
 749}
 750#endif
 751
 752/**
 753 * release_pages - batched put_page()
 754 * @pages: array of pages to release
 755 * @nr: number of pages
  756 *
  757 * Decrement the reference count on all the pages in @pages.  If it
  758 * fell to zero, remove the page from the LRU and free it.
 759 */
 760void release_pages(struct page **pages, int nr)
 761{
 762	int i;
 763	LIST_HEAD(pages_to_free);
 764	struct pglist_data *locked_pgdat = NULL;
 765	struct lruvec *lruvec;
 766	unsigned long uninitialized_var(flags);
 767	unsigned int uninitialized_var(lock_batch);
 768
 769	for (i = 0; i < nr; i++) {
 770		struct page *page = pages[i];
 771
 772		/*
 773		 * Make sure the IRQ-safe lock-holding time does not get
 774		 * excessive with a continuous string of pages from the
 775		 * same pgdat. The lock is held only if pgdat != NULL.
 776		 */
 777		if (locked_pgdat && ++lock_batch == SWAP_CLUSTER_MAX) {
 778			spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
 779			locked_pgdat = NULL;
 780		}
 781
 782		if (is_huge_zero_page(page))
 783			continue;
 784
 785		if (is_zone_device_page(page)) {
 786			if (locked_pgdat) {
 787				spin_unlock_irqrestore(&locked_pgdat->lru_lock,
 788						       flags);
 789				locked_pgdat = NULL;
 790			}
 791			/*
 792			 * ZONE_DEVICE pages that return 'false' from
 793			 * put_devmap_managed_page() do not require special
 794			 * processing, and instead, expect a call to
 795			 * put_page_testzero().
 796			 */
 797			if (put_devmap_managed_page(page))
 798				continue;
 799		}
 800
 801		page = compound_head(page);
 802		if (!put_page_testzero(page))
 803			continue;
 804
 805		if (PageCompound(page)) {
 806			if (locked_pgdat) {
 807				spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
 808				locked_pgdat = NULL;
 809			}
 810			__put_compound_page(page);
 811			continue;
 812		}
 813
 814		if (PageLRU(page)) {
 815			struct pglist_data *pgdat = page_pgdat(page);
 816
 817			if (pgdat != locked_pgdat) {
 818				if (locked_pgdat)
 819					spin_unlock_irqrestore(&locked_pgdat->lru_lock,
 820									flags);
 821				lock_batch = 0;
 822				locked_pgdat = pgdat;
 823				spin_lock_irqsave(&locked_pgdat->lru_lock, flags);
 824			}
 825
 826			lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
 827			VM_BUG_ON_PAGE(!PageLRU(page), page);
 828			__ClearPageLRU(page);
 829			del_page_from_lru_list(page, lruvec, page_off_lru(page));
 830		}
 831
 832		/* Clear Active bit in case of parallel mark_page_accessed */
 833		__ClearPageActive(page);
 834		__ClearPageWaiters(page);
 835
 836		list_add(&page->lru, &pages_to_free);
 837	}
 838	if (locked_pgdat)
 839		spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
 840
 841	mem_cgroup_uncharge_list(&pages_to_free);
 842	free_unref_page_list(&pages_to_free);
 843}
 844EXPORT_SYMBOL(release_pages);
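/*
 * Hedged usage sketch (annotation, not part of the original file):
 * dropping a batch of page references in one call rather than a
 * put_page() loop, so the LRU lock is taken at most once per run of
 * pages from the same pgdat. drop_page_batch() is a hypothetical
 * helper; how the references were obtained is left open.
 */
static void drop_page_batch(struct page **pages, int nr)
{
	/* equivalent to: for (i = 0; i < nr; i++) put_page(pages[i]); */
	release_pages(pages, nr);
}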
 845
 846/*
 847 * The pages which we're about to release may be in the deferred lru-addition
 848 * queues.  That would prevent them from really being freed right now.  That's
 849 * OK from a correctness point of view but is inefficient - those pages may be
 850 * cache-warm and we want to give them back to the page allocator ASAP.
 851 *
 852 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 853 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 854 * mutual recursion.
 855 */
 856void __pagevec_release(struct pagevec *pvec)
 857{
 858	if (!pvec->percpu_pvec_drained) {
 859		lru_add_drain();
 860		pvec->percpu_pvec_drained = true;
 861	}
 862	release_pages(pvec->pages, pagevec_count(pvec));
 863	pagevec_reinit(pvec);
 864}
 865EXPORT_SYMBOL(__pagevec_release);
 866
 867#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 868/* used by __split_huge_page_refcount() */
 869void lru_add_page_tail(struct page *page, struct page *page_tail,
 870		       struct lruvec *lruvec, struct list_head *list)
 871{
  872	const int file = 0;
 873
 874	VM_BUG_ON_PAGE(!PageHead(page), page);
 875	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
 876	VM_BUG_ON_PAGE(PageLRU(page_tail), page);
 877	lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock);
 878
 879	if (!list)
 880		SetPageLRU(page_tail);
 881
 882	if (likely(PageLRU(page)))
 883		list_add_tail(&page_tail->lru, &page->lru);
 884	else if (list) {
 885		/* page reclaim is reclaiming a huge page */
 886		get_page(page_tail);
 887		list_add_tail(&page_tail->lru, list);
 888	} else {
 889		/*
 890		 * Head page has not yet been counted, as an hpage,
 891		 * so we must account for each subpage individually.
 892		 *
 893		 * Put page_tail on the list at the correct position
 894		 * so they all end up in order.
 895		 */
 896		add_page_to_lru_list_tail(page_tail, lruvec,
 897					  page_lru(page_tail));
 898	}
 899
 900	if (!PageUnevictable(page))
 901		update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
 902}
 903#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 904
 905static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 906				 void *arg)
 907{
 908	enum lru_list lru;
 909	int was_unevictable = TestClearPageUnevictable(page);
  910
  911	VM_BUG_ON_PAGE(PageLRU(page), page);
 912
 913	SetPageLRU(page);
 914	/*
 915	 * Page becomes evictable in two ways:
 916	 * 1) Within LRU lock [munlock_vma_page() and __munlock_pagevec()].
 917	 * 2) Before acquiring LRU lock to put the page to correct LRU and then
 918	 *   a) do PageLRU check with lock [check_move_unevictable_pages]
 919	 *   b) do PageLRU check before lock [clear_page_mlock]
 920	 *
 921	 * (1) & (2a) are ok as LRU lock will serialize them. For (2b), we need
 922	 * following strict ordering:
 923	 *
 924	 * #0: __pagevec_lru_add_fn		#1: clear_page_mlock
 925	 *
 926	 * SetPageLRU()				TestClearPageMlocked()
 927	 * smp_mb() // explicit ordering	// above provides strict
 928	 *					// ordering
 929	 * PageMlocked()			PageLRU()
 930	 *
 931	 *
 932	 * if '#1' does not observe setting of PG_lru by '#0' and fails
 933	 * isolation, the explicit barrier will make sure that page_evictable
 934	 * check will put the page in correct LRU. Without smp_mb(), SetPageLRU
 935	 * can be reordered after PageMlocked check and can make '#1' to fail
 936	 * the isolation of the page whose Mlocked bit is cleared (#0 is also
 937	 * looking at the same page) and the evictable page will be stranded
 938	 * in an unevictable LRU.
 939	 */
 940	smp_mb();
 941
 942	if (page_evictable(page)) {
 943		lru = page_lru(page);
 944		update_page_reclaim_stat(lruvec, page_is_file_cache(page),
 945					 PageActive(page));
 946		if (was_unevictable)
 947			count_vm_event(UNEVICTABLE_PGRESCUED);
 948	} else {
 949		lru = LRU_UNEVICTABLE;
 950		ClearPageActive(page);
 951		SetPageUnevictable(page);
 952		if (!was_unevictable)
 953			count_vm_event(UNEVICTABLE_PGCULLED);
 954	}
 955
 956	add_page_to_lru_list(page, lruvec, lru);
 957	trace_mm_lru_insertion(page, lru);
 958}
 959
 960/*
 961 * Add the passed pages to the LRU, then drop the caller's refcount
 962 * on them.  Reinitialises the caller's pagevec.
 963 */
 964void __pagevec_lru_add(struct pagevec *pvec)
 965{
 966	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
 967}
 968EXPORT_SYMBOL(__pagevec_lru_add);
 969
 970/**
 971 * pagevec_lookup_entries - gang pagecache lookup
 972 * @pvec:	Where the resulting entries are placed
 973 * @mapping:	The address_space to search
 974 * @start:	The starting entry index
 975 * @nr_entries:	The maximum number of pages
 976 * @indices:	The cache indices corresponding to the entries in @pvec
 977 *
 978 * pagevec_lookup_entries() will search for and return a group of up
  979 * to @nr_entries pages and shadow entries in the mapping.  All
 980 * entries are placed in @pvec.  pagevec_lookup_entries() takes a
 981 * reference against actual pages in @pvec.
 982 *
 983 * The search returns a group of mapping-contiguous entries with
 984 * ascending indexes.  There may be holes in the indices due to
 985 * not-present entries.
 986 *
 987 * pagevec_lookup_entries() returns the number of entries which were
 988 * found.
 989 */
 990unsigned pagevec_lookup_entries(struct pagevec *pvec,
 991				struct address_space *mapping,
 992				pgoff_t start, unsigned nr_entries,
 993				pgoff_t *indices)
 994{
 995	pvec->nr = find_get_entries(mapping, start, nr_entries,
 996				    pvec->pages, indices);
 997	return pagevec_count(pvec);
 998}
 999
1000/**
1001 * pagevec_remove_exceptionals - pagevec exceptionals pruning
1002 * @pvec:	The pagevec to prune
1003 *
1004 * pagevec_lookup_entries() fills both pages and exceptional radix
1005 * tree entries into the pagevec.  This function prunes all
1006 * exceptionals from @pvec without leaving holes, so that it can be
1007 * passed on to page-only pagevec operations.
1008 */
1009void pagevec_remove_exceptionals(struct pagevec *pvec)
1010{
1011	int i, j;
1012
1013	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
1014		struct page *page = pvec->pages[i];
1015		if (!xa_is_value(page))
1016			pvec->pages[j++] = page;
1017	}
1018	pvec->nr = j;
1019}
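/*
 * Hedged usage sketch (annotation, not part of the original file):
 * looking up pages plus shadow entries, then pruning the
 * exceptionals so only real pages remain for page-only helpers.
 * scan_pages_only() is a hypothetical helper.
 */
static void scan_pages_only(struct address_space *mapping)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = 0;

	pagevec_init(&pvec);
	while (pagevec_lookup_entries(&pvec, mapping, index,
				      PAGEVEC_SIZE, indices)) {
		/* resume after the last entry returned */
		index = indices[pagevec_count(&pvec) - 1] + 1;
		/* drop the value entries; keep only real pages */
		pagevec_remove_exceptionals(&pvec);
		/* page-only processing of pvec.pages[] goes here */
		pagevec_release(&pvec);
	}
}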
1020
1021/**
1022 * pagevec_lookup_range - gang pagecache lookup
1023 * @pvec:	Where the resulting pages are placed
1024 * @mapping:	The address_space to search
1025 * @start:	The starting page index
1026 * @end:	The final page index
1027 *
1028 * pagevec_lookup_range() will search for & return a group of up to PAGEVEC_SIZE
 1029 * pages in the mapping starting from index @start and up to index @end
1030 * (inclusive).  The pages are placed in @pvec.  pagevec_lookup() takes a
1031 * reference against the pages in @pvec.
1032 *
1033 * The search returns a group of mapping-contiguous pages with ascending
1034 * indexes.  There may be holes in the indices due to not-present pages. We
1035 * also update @start to index the next page for the traversal.
1036 *
1037 * pagevec_lookup_range() returns the number of pages which were found. If this
1038 * number is smaller than PAGEVEC_SIZE, the end of specified range has been
1039 * reached.
1040 */
1041unsigned pagevec_lookup_range(struct pagevec *pvec,
1042		struct address_space *mapping, pgoff_t *start, pgoff_t end)
1043{
1044	pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE,
1045					pvec->pages);
1046	return pagevec_count(pvec);
1047}
1048EXPORT_SYMBOL(pagevec_lookup_range);
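/*
 * Hedged usage sketch (annotation, not part of the original file):
 * walking an inclusive index range; unlike the older pagevec_lookup(),
 * pagevec_lookup_range() advances *start itself. walk_range() is a
 * hypothetical helper.
 */
static void walk_range(struct address_space *mapping,
		       pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;

	pagevec_init(&pvec);
	while (pagevec_lookup_range(&pvec, mapping, &start, end)) {
		/* process pvec.pages[0 .. pagevec_count(&pvec)) here */
		pagevec_release(&pvec);
	}
}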
1049
1050unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
1051		struct address_space *mapping, pgoff_t *index, pgoff_t end,
1052		xa_mark_t tag)
1053{
1054	pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
1055					PAGEVEC_SIZE, pvec->pages);
1056	return pagevec_count(pvec);
1057}
1058EXPORT_SYMBOL(pagevec_lookup_range_tag);
1059
1060unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
1061		struct address_space *mapping, pgoff_t *index, pgoff_t end,
1062		xa_mark_t tag, unsigned max_pages)
1063{
1064	pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
1065		min_t(unsigned int, max_pages, PAGEVEC_SIZE), pvec->pages);
1066	return pagevec_count(pvec);
1067}
1068EXPORT_SYMBOL(pagevec_lookup_range_nr_tag);
1069/*
1070 * Perform any setup for the swap system
1071 */
1072void __init swap_setup(void)
1073{
1074	unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);
1075
1076	/* Use a smaller cluster for small-memory machines */
1077	if (megs < 16)
1078		page_cluster = 2;
1079	else
1080		page_cluster = 3;
1081	/*
 1082	 * Right now other parts of the system mean that we
1083	 * _really_ don't want to cluster much more
1084	 */
1085}