v4.17
 
   1/*
   2 *  linux/mm/swap.c
   3 *
   4 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   5 */
   6
   7/*
   8 * This file contains the default values for the operation of the
   9 * Linux VM subsystem. Fine-tuning documentation can be found in
  10 * Documentation/sysctl/vm.txt.
  11 * Started 18.12.91
  12 * Swap aging added 23.2.95, Stephen Tweedie.
  13 * Buffermem limits added 12.3.98, Rik van Riel.
  14 */
  15
  16#include <linux/mm.h>
  17#include <linux/sched.h>
  18#include <linux/kernel_stat.h>
  19#include <linux/swap.h>
  20#include <linux/mman.h>
  21#include <linux/pagemap.h>
  22#include <linux/pagevec.h>
  23#include <linux/init.h>
  24#include <linux/export.h>
  25#include <linux/mm_inline.h>
  26#include <linux/percpu_counter.h>
  27#include <linux/memremap.h>
  28#include <linux/percpu.h>
  29#include <linux/cpu.h>
  30#include <linux/notifier.h>
  31#include <linux/backing-dev.h>
  32#include <linux/memcontrol.h>
  33#include <linux/gfp.h>
  34#include <linux/uio.h>
  35#include <linux/hugetlb.h>
  36#include <linux/page_idle.h>
  37
  38#include "internal.h"
  39
  40#define CREATE_TRACE_POINTS
  41#include <trace/events/pagemap.h>
  42
  43/* How many pages do we try to swap or page in/out together? */
  44int page_cluster;
  45
  46static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
  47static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
  48static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
  49static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs);
  50#ifdef CONFIG_SMP
  51static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
  52#endif
  53
  54/*
  55 * This path almost never happens for VM activity - pages are normally
  56 * freed via pagevecs.  But it gets used by networking.
  57 */
  58static void __page_cache_release(struct page *page)
  59{
  60	if (PageLRU(page)) {
  61		struct zone *zone = page_zone(page);
  62		struct lruvec *lruvec;
  63		unsigned long flags;
  64
  65		spin_lock_irqsave(zone_lru_lock(zone), flags);
  66		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
  67		VM_BUG_ON_PAGE(!PageLRU(page), page);
  68		__ClearPageLRU(page);
  69		del_page_from_lru_list(page, lruvec, page_off_lru(page));
  70		spin_unlock_irqrestore(zone_lru_lock(zone), flags);
  71	}
  72	__ClearPageWaiters(page);
  73	mem_cgroup_uncharge(page);
  74}
  75
  76static void __put_single_page(struct page *page)
  77{
  78	__page_cache_release(page);
  79	free_unref_page(page);
  80}
  81
  82static void __put_compound_page(struct page *page)
  83{
  84	compound_page_dtor *dtor;
  85
  86	/*
  87	 * __page_cache_release() is supposed to be called for thp, not for
  88	 * hugetlb. This is because hugetlb page does never have PageLRU set
  89	 * (it's never listed to any LRU lists) and no memcg routines should
  90	 * be called for hugetlb (it has a separate hugetlb_cgroup.)
  91	 */
  92	if (!PageHuge(page))
  93		__page_cache_release(page);
  94	dtor = get_compound_page_dtor(page);
  95	(*dtor)(page);
  96}
  97
  98void __put_page(struct page *page)
  99{
 100	if (is_zone_device_page(page)) {
 101		put_dev_pagemap(page->pgmap);
 102
 103		/*
 104		 * The page belongs to the device that created pgmap. Do
 105		 * not return it to page allocator.
 106		 */
 107		return;
 108	}
 109
 110	if (unlikely(PageCompound(page)))
 111		__put_compound_page(page);
 112	else
 113		__put_single_page(page);
 114}
 115EXPORT_SYMBOL(__put_page);
 116
 117/**
 118 * put_pages_list() - release a list of pages
 119 * @pages: list of pages threaded on page->lru
 120 *
 121 * Release a list of pages which are strung together on page.lru.  Currently
 122 * used by read_cache_pages() and related error recovery code.
 123 */
 124void put_pages_list(struct list_head *pages)
 125{
 126	while (!list_empty(pages)) {
 127		struct page *victim;
 128
 129		victim = list_entry(pages->prev, struct page, lru);
 130		list_del(&victim->lru);
 131		put_page(victim);
 132	}
 133}
 134EXPORT_SYMBOL(put_pages_list);
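
Example (editor's sketch, not part of mm/swap.c): handing back a privately collected batch of pages in one call. The allocation loop is an assumption for the example; put_pages_list() only requires that each page holds a reference and is threaded on page->lru.

static void put_pages_list_example(void)
{
	LIST_HEAD(batch);
	struct page *page;
	int i;

	for (i = 0; i < 4; i++) {
		page = alloc_page(GFP_KERNEL);	/* refcount == 1 */
		if (!page)
			break;
		list_add(&page->lru, &batch);
	}

	/* Unlinks each page and drops its reference, freeing it. */
	put_pages_list(&batch);
}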
 135
 136/*
 137 * get_kernel_pages() - pin kernel pages in memory
 138 * @kiov:	An array of struct kvec structures
 139 * @nr_segs:	number of segments to pin
 140 * @write:	pinning for read/write, currently ignored
 141 * @pages:	array that receives pointers to the pages pinned.
 142 *		Should be at least nr_segs long.
 143 *
 144 * Returns number of pages pinned. This may be fewer than the number
  145 * requested. If nr_segs is 0 or negative, returns 0. If no pages
 146 * were pinned, returns -errno. Each page returned must be released
 147 * with a put_page() call when it is finished with.
 148 */
 149int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
 150		struct page **pages)
 151{
 152	int seg;
 153
 154	for (seg = 0; seg < nr_segs; seg++) {
 155		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
 156			return seg;
 157
 158		pages[seg] = kmap_to_page(kiov[seg].iov_base);
 159		get_page(pages[seg]);
 160	}
 161
 162	return seg;
 163}
 164EXPORT_SYMBOL_GPL(get_kernel_pages);
 165
 166/*
 167 * get_kernel_page() - pin a kernel page in memory
 168 * @start:	starting kernel address
 169 * @write:	pinning for read/write, currently ignored
 170 * @pages:	array that receives pointer to the page pinned.
 171 *		Must be at least nr_segs long.
 172 *
 173 * Returns 1 if page is pinned. If the page was not pinned, returns
 174 * -errno. The page returned must be released with a put_page() call
 175 * when it is finished with.
 176 */
 177int get_kernel_page(unsigned long start, int write, struct page **pages)
 178{
 179	const struct kvec kiov = {
 180		.iov_base = (void *)start,
 181		.iov_len = PAGE_SIZE
 182	};
 183
 184	return get_kernel_pages(&kiov, 1, write, pages);
 185}
 186EXPORT_SYMBOL_GPL(get_kernel_page);
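
Example (editor's sketch, not part of mm/swap.c): pinning the page behind a page-aligned kernel buffer so it can be handed to code that wants a struct page, e.g. a bio. The buffer allocation and error handling are assumptions for the example.

static int get_kernel_page_example(void)
{
	unsigned long buf = __get_free_page(GFP_KERNEL);
	struct page *page;
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = get_kernel_page(buf, 0, &page);	/* returns 1 on success */
	if (ret == 1) {
		/* ... hand "page" to the I/O path ... */
		put_page(page);			/* drop the pin taken above */
	}

	free_page(buf);
	return ret == 1 ? 0 : ret;
}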
 187
 188static void pagevec_lru_move_fn(struct pagevec *pvec,
 189	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
 190	void *arg)
 191{
 192	int i;
 193	struct pglist_data *pgdat = NULL;
 194	struct lruvec *lruvec;
 195	unsigned long flags = 0;
 196
 197	for (i = 0; i < pagevec_count(pvec); i++) {
 198		struct page *page = pvec->pages[i];
 199		struct pglist_data *pagepgdat = page_pgdat(page);
 200
 201		if (pagepgdat != pgdat) {
 202			if (pgdat)
 203				spin_unlock_irqrestore(&pgdat->lru_lock, flags);
 204			pgdat = pagepgdat;
 205			spin_lock_irqsave(&pgdat->lru_lock, flags);
 206		}
 207
 208		lruvec = mem_cgroup_page_lruvec(page, pgdat);
 209		(*move_fn)(page, lruvec, arg);
 210	}
 211	if (pgdat)
 212		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
 213	release_pages(pvec->pages, pvec->nr);
 214	pagevec_reinit(pvec);
 215}
 216
 217static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
 218				 void *arg)
 219{
 220	int *pgmoved = arg;
 221
 222	if (PageLRU(page) && !PageUnevictable(page)) {
 223		del_page_from_lru_list(page, lruvec, page_lru(page));
 224		ClearPageActive(page);
 225		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
 226		(*pgmoved)++;
 227	}
 228}
 229
 230/*
 231 * pagevec_move_tail() must be called with IRQ disabled.
 232 * Otherwise this may cause nasty races.
 233 */
 234static void pagevec_move_tail(struct pagevec *pvec)
 235{
 236	int pgmoved = 0;
 237
 238	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
 239	__count_vm_events(PGROTATED, pgmoved);
 240}
 241
 242/*
 243 * Writeback is about to end against a page which has been marked for immediate
 244 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 245 * inactive list.
 246 */
 247void rotate_reclaimable_page(struct page *page)
 248{
 249	if (!PageLocked(page) && !PageDirty(page) &&
 250	    !PageUnevictable(page) && PageLRU(page)) {
 251		struct pagevec *pvec;
 252		unsigned long flags;
 253
 254		get_page(page);
 255		local_irq_save(flags);
 256		pvec = this_cpu_ptr(&lru_rotate_pvecs);
 257		if (!pagevec_add(pvec, page) || PageCompound(page))
 258			pagevec_move_tail(pvec);
 259		local_irq_restore(flags);
 260	}
 261}
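
Example (editor's sketch, not part of mm/swap.c): the shape of the caller. The writeback completion path (end_page_writeback() in mm/filemap.c) does essentially this before clearing the writeback bit.

static void writeback_done_example(struct page *page)
{
	/* Tagged for immediate reclaim while it was under writeback? */
	if (PageReclaim(page)) {
		ClearPageReclaim(page);
		rotate_reclaimable_page(page);	/* requeue at inactive tail */
	}
	/* ... clear the writeback bit and wake up waiters ... */
}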
 262
 263static void update_page_reclaim_stat(struct lruvec *lruvec,
 264				     int file, int rotated)
 265{
 266	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
 267
 268	reclaim_stat->recent_scanned[file]++;
 269	if (rotated)
 270		reclaim_stat->recent_rotated[file]++;
 271}
 272
 273static void __activate_page(struct page *page, struct lruvec *lruvec,
 274			    void *arg)
 275{
 276	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 277		int file = page_is_file_cache(page);
 278		int lru = page_lru_base_type(page);
 279
 280		del_page_from_lru_list(page, lruvec, lru);
 281		SetPageActive(page);
 282		lru += LRU_ACTIVE;
 283		add_page_to_lru_list(page, lruvec, lru);
 284		trace_mm_lru_activate(page);
 285
 286		__count_vm_event(PGACTIVATE);
 287		update_page_reclaim_stat(lruvec, file, 1);
 288	}
 289}
 290
 291#ifdef CONFIG_SMP
 292static void activate_page_drain(int cpu)
 293{
 294	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
 295
 296	if (pagevec_count(pvec))
 297		pagevec_lru_move_fn(pvec, __activate_page, NULL);
 298}
 299
 300static bool need_activate_page_drain(int cpu)
 301{
 302	return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
 303}
 304
 305void activate_page(struct page *page)
 306{
 307	page = compound_head(page);
 308	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 309		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
 310
 311		get_page(page);
 312		if (!pagevec_add(pvec, page) || PageCompound(page))
 313			pagevec_lru_move_fn(pvec, __activate_page, NULL);
 314		put_cpu_var(activate_page_pvecs);
 315	}
 316}
 317
 318#else
 319static inline void activate_page_drain(int cpu)
 320{
 321}
 322
 323static bool need_activate_page_drain(int cpu)
 324{
 325	return false;
 326}
 327
 328void activate_page(struct page *page)
 329{
 330	struct zone *zone = page_zone(page);
 331
 332	page = compound_head(page);
 333	spin_lock_irq(zone_lru_lock(zone));
 334	__activate_page(page, mem_cgroup_page_lruvec(page, zone->zone_pgdat), NULL);
 335	spin_unlock_irq(zone_lru_lock(zone));
 336}
 337#endif
 338
 339static void __lru_cache_activate_page(struct page *page)
 340{
 341	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
 342	int i;
 343
 344	/*
 345	 * Search backwards on the optimistic assumption that the page being
 346	 * activated has just been added to this pagevec. Note that only
 347	 * the local pagevec is examined as a !PageLRU page could be in the
 348	 * process of being released, reclaimed, migrated or on a remote
 349	 * pagevec that is currently being drained. Furthermore, marking
 350	 * a remote pagevec's page PageActive potentially hits a race where
 351	 * a page is marked PageActive just after it is added to the inactive
 352	 * list causing accounting errors and BUG_ON checks to trigger.
 353	 */
 354	for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
 355		struct page *pagevec_page = pvec->pages[i];
 356
 357		if (pagevec_page == page) {
 358			SetPageActive(page);
 359			break;
 360		}
 361	}
 362
 363	put_cpu_var(lru_add_pvec);
 364}
 365
 366/*
 367 * Mark a page as having seen activity.
 368 *
 369 * inactive,unreferenced	->	inactive,referenced
 370 * inactive,referenced		->	active,unreferenced
 371 * active,unreferenced		->	active,referenced
 372 *
 373 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 374 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
 375 */
 376void mark_page_accessed(struct page *page)
 377{
 378	page = compound_head(page);
 379	if (!PageActive(page) && !PageUnevictable(page) &&
 380			PageReferenced(page)) {
 381
 382		/*
 383		 * If the page is on the LRU, queue it for activation via
 384		 * activate_page_pvecs. Otherwise, assume the page is on a
 385		 * pagevec, mark it active and it'll be moved to the active
 386		 * LRU on the next drain.
 387		 */
 388		if (PageLRU(page))
 389			activate_page(page);
 390		else
 391			__lru_cache_activate_page(page);
 392		ClearPageReferenced(page);
 393		if (page_is_file_cache(page))
 394			workingset_activation(page);
 395	} else if (!PageReferenced(page)) {
 396		SetPageReferenced(page);
 397	}
 398	if (page_is_idle(page))
 399		clear_page_idle(page);
 400}
 401EXPORT_SYMBOL(mark_page_accessed);
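
Example (editor's sketch, not part of mm/swap.c): the transition ladder documented above, spelled out for a page that starts on the inactive list with no referenced bit set.

static void reference_ladder_example(struct page *page)
{
	mark_page_accessed(page);	/* inactive,unreferenced -> inactive,referenced */
	mark_page_accessed(page);	/* inactive,referenced   -> active,unreferenced */
	mark_page_accessed(page);	/* active,unreferenced   -> active,referenced */
}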
 402
 403static void __lru_cache_add(struct page *page)
 404{
 405	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
 406
 407	get_page(page);
 408	if (!pagevec_add(pvec, page) || PageCompound(page))
 409		__pagevec_lru_add(pvec);
 410	put_cpu_var(lru_add_pvec);
 411}
 412
 413/**
 414 * lru_cache_add_anon - add a page to the page lists
 415 * @page: the page to add
 416 */
 417void lru_cache_add_anon(struct page *page)
 418{
 419	if (PageActive(page))
 420		ClearPageActive(page);
 421	__lru_cache_add(page);
 422}
 423
 424void lru_cache_add_file(struct page *page)
 425{
 426	if (PageActive(page))
 427		ClearPageActive(page);
 428	__lru_cache_add(page);
 429}
 430EXPORT_SYMBOL(lru_cache_add_file);
 431
 432/**
 433 * lru_cache_add - add a page to a page list
 434 * @page: the page to be added to the LRU.
 435 *
 436 * Queue the page for addition to the LRU via pagevec. The decision on whether
 437 * to add the page to the [in]active [file|anon] list is deferred until the
 438 * pagevec is drained. This gives a chance for the caller of lru_cache_add()
  439 * to have the page added to the active list using mark_page_accessed().
 440 */
 441void lru_cache_add(struct page *page)
 442{
 443	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
 444	VM_BUG_ON_PAGE(PageLRU(page), page);
 445	__lru_cache_add(page);
 446}
 447
 448/**
 449 * lru_cache_add_active_or_unevictable
 450 * @page:  the page to be added to LRU
 451 * @vma:   vma in which page is mapped for determining reclaimability
 452 *
 453 * Place @page on the active or unevictable LRU list, depending on its
 454 * evictability.  Note that if the page is not evictable, it goes
  455 * directly back onto its zone's unevictable list; it does NOT use a
  456 * per-cpu pagevec.
 457 */
 458void lru_cache_add_active_or_unevictable(struct page *page,
 459					 struct vm_area_struct *vma)
 460{
 461	VM_BUG_ON_PAGE(PageLRU(page), page);
 462
 463	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
 464		SetPageActive(page);
 465	else if (!TestSetPageMlocked(page)) {
 466		/*
 467		 * We use the irq-unsafe __mod_zone_page_stat because this
 468		 * counter is not modified from interrupt context, and the pte
 469		 * lock is held(spinlock), which implies preemption disabled.
 470		 */
 471		__mod_zone_page_state(page_zone(page), NR_MLOCK,
 472				    hpage_nr_pages(page));
 473		count_vm_event(UNEVICTABLE_PGMLOCKED);
 474	}
 475	lru_cache_add(page);
 476}
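
Example (editor's sketch, not part of mm/swap.c): where this helper sits in a fault path. The surrounding steps are elided assumptions; the point is that the one call covers both the mlocked and the ordinary case.

static void anon_fault_tail_example(struct page *page,
				    struct vm_area_struct *vma)
{
	/* ... page allocated, charged and mapped into the page tables ... */

	/* Active LRU for normal VMAs, unevictable accounting if mlocked. */
	lru_cache_add_active_or_unevictable(page, vma);
}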
 477
 478/*
 479 * If the page can not be invalidated, it is moved to the
 480 * inactive list to speed up its reclaim.  It is moved to the
 481 * head of the list, rather than the tail, to give the flusher
 482 * threads some time to write it out, as this is much more
 483 * effective than the single-page writeout from reclaim.
 484 *
  485 * If the page isn't page_mapped and is dirty/writeback, the page
  486 * could be reclaimed asap using PG_reclaim.
 487 *
 488 * 1. active, mapped page -> none
 489 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 490 * 3. inactive, mapped page -> none
 491 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 492 * 5. inactive, clean -> inactive, tail
 493 * 6. Others -> none
 494 *
  495 * In 4, the page is moved to the head of the inactive list because the VM
  496 * expects it to be written out by flusher threads, as this is much more effective
 497 * than the single-page writeout from reclaim.
 498 */
 499static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
 500			      void *arg)
 501{
 502	int lru, file;
 503	bool active;
 504
 505	if (!PageLRU(page))
 506		return;
 507
 508	if (PageUnevictable(page))
 509		return;
 510
 511	/* Some processes are using the page */
 512	if (page_mapped(page))
 513		return;
 514
 515	active = PageActive(page);
 516	file = page_is_file_cache(page);
 517	lru = page_lru_base_type(page);
 518
 519	del_page_from_lru_list(page, lruvec, lru + active);
 520	ClearPageActive(page);
 521	ClearPageReferenced(page);
 522	add_page_to_lru_list(page, lruvec, lru);
 523
 524	if (PageWriteback(page) || PageDirty(page)) {
 525		/*
  526		 * Setting PG_reclaim could race with end_page_writeback(),
  527		 * which can confuse readahead.  But the race window is
  528		 * _really_ small and it's a non-critical problem.
  529		 */
  530		SetPageReclaim(page);
 531	} else {
 532		/*
  533		 * The page's writeback ended while it was in the pagevec.
  534		 * We move the page to the tail of the inactive list.
 535		 */
 536		list_move_tail(&page->lru, &lruvec->lists[lru]);
 537		__count_vm_event(PGROTATED);
 538	}
 539
 540	if (active)
 541		__count_vm_event(PGDEACTIVATE);
 542	update_page_reclaim_stat(lruvec, file, 0);
 543}
 544
 545
 546static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
 547			    void *arg)
 548{
 549	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
 550	    !PageSwapCache(page) && !PageUnevictable(page)) {
 551		bool active = PageActive(page);
 552
 553		del_page_from_lru_list(page, lruvec,
 554				       LRU_INACTIVE_ANON + active);
 555		ClearPageActive(page);
 556		ClearPageReferenced(page);
 557		/*
 558		 * lazyfree pages are clean anonymous pages. They have
  559		 * the SwapBacked flag cleared, to distinguish them from normal
  560		 * anonymous pages.
 561		 */
 562		ClearPageSwapBacked(page);
 563		add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
 564
 565		__count_vm_events(PGLAZYFREE, hpage_nr_pages(page));
 566		count_memcg_page_event(page, PGLAZYFREE);
 567		update_page_reclaim_stat(lruvec, 1, 0);
 568	}
 569}
 570
 571/*
 572 * Drain pages out of the cpu's pagevecs.
 573 * Either "cpu" is the current CPU, and preemption has already been
 574 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 575 */
 576void lru_add_drain_cpu(int cpu)
 577{
 578	struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);
 579
 580	if (pagevec_count(pvec))
 581		__pagevec_lru_add(pvec);
 582
 583	pvec = &per_cpu(lru_rotate_pvecs, cpu);
 584	if (pagevec_count(pvec)) {
 585		unsigned long flags;
 586
 587		/* No harm done if a racing interrupt already did this */
 588		local_irq_save(flags);
 589		pagevec_move_tail(pvec);
 590		local_irq_restore(flags);
 591	}
 592
 593	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
 594	if (pagevec_count(pvec))
 595		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
 596
 597	pvec = &per_cpu(lru_lazyfree_pvecs, cpu);
 598	if (pagevec_count(pvec))
 599		pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
 600
 601	activate_page_drain(cpu);
 602}
 603
 604/**
 605 * deactivate_file_page - forcefully deactivate a file page
 606 * @page: page to deactivate
 607 *
  608 * This function hints to the VM that @page is a good reclaim candidate,
 609 * for example if its invalidation fails due to the page being dirty
 610 * or under writeback.
 611 */
 612void deactivate_file_page(struct page *page)
 613{
 614	/*
  615	 * In a workload with many unevictable pages, such as one using mprotect,
  616	 * deactivating unevictable pages to accelerate reclaim is pointless.
 617	 */
 618	if (PageUnevictable(page))
 619		return;
 620
 621	if (likely(get_page_unless_zero(page))) {
 622		struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
 623
 624		if (!pagevec_add(pvec, page) || PageCompound(page))
 625			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
 626		put_cpu_var(lru_deactivate_file_pvecs);
 627	}
 628}
 629
 630/**
 631 * mark_page_lazyfree - make an anon page lazyfree
 632 * @page: page to deactivate
 633 *
 634 * mark_page_lazyfree() moves @page to the inactive file list.
 635 * This is done to accelerate the reclaim of @page.
 636 */
 637void mark_page_lazyfree(struct page *page)
 638{
 639	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
 640	    !PageSwapCache(page) && !PageUnevictable(page)) {
 641		struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);
 642
 643		get_page(page);
 644		if (!pagevec_add(pvec, page) || PageCompound(page))
 645			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
 646		put_cpu_var(lru_lazyfree_pvecs);
 647	}
 648}
 649
 650void lru_add_drain(void)
 651{
 652	lru_add_drain_cpu(get_cpu());
 653	put_cpu();
 654}
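
Example (editor's sketch, not part of mm/swap.c): why callers drain. A page queued with lru_cache_add() sits in this CPU's lru_add_pvec until the pagevec fills, so code that needs the page actually on an LRU list flushes first.

static void make_page_visible_example(struct page *page)
{
	lru_cache_add(page);	/* queued in a per-cpu pagevec */

	lru_add_drain();	/* flush this CPU's pagevecs */
	/*
	 * The page is now on its LRU list, assuming the task was not
	 * migrated between the two calls; code that must cover every
	 * CPU uses lru_add_drain_all() instead.
	 */
}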
 655
 656static void lru_add_drain_per_cpu(struct work_struct *dummy)
 657{
 658	lru_add_drain();
 659}
 660
 661static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 662
 663/*
 664 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
 665 * kworkers being shut down before our page_alloc_cpu_dead callback is
 666 * executed on the offlined cpu.
 667 * Calling this function with cpu hotplug locks held can actually lead
 668 * to obscure indirect dependencies via WQ context.
 669 */
 670void lru_add_drain_all(void)
 671{
 672	static DEFINE_MUTEX(lock);
 673	static struct cpumask has_work;
 674	int cpu;
 675
 676	/*
 677	 * Make sure nobody triggers this path before mm_percpu_wq is fully
 678	 * initialized.
 679	 */
 680	if (WARN_ON(!mm_percpu_wq))
 681		return;
 682
 683	mutex_lock(&lock);
 684	cpumask_clear(&has_work);
 685
 686	for_each_online_cpu(cpu) {
 687		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
 688
 689		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
 690		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
 691		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
 692		    pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
 693		    need_activate_page_drain(cpu)) {
 694			INIT_WORK(work, lru_add_drain_per_cpu);
 695			queue_work_on(cpu, mm_percpu_wq, work);
 696			cpumask_set_cpu(cpu, &has_work);
 697		}
 698	}
 699
 700	for_each_cpu(cpu, &has_work)
 701		flush_work(&per_cpu(lru_add_drain_work, cpu));
 702
 703	mutex_unlock(&lock);
 704}
 705
 706/**
 707 * release_pages - batched put_page()
 708 * @pages: array of pages to release
 709 * @nr: number of pages
 710 *
 711 * Decrement the reference count on all the pages in @pages.  If it
 712 * fell to zero, remove the page from the LRU and free it.
 713 */
 714void release_pages(struct page **pages, int nr)
 715{
 716	int i;
 717	LIST_HEAD(pages_to_free);
 718	struct pglist_data *locked_pgdat = NULL;
 719	struct lruvec *lruvec;
 720	unsigned long uninitialized_var(flags);
 721	unsigned int uninitialized_var(lock_batch);
 722
 723	for (i = 0; i < nr; i++) {
 724		struct page *page = pages[i];
 725
 726		/*
 727		 * Make sure the IRQ-safe lock-holding time does not get
 728		 * excessive with a continuous string of pages from the
 729		 * same pgdat. The lock is held only if pgdat != NULL.
 730		 */
 731		if (locked_pgdat && ++lock_batch == SWAP_CLUSTER_MAX) {
 732			spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
 733			locked_pgdat = NULL;
 734		}
 735
 736		if (is_huge_zero_page(page))
 737			continue;
 738
  739		/* A device public page cannot be a huge page */
 740		if (is_device_public_page(page)) {
 741			if (locked_pgdat) {
 742				spin_unlock_irqrestore(&locked_pgdat->lru_lock,
 743						       flags);
 744				locked_pgdat = NULL;
 745			}
 746			put_zone_device_private_or_public_page(page);
 747			continue;
 748		}
 749
 750		page = compound_head(page);
 751		if (!put_page_testzero(page))
 752			continue;
 753
 754		if (PageCompound(page)) {
 755			if (locked_pgdat) {
 756				spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
 757				locked_pgdat = NULL;
 758			}
 759			__put_compound_page(page);
 760			continue;
 761		}
 762
 763		if (PageLRU(page)) {
 764			struct pglist_data *pgdat = page_pgdat(page);
 765
 766			if (pgdat != locked_pgdat) {
 767				if (locked_pgdat)
 768					spin_unlock_irqrestore(&locked_pgdat->lru_lock,
 769									flags);
 770				lock_batch = 0;
 771				locked_pgdat = pgdat;
 772				spin_lock_irqsave(&locked_pgdat->lru_lock, flags);
 773			}
 774
 775			lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
 776			VM_BUG_ON_PAGE(!PageLRU(page), page);
 777			__ClearPageLRU(page);
 778			del_page_from_lru_list(page, lruvec, page_off_lru(page));
 779		}
 780
 781		/* Clear Active bit in case of parallel mark_page_accessed */
 782		__ClearPageActive(page);
 783		__ClearPageWaiters(page);
 784
 785		list_add(&page->lru, &pages_to_free);
 786	}
 787	if (locked_pgdat)
 788		spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
 789
 790	mem_cgroup_uncharge_list(&pages_to_free);
 791	free_unref_page_list(&pages_to_free);
 792}
 793EXPORT_SYMBOL(release_pages);
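
Example (editor's sketch, not part of mm/swap.c): a batched drop of an array of page references, equivalent to a put_page() loop but with the LRU lock taken in batches.

static void drop_page_array_example(struct page **pages, int nr)
{
	release_pages(pages, nr);

	/* ... is the batched form of:
	 *
	 * for (i = 0; i < nr; i++)
	 *	put_page(pages[i]);
	 */
}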
 794
 795/*
 796 * The pages which we're about to release may be in the deferred lru-addition
 797 * queues.  That would prevent them from really being freed right now.  That's
 798 * OK from a correctness point of view but is inefficient - those pages may be
 799 * cache-warm and we want to give them back to the page allocator ASAP.
 800 *
 801 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 802 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 803 * mutual recursion.
 804 */
 805void __pagevec_release(struct pagevec *pvec)
 806{
 807	if (!pvec->percpu_pvec_drained) {
 808		lru_add_drain();
 809		pvec->percpu_pvec_drained = true;
 810	}
 811	release_pages(pvec->pages, pagevec_count(pvec));
 812	pagevec_reinit(pvec);
 813}
 814EXPORT_SYMBOL(__pagevec_release);
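
Example (editor's sketch, not part of mm/swap.c): the usual pagevec lifecycle. pagevec_release() is the inline wrapper from linux/pagevec.h that calls __pagevec_release() only when the pagevec is non-empty.

static void pagevec_lifecycle_example(struct page **pages, int nr)
{
	struct pagevec pvec;
	int i;

	pagevec_init(&pvec);
	for (i = 0; i < nr; i++) {
		/* pagevec_add() returns the space left; 0 means full. */
		if (!pagevec_add(&pvec, pages[i]))
			pagevec_release(&pvec);	/* drop refs, reinit */
	}
	pagevec_release(&pvec);			/* flush the remainder */
}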
 815
 816#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 817/* used by __split_huge_page_refcount() */
 818void lru_add_page_tail(struct page *page, struct page *page_tail,
 819		       struct lruvec *lruvec, struct list_head *list)
 820{
 821	const int file = 0;
 822
 823	VM_BUG_ON_PAGE(!PageHead(page), page);
 824	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
 825	VM_BUG_ON_PAGE(PageLRU(page_tail), page);
 826	VM_BUG_ON(NR_CPUS != 1 &&
 827		  !spin_is_locked(&lruvec_pgdat(lruvec)->lru_lock));
 828
 829	if (!list)
 830		SetPageLRU(page_tail);
 831
 832	if (likely(PageLRU(page)))
 833		list_add_tail(&page_tail->lru, &page->lru);
 834	else if (list) {
 835		/* page reclaim is reclaiming a huge page */
 836		get_page(page_tail);
 837		list_add_tail(&page_tail->lru, list);
 838	} else {
 839		struct list_head *list_head;
 840		/*
 841		 * Head page has not yet been counted, as an hpage,
 842		 * so we must account for each subpage individually.
 843		 *
 844		 * Use the standard add function to put page_tail on the list,
 845		 * but then correct its position so they all end up in order.
 846		 */
 847		add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
 848		list_head = page_tail->lru.prev;
 849		list_move_tail(&page_tail->lru, list_head);
 850	}
 851
 852	if (!PageUnevictable(page))
 853		update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
 854}
 855#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 856
 857static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 858				 void *arg)
 859{
 860	enum lru_list lru;
 861	int was_unevictable = TestClearPageUnevictable(page);
 862
 863	VM_BUG_ON_PAGE(PageLRU(page), page);
 864
 865	SetPageLRU(page);
 866	/*
 867	 * Page becomes evictable in two ways:
 868	 * 1) Within LRU lock [munlock_vma_pages() and __munlock_pagevec()].
 869	 * 2) Before acquiring LRU lock to put the page to correct LRU and then
 870	 *   a) do PageLRU check with lock [check_move_unevictable_pages]
 871	 *   b) do PageLRU check before lock [clear_page_mlock]
 872	 *
 873	 * (1) & (2a) are ok as LRU lock will serialize them. For (2b), we need
 874	 * following strict ordering:
 875	 *
 876	 * #0: __pagevec_lru_add_fn		#1: clear_page_mlock
 877	 *
 878	 * SetPageLRU()				TestClearPageMlocked()
 879	 * smp_mb() // explicit ordering	// above provides strict
 880	 *					// ordering
 881	 * PageMlocked()			PageLRU()
 882	 *
 883	 *
 884	 * if '#1' does not observe setting of PG_lru by '#0' and fails
 885	 * isolation, the explicit barrier will make sure that page_evictable
 886	 * check will put the page in correct LRU. Without smp_mb(), SetPageLRU
 887	 * can be reordered after PageMlocked check and can make '#1' to fail
 888	 * the isolation of the page whose Mlocked bit is cleared (#0 is also
 889	 * looking at the same page) and the evictable page will be stranded
 890	 * in an unevictable LRU.
 891	 */
 892	smp_mb();
 893
 894	if (page_evictable(page)) {
 895		lru = page_lru(page);
 896		update_page_reclaim_stat(lruvec, page_is_file_cache(page),
 897					 PageActive(page));
 898		if (was_unevictable)
 899			count_vm_event(UNEVICTABLE_PGRESCUED);
 900	} else {
 901		lru = LRU_UNEVICTABLE;
 902		ClearPageActive(page);
 903		SetPageUnevictable(page);
 904		if (!was_unevictable)
 905			count_vm_event(UNEVICTABLE_PGCULLED);
 906	}
 907
 908	add_page_to_lru_list(page, lruvec, lru);
 909	trace_mm_lru_insertion(page, lru);
 910}
 911
 912/*
 913 * Add the passed pages to the LRU, then drop the caller's refcount
 914 * on them.  Reinitialises the caller's pagevec.
 915 */
 916void __pagevec_lru_add(struct pagevec *pvec)
 917{
 918	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
 919}
 920EXPORT_SYMBOL(__pagevec_lru_add);
 921
 922/**
 923 * pagevec_lookup_entries - gang pagecache lookup
 924 * @pvec:	Where the resulting entries are placed
 925 * @mapping:	The address_space to search
 926 * @start:	The starting entry index
  927 * @nr_entries:	The maximum number of entries
 928 * @indices:	The cache indices corresponding to the entries in @pvec
 929 *
 930 * pagevec_lookup_entries() will search for and return a group of up
  931 * to @nr_entries pages and shadow entries in the mapping.  All
 932 * entries are placed in @pvec.  pagevec_lookup_entries() takes a
 933 * reference against actual pages in @pvec.
 934 *
 935 * The search returns a group of mapping-contiguous entries with
 936 * ascending indexes.  There may be holes in the indices due to
 937 * not-present entries.
 938 *
 939 * pagevec_lookup_entries() returns the number of entries which were
 940 * found.
 941 */
 942unsigned pagevec_lookup_entries(struct pagevec *pvec,
 943				struct address_space *mapping,
 944				pgoff_t start, unsigned nr_entries,
 945				pgoff_t *indices)
 946{
 947	pvec->nr = find_get_entries(mapping, start, nr_entries,
 948				    pvec->pages, indices);
 949	return pagevec_count(pvec);
 950}
 951
 952/**
 953 * pagevec_remove_exceptionals - pagevec exceptionals pruning
 954 * @pvec:	The pagevec to prune
 955 *
 956 * pagevec_lookup_entries() fills both pages and exceptional radix
 957 * tree entries into the pagevec.  This function prunes all
 958 * exceptionals from @pvec without leaving holes, so that it can be
 959 * passed on to page-only pagevec operations.
 960 */
 961void pagevec_remove_exceptionals(struct pagevec *pvec)
 962{
 963	int i, j;
 964
 965	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
 966		struct page *page = pvec->pages[i];
 967		if (!radix_tree_exceptional_entry(page))
 968			pvec->pages[j++] = page;
 969	}
 970	pvec->nr = j;
 971}
 972
 973/**
 974 * pagevec_lookup_range - gang pagecache lookup
 975 * @pvec:	Where the resulting pages are placed
 976 * @mapping:	The address_space to search
 977 * @start:	The starting page index
 978 * @end:	The final page index
 979 *
 980 * pagevec_lookup_range() will search for & return a group of up to PAGEVEC_SIZE
  981 * pages in the mapping starting from index @start and up to index @end
 982 * (inclusive).  The pages are placed in @pvec.  pagevec_lookup() takes a
 983 * reference against the pages in @pvec.
 984 *
 985 * The search returns a group of mapping-contiguous pages with ascending
 986 * indexes.  There may be holes in the indices due to not-present pages. We
 987 * also update @start to index the next page for the traversal.
 988 *
 989 * pagevec_lookup_range() returns the number of pages which were found. If this
 990 * number is smaller than PAGEVEC_SIZE, the end of specified range has been
 991 * reached.
 992 */
 993unsigned pagevec_lookup_range(struct pagevec *pvec,
 994		struct address_space *mapping, pgoff_t *start, pgoff_t end)
 995{
 996	pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE,
 997					pvec->pages);
 998	return pagevec_count(pvec);
 999}
1000EXPORT_SYMBOL(pagevec_lookup_range);
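
Example (editor's sketch, not part of mm/swap.c): the canonical lookup loop. @start is advanced by pagevec_lookup_range() itself, so the loop simply repeats until the range is exhausted.

static void walk_mapping_example(struct address_space *mapping,
				 pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	int i;

	pagevec_init(&pvec);
	while (pagevec_lookup_range(&pvec, mapping, &start, end)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* ... inspect or operate on "page" ... */
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}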
1001
1002unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
1003		struct address_space *mapping, pgoff_t *index, pgoff_t end,
1004		int tag)
1005{
1006	pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
1007					PAGEVEC_SIZE, pvec->pages);
1008	return pagevec_count(pvec);
1009}
1010EXPORT_SYMBOL(pagevec_lookup_range_tag);
1011
1012unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
1013		struct address_space *mapping, pgoff_t *index, pgoff_t end,
1014		int tag, unsigned max_pages)
1015{
1016	pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
1017		min_t(unsigned int, max_pages, PAGEVEC_SIZE), pvec->pages);
1018	return pagevec_count(pvec);
1019}
1020EXPORT_SYMBOL(pagevec_lookup_range_nr_tag);
1021/*
1022 * Perform any setup for the swap system
1023 */
1024void __init swap_setup(void)
1025{
1026	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
1027
1028	/* Use a smaller cluster for small-memory machines */
1029	if (megs < 16)
1030		page_cluster = 2;
1031	else
1032		page_cluster = 3;
1033	/*
 1034	 * Right now other parts of the system mean that we
1035	 * _really_ don't want to cluster much more
1036	 */
1037}
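
Editor's note (not part of mm/swap.c): page_cluster is a power-of-two exponent, so the values set in swap_setup() above mean 2^2 = 4 pages are swapped together on machines with less than 16 MiB of RAM and 2^3 = 8 pages otherwise. The "megs" computation converts pages to MiB: with 4 KiB pages PAGE_SHIFT is 12, so totalram_pages >> (20 - 12) divides the page count by 256; for example 1048576 pages (4 GiB) >> 8 = 4096 MiB.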
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/mm/swap.c
   4 *
   5 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   6 */
   7
   8/*
   9 * This file contains the default values for the operation of the
  10 * Linux VM subsystem. Fine-tuning documentation can be found in
  11 * Documentation/admin-guide/sysctl/vm.rst.
  12 * Started 18.12.91
  13 * Swap aging added 23.2.95, Stephen Tweedie.
  14 * Buffermem limits added 12.3.98, Rik van Riel.
  15 */
  16
  17#include <linux/mm.h>
  18#include <linux/sched.h>
  19#include <linux/kernel_stat.h>
  20#include <linux/swap.h>
  21#include <linux/mman.h>
  22#include <linux/pagemap.h>
  23#include <linux/pagevec.h>
  24#include <linux/init.h>
  25#include <linux/export.h>
  26#include <linux/mm_inline.h>
  27#include <linux/percpu_counter.h>
  28#include <linux/memremap.h>
  29#include <linux/percpu.h>
  30#include <linux/cpu.h>
  31#include <linux/notifier.h>
  32#include <linux/backing-dev.h>
  33#include <linux/memcontrol.h>
  34#include <linux/gfp.h>
  35#include <linux/uio.h>
  36#include <linux/hugetlb.h>
  37#include <linux/page_idle.h>
  38#include <linux/local_lock.h>
  39#include <linux/buffer_head.h>
  40
  41#include "internal.h"
  42
  43#define CREATE_TRACE_POINTS
  44#include <trace/events/pagemap.h>
  45
  46/* How many pages do we try to swap or page in/out together? As a power of 2 */
  47int page_cluster;
  48const int page_cluster_max = 31;
  49
  50struct cpu_fbatches {
  51	/*
  52	 * The following folio batches are grouped together because they are protected
  53	 * by disabling preemption (and interrupts remain enabled).
  54	 */
  55	local_lock_t lock;
  56	struct folio_batch lru_add;
  57	struct folio_batch lru_deactivate_file;
  58	struct folio_batch lru_deactivate;
  59	struct folio_batch lru_lazyfree;
  60#ifdef CONFIG_SMP
  61	struct folio_batch lru_activate;
  62#endif
  63	/* Protecting the following batches which require disabling interrupts */
  64	local_lock_t lock_irq;
  65	struct folio_batch lru_move_tail;
  66};
  67
  68static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {
  69	.lock = INIT_LOCAL_LOCK(lock),
  70	.lock_irq = INIT_LOCAL_LOCK(lock_irq),
  71};
  72
  73static void __page_cache_release(struct folio *folio, struct lruvec **lruvecp,
  74		unsigned long *flagsp)
  75{
  76	if (folio_test_lru(folio)) {
  77		folio_lruvec_relock_irqsave(folio, lruvecp, flagsp);
  78		lruvec_del_folio(*lruvecp, folio);
  79		__folio_clear_lru_flags(folio);
  80	}
  81}
  82
  83/*
  84 * This path almost never happens for VM activity - pages are normally freed
  85 * in batches.  But it gets used by networking - and for compound pages.
  86 */
  87static void page_cache_release(struct folio *folio)
  88{
  89	struct lruvec *lruvec = NULL;
  90	unsigned long flags;
  91
  92	__page_cache_release(folio, &lruvec, &flags);
  93	if (lruvec)
  94		unlock_page_lruvec_irqrestore(lruvec, flags);
  95}
  96
  97void __folio_put(struct folio *folio)
  98{
  99	if (unlikely(folio_is_zone_device(folio))) {
 100		free_zone_device_folio(folio);
 101		return;
 102	}
 103
 104	if (folio_test_hugetlb(folio)) {
 105		free_huge_folio(folio);
 106		return;
 107	}
 108
 109	page_cache_release(folio);
 110	folio_unqueue_deferred_split(folio);
 111	mem_cgroup_uncharge(folio);
 112	free_unref_page(&folio->page, folio_order(folio));
 113}
 114EXPORT_SYMBOL(__folio_put);
 115
 116typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio);
 117
 118static void lru_add(struct lruvec *lruvec, struct folio *folio)
 119{
 120	int was_unevictable = folio_test_clear_unevictable(folio);
 121	long nr_pages = folio_nr_pages(folio);
 122
 123	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
 124
 125	/*
 126	 * Is an smp_mb__after_atomic() still required here, before
 127	 * folio_evictable() tests the mlocked flag, to rule out the possibility
 128	 * of stranding an evictable folio on an unevictable LRU?  I think
 129	 * not, because __munlock_folio() only clears the mlocked flag
 130	 * while the LRU lock is held.
 131	 *
 132	 * (That is not true of __page_cache_release(), and not necessarily
 133	 * true of folios_put(): but those only clear the mlocked flag after
 134	 * folio_put_testzero() has excluded any other users of the folio.)
 135	 */
 136	if (folio_evictable(folio)) {
 137		if (was_unevictable)
 138			__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
 139	} else {
 140		folio_clear_active(folio);
 141		folio_set_unevictable(folio);
 142		/*
 143		 * folio->mlock_count = !!folio_test_mlocked(folio)?
 144		 * But that leaves __mlock_folio() in doubt whether another
 145		 * actor has already counted the mlock or not.  Err on the
 146		 * safe side, underestimate, let page reclaim fix it, rather
 147		 * than leaving a page on the unevictable LRU indefinitely.
 148		 */
 149		folio->mlock_count = 0;
 150		if (!was_unevictable)
 151			__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
 152	}
 153
 154	lruvec_add_folio(lruvec, folio);
 155	trace_mm_lru_insertion(folio);
 156}
 157
 158static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
 159{
 160	int i;
 161	struct lruvec *lruvec = NULL;
 162	unsigned long flags = 0;
 163
 164	for (i = 0; i < folio_batch_count(fbatch); i++) {
 165		struct folio *folio = fbatch->folios[i];
 166
 167		folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
 168		move_fn(lruvec, folio);
 169
 170		folio_set_lru(folio);
 171	}
 172
 173	if (lruvec)
 174		unlock_page_lruvec_irqrestore(lruvec, flags);
 175	folios_put(fbatch);
 176}
 177
 178static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
 179		struct folio *folio, move_fn_t move_fn,
 180		bool on_lru, bool disable_irq)
 181{
 182	unsigned long flags;
 183
 184	if (on_lru && !folio_test_clear_lru(folio))
 185		return;
 186
 187	folio_get(folio);
 188
 189	if (disable_irq)
 190		local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
 191	else
 192		local_lock(&cpu_fbatches.lock);
 193
 194	if (!folio_batch_add(this_cpu_ptr(fbatch), folio) || folio_test_large(folio) ||
 195	    lru_cache_disabled())
 196		folio_batch_move_lru(this_cpu_ptr(fbatch), move_fn);
 197
 198	if (disable_irq)
 199		local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);
 200	else
 201		local_unlock(&cpu_fbatches.lock);
 202}
 203
 204#define folio_batch_add_and_move(folio, op, on_lru)						\
 205	__folio_batch_add_and_move(								\
 206		&cpu_fbatches.op,								\
 207		folio,										\
 208		op,										\
 209		on_lru,										\
 210		offsetof(struct cpu_fbatches, op) >= offsetof(struct cpu_fbatches, lock_irq)	\
 211	)
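
Editor's note (not part of mm/swap.c): the offsetof() comparison derives disable_irq from the struct cpu_fbatches layout above. Batches declared after lock_irq (currently only lru_move_tail) get the IRQ-disabling lock; the rest are protected by disabling preemption only. For example, the folio_batch_add_and_move(folio, lru_move_tail, true) call in folio_rotate_reclaimable() below expands with disable_irq == true, while the lru_add call in folio_add_lru() expands with disable_irq == false.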
 212
 213static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
 214{
 215	if (folio_test_unevictable(folio))
 216		return;
 217
 218	lruvec_del_folio(lruvec, folio);
 219	folio_clear_active(folio);
 220	lruvec_add_folio_tail(lruvec, folio);
 221	__count_vm_events(PGROTATED, folio_nr_pages(folio));
 222}
 223
 224/*
 225 * Writeback is about to end against a folio which has been marked for
 226 * immediate reclaim.  If it still appears to be reclaimable, move it
 227 * to the tail of the inactive list.
 228 *
 229 * folio_rotate_reclaimable() must disable IRQs, to prevent nasty races.
 230 */
 231void folio_rotate_reclaimable(struct folio *folio)
 232{
 233	if (folio_test_locked(folio) || folio_test_dirty(folio) ||
 234	    folio_test_unevictable(folio))
 235		return;
 236
 237	folio_batch_add_and_move(folio, lru_move_tail, true);
 238}
 239
 240void lru_note_cost(struct lruvec *lruvec, bool file,
 241		   unsigned int nr_io, unsigned int nr_rotated)
 242{
 243	unsigned long cost;
 244
 245	/*
 246	 * Reflect the relative cost of incurring IO and spending CPU
 247	 * time on rotations. This doesn't attempt to make a precise
 248	 * comparison, it just says: if reloads are about comparable
 249	 * between the LRU lists, or rotations are overwhelmingly
 250	 * different between them, adjust scan balance for CPU work.
 251	 */
 252	cost = nr_io * SWAP_CLUSTER_MAX + nr_rotated;
 253
 254	do {
 255		unsigned long lrusize;
 256
 257		/*
  258		 * Holding lruvec->lru_lock is safe here, since
 259		 * 1) The pinned lruvec in reclaim, or
 260		 * 2) From a pre-LRU page during refault (which also holds the
 261		 *    rcu lock, so would be safe even if the page was on the LRU
 262		 *    and could move simultaneously to a new lruvec).
 263		 */
 264		spin_lock_irq(&lruvec->lru_lock);
 265		/* Record cost event */
 266		if (file)
 267			lruvec->file_cost += cost;
 268		else
 269			lruvec->anon_cost += cost;
 270
 271		/*
 272		 * Decay previous events
 273		 *
 274		 * Because workloads change over time (and to avoid
 275		 * overflow) we keep these statistics as a floating
 276		 * average, which ends up weighing recent refaults
 277		 * more than old ones.
 278		 */
 279		lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
 280			  lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
 281			  lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
 282			  lruvec_page_state(lruvec, NR_ACTIVE_FILE);
 283
 284		if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
 285			lruvec->file_cost /= 2;
 286			lruvec->anon_cost /= 2;
 287		}
 288		spin_unlock_irq(&lruvec->lru_lock);
 289	} while ((lruvec = parent_lruvec(lruvec)));
 290}
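
Editor's note (not part of mm/swap.c): a worked example of the cost arithmetic above. With SWAP_CLUSTER_MAX == 32, charging nr_io = 4 and nr_rotated = 10 yields cost = 4 * 32 + 10 = 138, so a single page of refault I/O outweighs dozens of CPU-only rotations. And whenever file_cost + anon_cost grows past a quarter of the LRU size, both are halved, which is what makes the sums behave as the decaying average the comment describes.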
 291
 292void lru_note_cost_refault(struct folio *folio)
 293{
 294	lru_note_cost(folio_lruvec(folio), folio_is_file_lru(folio),
 295		      folio_nr_pages(folio), 0);
 296}
 297
 298static void lru_activate(struct lruvec *lruvec, struct folio *folio)
 299{
 300	long nr_pages = folio_nr_pages(folio);
 301
 302	if (folio_test_active(folio) || folio_test_unevictable(folio))
 303		return;
 304
 305
 306	lruvec_del_folio(lruvec, folio);
 307	folio_set_active(folio);
 308	lruvec_add_folio(lruvec, folio);
 309	trace_mm_lru_activate(folio);
 310
 311	__count_vm_events(PGACTIVATE, nr_pages);
 312	__count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE, nr_pages);
 313}
 314
 315#ifdef CONFIG_SMP
 316static void folio_activate_drain(int cpu)
 317{
 318	struct folio_batch *fbatch = &per_cpu(cpu_fbatches.lru_activate, cpu);
 319
 320	if (folio_batch_count(fbatch))
 321		folio_batch_move_lru(fbatch, lru_activate);
 322}
 323
 324void folio_activate(struct folio *folio)
 325{
 326	if (folio_test_active(folio) || folio_test_unevictable(folio))
 327		return;
 328
 329	folio_batch_add_and_move(folio, lru_activate, true);
 330}
 331
 332#else
 333static inline void folio_activate_drain(int cpu)
 334{
 335}
 336
 337void folio_activate(struct folio *folio)
 338{
 339	struct lruvec *lruvec;
 340
 341	if (!folio_test_clear_lru(folio))
 342		return;
 343
 344	lruvec = folio_lruvec_lock_irq(folio);
 345	lru_activate(lruvec, folio);
 346	unlock_page_lruvec_irq(lruvec);
 347	folio_set_lru(folio);
 348}
 349#endif
 350
 351static void __lru_cache_activate_folio(struct folio *folio)
 352{
 353	struct folio_batch *fbatch;
 354	int i;
 355
 356	local_lock(&cpu_fbatches.lock);
 357	fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);
 358
 359	/*
 360	 * Search backwards on the optimistic assumption that the folio being
 361	 * activated has just been added to this batch. Note that only
 362	 * the local batch is examined as a !LRU folio could be in the
 363	 * process of being released, reclaimed, migrated or on a remote
 364	 * batch that is currently being drained. Furthermore, marking
 365	 * a remote batch's folio active potentially hits a race where
 366	 * a folio is marked active just after it is added to the inactive
 367	 * list causing accounting errors and BUG_ON checks to trigger.
 368	 */
 369	for (i = folio_batch_count(fbatch) - 1; i >= 0; i--) {
 370		struct folio *batch_folio = fbatch->folios[i];
 371
 372		if (batch_folio == folio) {
 373			folio_set_active(folio);
 374			break;
 375		}
 376	}
 377
 378	local_unlock(&cpu_fbatches.lock);
 379}
 380
 381#ifdef CONFIG_LRU_GEN
 382static void folio_inc_refs(struct folio *folio)
 383{
 384	unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
 385
 386	if (folio_test_unevictable(folio))
 387		return;
 388
 389	if (!folio_test_referenced(folio)) {
 390		folio_set_referenced(folio);
 391		return;
 392	}
 393
 394	if (!folio_test_workingset(folio)) {
 395		folio_set_workingset(folio);
 396		return;
 397	}
 398
 399	/* see the comment on MAX_NR_TIERS */
 400	do {
 401		new_flags = old_flags & LRU_REFS_MASK;
 402		if (new_flags == LRU_REFS_MASK)
 403			break;
 404
 405		new_flags += BIT(LRU_REFS_PGOFF);
 406		new_flags |= old_flags & ~LRU_REFS_MASK;
 407	} while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
 408}
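
Editor's note (not part of mm/swap.c): the try_cmpxchg() loop above implements a saturating counter packed into folio->flags. LRU_REFS_MASK selects the counter bits; once they are all set the update is skipped, otherwise adding BIT(LRU_REFS_PGOFF) increments the counter by exactly one while the surrounding flag bits are carried over unchanged.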
 409#else
 410static void folio_inc_refs(struct folio *folio)
 411{
 412}
 413#endif /* CONFIG_LRU_GEN */
 414
 415/**
 416 * folio_mark_accessed - Mark a folio as having seen activity.
 417 * @folio: The folio to mark.
 418 *
 419 * This function will perform one of the following transitions:
 420 *
 421 * * inactive,unreferenced	->	inactive,referenced
 422 * * inactive,referenced	->	active,unreferenced
 423 * * active,unreferenced	->	active,referenced
 424 *
 425 * When a newly allocated folio is not yet visible, so safe for non-atomic ops,
 426 * __folio_set_referenced() may be substituted for folio_mark_accessed().
 427 */
 428void folio_mark_accessed(struct folio *folio)
 429{
 430	if (lru_gen_enabled()) {
 431		folio_inc_refs(folio);
 432		return;
 433	}
 434
 435	if (!folio_test_referenced(folio)) {
 436		folio_set_referenced(folio);
 437	} else if (folio_test_unevictable(folio)) {
 438		/*
 439		 * Unevictable pages are on the "LRU_UNEVICTABLE" list. But,
 440		 * this list is never rotated or maintained, so marking an
 441		 * unevictable page accessed has no effect.
 442		 */
 443	} else if (!folio_test_active(folio)) {
 444		/*
 445		 * If the folio is on the LRU, queue it for activation via
 446		 * cpu_fbatches.lru_activate. Otherwise, assume the folio is in a
 447		 * folio_batch, mark it active and it'll be moved to the active
 448		 * LRU on the next drain.
 449		 */
 450		if (folio_test_lru(folio))
 451			folio_activate(folio);
 452		else
 453			__lru_cache_activate_folio(folio);
 454		folio_clear_referenced(folio);
 455		workingset_activation(folio);
 456	}
 457	if (folio_test_idle(folio))
 458		folio_clear_idle(folio);
 459}
 460EXPORT_SYMBOL(folio_mark_accessed);
 461
 462/**
 463 * folio_add_lru - Add a folio to an LRU list.
 464 * @folio: The folio to be added to the LRU.
 465 *
 466 * Queue the folio for addition to the LRU. The decision on whether
 467 * to add the page to the [in]active [file|anon] list is deferred until the
 468 * folio_batch is drained. This gives a chance for the caller of folio_add_lru()
  469 * to have the folio added to the active list using folio_mark_accessed().
 470 */
 471void folio_add_lru(struct folio *folio)
 472{
 473	VM_BUG_ON_FOLIO(folio_test_active(folio) &&
 474			folio_test_unevictable(folio), folio);
 475	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
 476
 477	/* see the comment in lru_gen_add_folio() */
 478	if (lru_gen_enabled() && !folio_test_unevictable(folio) &&
 479	    lru_gen_in_fault() && !(current->flags & PF_MEMALLOC))
 480		folio_set_active(folio);
 481
 482	folio_batch_add_and_move(folio, lru_add, false);
 483}
 484EXPORT_SYMBOL(folio_add_lru);
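
Example (editor's sketch, not part of mm/swap.c): the pattern the comment above alludes to, assuming the classic LRU (lru_gen disabled) and that the local lru_add batch has not been drained in between.

static void add_then_activate_example(struct folio *folio)
{
	folio_add_lru(folio);		/* queued, not yet on an LRU list */
	folio_mark_accessed(folio);	/* first access: sets the referenced flag */
	folio_mark_accessed(folio);	/* second access: folio is still in the
					 * local batch, so it is activated via
					 * __lru_cache_activate_folio() */
}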
 485
 486/**
  487 * folio_add_lru_vma() - Add a folio to the appropriate LRU list for this VMA.
 488 * @folio: The folio to be added to the LRU.
 489 * @vma: VMA in which the folio is mapped.
 490 *
 491 * If the VMA is mlocked, @folio is added to the unevictable list.
 492 * Otherwise, it is treated the same way as folio_add_lru().
 493 */
 494void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma)
 495{
 496	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
 497
 498	if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED))
 499		mlock_new_folio(folio);
 500	else
 501		folio_add_lru(folio);
 502}
 503
 504/*
 505 * If the folio cannot be invalidated, it is moved to the
 506 * inactive list to speed up its reclaim.  It is moved to the
 507 * head of the list, rather than the tail, to give the flusher
 508 * threads some time to write it out, as this is much more
 509 * effective than the single-page writeout from reclaim.
 510 *
 511 * If the folio isn't mapped and dirty/writeback, the folio
 512 * could be reclaimed asap using the reclaim flag.
 513 *
 514 * 1. active, mapped folio -> none
 515 * 2. active, dirty/writeback folio -> inactive, head, reclaim
 516 * 3. inactive, mapped folio -> none
 517 * 4. inactive, dirty/writeback folio -> inactive, head, reclaim
 518 * 5. inactive, clean -> inactive, tail
 519 * 6. Others -> none
 520 *
 521 * In 4, it moves to the head of the inactive list so the folio is
 522 * written out by flusher threads as this is much more efficient
 523 * than the single-page writeout from reclaim.
 524 */
 525static void lru_deactivate_file(struct lruvec *lruvec, struct folio *folio)
 526{
 527	bool active = folio_test_active(folio);
 528	long nr_pages = folio_nr_pages(folio);
 529
 530	if (folio_test_unevictable(folio))
 531		return;
 532
 533	/* Some processes are using the folio */
 534	if (folio_mapped(folio))
 535		return;
 536
 537	lruvec_del_folio(lruvec, folio);
 538	folio_clear_active(folio);
 539	folio_clear_referenced(folio);
 540
 541	if (folio_test_writeback(folio) || folio_test_dirty(folio)) {
 542		/*
 543		 * Setting the reclaim flag could race with
 544		 * folio_end_writeback() and confuse readahead.  But the
 545		 * race window is _really_ small and  it's not a critical
 546		 * problem.
 547		 */
 548		lruvec_add_folio(lruvec, folio);
 549		folio_set_reclaim(folio);
 550	} else {
 551		/*
 552		 * The folio's writeback ended while it was in the batch.
 553		 * We move that folio to the tail of the inactive list.
 554		 */
 555		lruvec_add_folio_tail(lruvec, folio);
 556		__count_vm_events(PGROTATED, nr_pages);
 557	}
 558
 559	if (active) {
 560		__count_vm_events(PGDEACTIVATE, nr_pages);
 561		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
 562				     nr_pages);
 563	}
 564}
 565
 566static void lru_deactivate(struct lruvec *lruvec, struct folio *folio)
 567{
 568	long nr_pages = folio_nr_pages(folio);
 569
 570	if (folio_test_unevictable(folio) || !(folio_test_active(folio) || lru_gen_enabled()))
 571		return;
 572
 573	lruvec_del_folio(lruvec, folio);
 574	folio_clear_active(folio);
 575	folio_clear_referenced(folio);
 576	lruvec_add_folio(lruvec, folio);
 577
 578	__count_vm_events(PGDEACTIVATE, nr_pages);
 579	__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_pages);
 580}
 581
 582static void lru_lazyfree(struct lruvec *lruvec, struct folio *folio)
 583{
 584	long nr_pages = folio_nr_pages(folio);
 585
 586	if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
 587	    folio_test_swapcache(folio) || folio_test_unevictable(folio))
 588		return;
 589
 590	lruvec_del_folio(lruvec, folio);
 591	folio_clear_active(folio);
 592	folio_clear_referenced(folio);
 593	/*
 594	 * Lazyfree folios are clean anonymous folios.  They have
 595	 * the swapbacked flag cleared, to distinguish them from normal
 596	 * anonymous folios
 597	 */
 598	folio_clear_swapbacked(folio);
 599	lruvec_add_folio(lruvec, folio);
 600
 601	__count_vm_events(PGLAZYFREE, nr_pages);
 602	__count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE, nr_pages);
 603}
 604
 605/*
 606 * Drain pages out of the cpu's folio_batch.
 607 * Either "cpu" is the current CPU, and preemption has already been
 608 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 609 */
 610void lru_add_drain_cpu(int cpu)
 611{
 612	struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
 613	struct folio_batch *fbatch = &fbatches->lru_add;
 614
 615	if (folio_batch_count(fbatch))
 616		folio_batch_move_lru(fbatch, lru_add);
 617
 618	fbatch = &fbatches->lru_move_tail;
 619	/* Disabling interrupts below acts as a compiler barrier. */
 620	if (data_race(folio_batch_count(fbatch))) {
 621		unsigned long flags;
 622
 623		/* No harm done if a racing interrupt already did this */
 624		local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
 625		folio_batch_move_lru(fbatch, lru_move_tail);
 626		local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);
 627	}
 628
 629	fbatch = &fbatches->lru_deactivate_file;
 630	if (folio_batch_count(fbatch))
 631		folio_batch_move_lru(fbatch, lru_deactivate_file);
 632
 633	fbatch = &fbatches->lru_deactivate;
 634	if (folio_batch_count(fbatch))
 635		folio_batch_move_lru(fbatch, lru_deactivate);
 636
 637	fbatch = &fbatches->lru_lazyfree;
 638	if (folio_batch_count(fbatch))
 639		folio_batch_move_lru(fbatch, lru_lazyfree);
 640
 641	folio_activate_drain(cpu);
 642}
 643
 644/**
 645 * deactivate_file_folio() - Deactivate a file folio.
 646 * @folio: Folio to deactivate.
 647 *
 648 * This function hints to the VM that @folio is a good reclaim candidate,
 649 * for example if its invalidation fails due to the folio being dirty
 650 * or under writeback.
 651 *
 652 * Context: Caller holds a reference on the folio.
 653 */
 654void deactivate_file_folio(struct folio *folio)
 655{
 656	/* Deactivating an unevictable folio will not accelerate reclaim */
 657	if (folio_test_unevictable(folio))
 658		return;
 659
 660	folio_batch_add_and_move(folio, lru_deactivate_file, true);
 661}
 662
 663/*
 664 * folio_deactivate - deactivate a folio
 665 * @folio: folio to deactivate
 666 *
 667 * folio_deactivate() moves @folio to the inactive list if @folio was on the
 668 * active list and was not unevictable. This is done to accelerate the
 669 * reclaim of @folio.
 670 */
 671void folio_deactivate(struct folio *folio)
 672{
 673	if (folio_test_unevictable(folio) || !(folio_test_active(folio) || lru_gen_enabled()))
 674		return;
 675
 676	folio_batch_add_and_move(folio, lru_deactivate, true);
 677}
 678
 679/**
 680 * folio_mark_lazyfree - make an anon folio lazyfree
 681 * @folio: folio to deactivate
 682 *
 683 * folio_mark_lazyfree() moves @folio to the inactive file list.
 684 * This is done to accelerate the reclaim of @folio.
 685 */
 686void folio_mark_lazyfree(struct folio *folio)
 687{
 688	if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
 689	    folio_test_swapcache(folio) || folio_test_unevictable(folio))
 690		return;
 691
 692	folio_batch_add_and_move(folio, lru_lazyfree, true);
 693}
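
/*
 * A user-space illustration (a separate program, not kernel code): the
 * usual routes into folio_deactivate() and folio_mark_lazyfree() are
 * madvise(MADV_COLD) and madvise(MADV_FREE) respectively. The mapping
 * size below is an arbitrary assumption.
 */
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 64UL << 20;	/* 64 MiB of private anon memory */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	memset(buf, 1, len);		/* fault the folios in */
	madvise(buf, len, MADV_COLD);	/* hint: deactivate them */
	madvise(buf, len, MADV_FREE);	/* hint: mark them lazyfree */
	return 0;
}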
 694
 695void lru_add_drain(void)
 696{
 697	local_lock(&cpu_fbatches.lock);
 698	lru_add_drain_cpu(smp_processor_id());
 699	local_unlock(&cpu_fbatches.lock);
 700	mlock_drain_local();
 701}
 702
 703/*
 704 * It's called from per-cpu workqueue context in the SMP case, so
 705 * lru_add_drain_cpu and invalidate_bh_lrus_cpu should run on
 706 * the same CPU. This isn't a problem in the !SMP case, since
 707 * there is only one core and the locks disable preemption.
 708 */
 709static void lru_add_and_bh_lrus_drain(void)
 710{
 711	local_lock(&cpu_fbatches.lock);
 712	lru_add_drain_cpu(smp_processor_id());
 713	local_unlock(&cpu_fbatches.lock);
 714	invalidate_bh_lrus_cpu();
 715	mlock_drain_local();
 716}
 717
 718void lru_add_drain_cpu_zone(struct zone *zone)
 719{
 720	local_lock(&cpu_fbatches.lock);
 721	lru_add_drain_cpu(smp_processor_id());
 722	drain_local_pages(zone);
 723	local_unlock(&cpu_fbatches.lock);
 724	mlock_drain_local();
 725}
 726
 727#ifdef CONFIG_SMP
 728
 729static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 730
 731static void lru_add_drain_per_cpu(struct work_struct *dummy)
 732{
 733	lru_add_and_bh_lrus_drain();
 734}
 735
 736static bool cpu_needs_drain(unsigned int cpu)
 737{
 738	struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
 739
 740	/* Check these in order of likelihood that they're not zero */
 741	return folio_batch_count(&fbatches->lru_add) ||
 742		folio_batch_count(&fbatches->lru_move_tail) ||
 743		folio_batch_count(&fbatches->lru_deactivate_file) ||
 744		folio_batch_count(&fbatches->lru_deactivate) ||
 745		folio_batch_count(&fbatches->lru_lazyfree) ||
 746		folio_batch_count(&fbatches->lru_activate) ||
 747		need_mlock_drain(cpu) ||
 748		has_bh_in_lru(cpu, NULL);
 749}
 750
 751/*
 752 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
 753 * kworkers being shut down before our page_alloc_cpu_dead callback is
 754 * executed on the offlined cpu.
 755 * Calling this function with cpu hotplug locks held can actually lead
 756 * to obscure indirect dependencies via WQ context.
 757 */
 758static inline void __lru_add_drain_all(bool force_all_cpus)
 759{
 760	/*
 761	 * lru_drain_gen - Global LRU-drain generation number
 762	 *
 763	 * (A) Definition: global lru_drain_gen = x implies that all generations
 764	 *     0 < n <= x are already *scheduled* for draining.
 765	 *
 766	 * This is an optimization for the highly-contended use case where a
 767	 * user space workload keeps constantly generating a flow of pages for
 768	 * each CPU.
 769	 */
 770	static unsigned int lru_drain_gen;
 771	static struct cpumask has_work;
 772	static DEFINE_MUTEX(lock);
 773	unsigned cpu, this_gen;
 774
 775	/*
 776	 * Make sure nobody triggers this path before mm_percpu_wq is fully
 777	 * initialized.
 778	 */
 779	if (WARN_ON(!mm_percpu_wq))
 780		return;
 781
 782	/*
 783	 * Guarantee that the folio_batch counter stores made by this
 784	 * CPU are visible to other CPUs before the current drain
 785	 * generation is loaded.
 786	 */
 787	smp_mb();
 788
 789	/*
 790	 * (B) Locally cache global LRU draining generation number
 791	 *
 792	 * The read barrier ensures that the counter is loaded before the mutex
 793	 * is taken. It pairs with smp_mb() inside the mutex critical section
 794	 * at (D).
 795	 */
 796	this_gen = smp_load_acquire(&lru_drain_gen);
 797
 798	mutex_lock(&lock);
 799
 800	/*
 801	 * (C) Exit the draining operation if a newer generation, from another
 802	 * lru_add_drain_all(), was already scheduled for draining. Check (A).
 803	 */
 804	if (unlikely(this_gen != lru_drain_gen && !force_all_cpus))
 805		goto done;
 806
 807	/*
 808	 * (D) Increment global generation number
 809	 *
 810	 * Pairs with smp_load_acquire() at (B), outside of the critical
 811	 * section. Use a full memory barrier to guarantee that the
 812	 * new global drain generation number is stored before loading
 813	 * folio_batch counters.
 814	 *
 815	 * This pairing must be done here, before the for_each_online_cpu loop
 816	 * below, which drains the folio batches.
 817	 *
 818	 * Let x, y, and z represent some system CPU numbers, where x < y < z.
 819	 * Assume CPU #z is in the middle of the for_each_online_cpu loop
 820	 * below and has already reached CPU #y's per-cpu data. CPU #x comes
 821	 * along, adds some pages to its per-cpu vectors, then calls
 822	 * lru_add_drain_all().
 823	 *
 824	 * If the paired barrier is done at any later step, e.g. after the
 825	 * loop, CPU #x will just exit at (C) and miss flushing out all of its
 826	 * added pages.
 827	 */
 828	WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
 829	smp_mb();
 830
 831	cpumask_clear(&has_work);
 832	for_each_online_cpu(cpu) {
 833		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
 834
 835		if (cpu_needs_drain(cpu)) {
 836			INIT_WORK(work, lru_add_drain_per_cpu);
 837			queue_work_on(cpu, mm_percpu_wq, work);
 838			__cpumask_set_cpu(cpu, &has_work);
 839		}
 840	}
 841
 842	for_each_cpu(cpu, &has_work)
 843		flush_work(&per_cpu(lru_add_drain_work, cpu));
 844
 845done:
 846	mutex_unlock(&lock);
 847}
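
/*
 * A user-space model (a separate program, not kernel code) of the
 * generation short cut at (B)-(D) above, with C11 atomics and a pthread
 * mutex standing in for smp_load_acquire() and the static mutex. All
 * names here are illustrative assumptions.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint drain_gen;		/* models lru_drain_gen */
static pthread_mutex_t drain_lock = PTHREAD_MUTEX_INITIALIZER;

static void drain_all(void)
{
	/* (B) sample the generation before taking the lock */
	unsigned int this_gen = atomic_load_explicit(&drain_gen,
						     memory_order_acquire);

	pthread_mutex_lock(&drain_lock);
	/* (C) a newer generation was already scheduled: nothing to do */
	if (this_gen != atomic_load(&drain_gen)) {
		pthread_mutex_unlock(&drain_lock);
		return;
	}
	/* (D) publish the new generation before the expensive work */
	atomic_fetch_add(&drain_gen, 1);
	printf("draining on behalf of generation %u\n", this_gen + 1);
	pthread_mutex_unlock(&drain_lock);
}

int main(void)
{
	drain_all();	/* does the work */
	drain_all();	/* sequential call: generations match, works again */
	return 0;
}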
 848
 849void lru_add_drain_all(void)
 850{
 851	__lru_add_drain_all(false);
 852}
 853#else
 854void lru_add_drain_all(void)
 855{
 856	lru_add_drain();
 857}
 858#endif /* CONFIG_SMP */
 859
 860atomic_t lru_disable_count = ATOMIC_INIT(0);
 861
 862/*
 863 * lru_cache_disable() needs to be called before we start compiling
 864 * a list of folios to be migrated using folio_isolate_lru().
 865 * It drains the folios on the per-CPU LRU caches and then disables
 866 * them on all CPUs until lru_cache_enable() is called.
 867 *
 868 * Must be paired with a call to lru_cache_enable().
 869 */
 870void lru_cache_disable(void)
 871{
 872	atomic_inc(&lru_disable_count);
 873	/*
 874	 * Readers of lru_disable_count are protected by either disabling
 875	 * preemption or rcu_read_lock:
 876	 *
 877	 * preempt_disable, local_irq_disable  [bh_lru_lock()]
 878	 * rcu_read_lock		       [rt_spin_lock CONFIG_PREEMPT_RT]
 879	 * preempt_disable		       [local_lock !CONFIG_PREEMPT_RT]
 880	 *
 881	 * Since v5.1, synchronize_rcu() is guaranteed to wait on
 882	 * preempt_disable() regions of code. So any CPU which sees
 883	 * lru_disable_count = 0 will have exited the critical
 884	 * section when synchronize_rcu() returns.
 885	 */
 886	synchronize_rcu_expedited();
 887#ifdef CONFIG_SMP
 888	__lru_add_drain_all(true);
 889#else
 890	lru_add_and_bh_lrus_drain();
 891#endif
 892}
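
/*
 * A hedged sketch of the required pairing, in the spirit of the
 * migration callers: the helper name, array argument and loop body are
 * assumptions for illustration. Each folio must already carry a
 * reference held by the caller.
 */
static void isolate_candidates(struct folio **folios, int nr,
			       struct list_head *pagelist)
{
	int i;

	lru_cache_disable();	/* drain, then suppress per-CPU batching */
	for (i = 0; i < nr; i++) {
		if (folio_isolate_lru(folios[i]))
			list_add_tail(&folios[i]->lru, pagelist);
	}
	lru_cache_enable();	/* the mandatory pairing noted above */
}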
 893
 894/**
 895 * folios_put_refs - Reduce the reference count on a batch of folios.
 896 * @folios: The folios.
 897 * @refs: The number of refs to subtract from each folio.
 898 *
 899 * Like folio_put(), but for a batch of folios.  This is more efficient
 900 * than writing the loop yourself as it will optimise the locks which need
 901 * to be taken if the folios are freed.  The folio batch is returned
 902 * empty and ready to be reused for another batch; there is no need
 903 * to reinitialise it.  If @refs is NULL, we subtract one from each
 904 * folio refcount.
 905 *
 906 * Context: May be called in process or interrupt context, but not in NMI
 907 * context.  May be called while holding a spinlock.
 908 */
 909void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
 910{
 911	int i, j;
 912	struct lruvec *lruvec = NULL;
 913	unsigned long flags = 0;
 914
 915	for (i = 0, j = 0; i < folios->nr; i++) {
 916		struct folio *folio = folios->folios[i];
 917		unsigned int nr_refs = refs ? refs[i] : 1;
 918
 919		if (is_huge_zero_folio(folio))
 920			continue;
 921
 922		if (folio_is_zone_device(folio)) {
 923			if (lruvec) {
 924				unlock_page_lruvec_irqrestore(lruvec, flags);
 925				lruvec = NULL;
 926			}
 927			if (put_devmap_managed_folio_refs(folio, nr_refs))
 928				continue;
 929			if (folio_ref_sub_and_test(folio, nr_refs))
 930				free_zone_device_folio(folio);
 931			continue;
 932		}
 933
 934		if (!folio_ref_sub_and_test(folio, nr_refs))
 935			continue;
 936
 937		/* hugetlb has its own memcg */
 938		if (folio_test_hugetlb(folio)) {
 939			if (lruvec) {
 940				unlock_page_lruvec_irqrestore(lruvec, flags);
 941				lruvec = NULL;
 942			}
 943			free_huge_folio(folio);
 944			continue;
 945		}
 946		folio_unqueue_deferred_split(folio);
 947		__page_cache_release(folio, &lruvec, &flags);
 948
 949		if (j != i)
 950			folios->folios[j] = folio;
 951		j++;
 952	}
 953	if (lruvec)
 954		unlock_page_lruvec_irqrestore(lruvec, flags);
 955	if (!j) {
 956		folio_batch_reinit(folios);
 957		return;
 958	}
 959
 960	folios->nr = j;
 961	mem_cgroup_uncharge_folios(folios);
 962	free_unref_folios(folios);
 963}
 964EXPORT_SYMBOL(folios_put_refs);
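
/*
 * A hedged usage sketch: drop one reference on each folio in an array
 * via the batched path. The helper name is an assumption; the
 * flush-when-full pattern mirrors release_pages() below.
 */
static void put_folio_array(struct folio **folios, unsigned int nr)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);
	for (i = 0; i < nr; i++) {
		/* folio_batch_add() returns the space remaining */
		if (!folio_batch_add(&fbatch, folios[i]))
			folios_put_refs(&fbatch, NULL);	/* full: flush */
	}
	if (folio_batch_count(&fbatch))
		folios_put_refs(&fbatch, NULL);	/* NULL refs == one each */
}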
 965
 966/**
 967 * release_pages - batched put_page()
 968 * @arg: array of pages to release
 969 * @nr: number of pages
 970 *
 971 * Decrement the reference count on all the pages in @arg.  If it
 972 * fell to zero, remove the page from the LRU and free it.
 973 *
 974 * Note that the argument can be an array of pages, encoded pages,
 975 * or folio pointers. We ignore any encoded bits, and turn any of
 976 * them into just a folio that gets freed.
 977 */
 978void release_pages(release_pages_arg arg, int nr)
 979{
 980	struct folio_batch fbatch;
 981	int refs[PAGEVEC_SIZE];
 982	struct encoded_page **encoded = arg.encoded_pages;
 983	int i;
 984
 985	folio_batch_init(&fbatch);
 986	for (i = 0; i < nr; i++) {
 987		/* Turn any of the argument types into a folio */
 988		struct folio *folio = page_folio(encoded_page_ptr(encoded[i]));
 989
 990		/* Does the next entry encode an "nr_pages" count to use as "nr_refs"? */
 991		refs[fbatch.nr] = 1;
 992		if (unlikely(encoded_page_flags(encoded[i]) &
 993			     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
 994			refs[fbatch.nr] = encoded_nr_pages(encoded[++i]);
 995
 996		if (folio_batch_add(&fbatch, folio) > 0)
 997			continue;
 998		folios_put_refs(&fbatch, refs);
 999	}
1000
1001	if (fbatch.nr)
1002		folios_put_refs(&fbatch, refs);
1003}
1004EXPORT_SYMBOL(release_pages);
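
/*
 * A hedged usage sketch of the classic pin-use-release pattern; the
 * function name, fixed batch size and minimal error handling are
 * assumptions for illustration.
 */
static long peek_user_pages(unsigned long start, unsigned long nr)
{
	struct page *pages[16];
	long pinned;

	if (nr > ARRAY_SIZE(pages))
		nr = ARRAY_SIZE(pages);
	pinned = get_user_pages_fast(start, nr, 0, pages);
	if (pinned <= 0)
		return pinned;
	/* ... read from the pinned pages here ... */
	release_pages(pages, pinned);	/* batched put_page() */
	return pinned;
}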
1005
1006/*
1007 * The folios which we're about to release may be in the deferred lru-addition
1008 * queues.  That would prevent them from really being freed right now.  That's
1009 * OK from a correctness point of view but is inefficient - those folios may be
1010 * cache-warm and we want to give them back to the page allocator ASAP.
1011 *
1012 * So __folio_batch_release() will drain those queues here.
1013 * folio_batch_move_lru() calls folios_put() directly to avoid
1014 * mutual recursion.
1015 */
1016void __folio_batch_release(struct folio_batch *fbatch)
1017{
1018	if (!fbatch->percpu_pvec_drained) {
1019		lru_add_drain();
1020		fbatch->percpu_pvec_drained = true;
1021	}
1022	folios_put(fbatch);
1023}
1024EXPORT_SYMBOL(__folio_batch_release);
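
/*
 * A hedged usage sketch of the release path above: iterate a mapping
 * with filemap_get_folios() and let folio_batch_release() (the wrapper
 * around __folio_batch_release()) drop the references. The helper name
 * is an assumption.
 */
static void scan_mapping(struct address_space *mapping)
{
	struct folio_batch fbatch;
	pgoff_t start = 0;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &start, ULONG_MAX, &fbatch)) {
		/* ... inspect fbatch.folios[0 .. folio_batch_count() - 1] ... */
		folio_batch_release(&fbatch);	/* drains LRU adds, then puts */
	}
}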
1025
1026/**
1027 * folio_batch_remove_exceptionals() - Prune non-folios from a batch.
1028 * @fbatch: The batch to prune
1029 *
1030 * find_get_entries() fills a batch with both folios and shadow/swap/DAX
1031 * entries.  This function prunes all the non-folio entries from @fbatch
1032 * without leaving holes, so that it can be passed on to folio-only batch
1033 * operations.
1034 */
1035void folio_batch_remove_exceptionals(struct folio_batch *fbatch)
1036{
1037	unsigned int i, j;
1038
1039	for (i = 0, j = 0; i < folio_batch_count(fbatch); i++) {
1040		struct folio *folio = fbatch->folios[i];
1041		if (!xa_is_value(folio))
1042			fbatch->folios[j++] = folio;
1043	}
1044	fbatch->nr = j;
1045}
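
/*
 * A hedged usage sketch following the kernel-doc above: fill a batch
 * with find_get_entries(), prune the value entries, then operate on
 * real folios only. The helper name is an assumption.
 */
static void scan_entries(struct address_space *mapping)
{
	struct folio_batch fbatch;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t start = 0;

	folio_batch_init(&fbatch);
	while (find_get_entries(mapping, &start, ULONG_MAX, &fbatch, indices)) {
		/* drop shadow/swap/DAX entries, keeping only real folios */
		folio_batch_remove_exceptionals(&fbatch);
		/* ... work on the remaining folios ... */
		folio_batch_release(&fbatch);
	}
}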
1046
1047/*
1048 * Perform any setup for the swap system
1049 */
1050void __init swap_setup(void)
1051{
1052	unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);
1053
1054	/* Use a smaller cluster for small-memory machines */
1055	if (megs < 16)
1056		page_cluster = 2;
1057	else
1058		page_cluster = 3;
1059	/*
1060	 * Right now other parts of the system mean that we
1061	 * _really_ don't want to cluster much more.
1062	 */
1063}
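
/*
 * A worked example of the sizing above, assuming 4 KiB pages
 * (PAGE_SHIFT == 12) on a 4 GiB machine:
 *
 *	totalram_pages() == 1048576
 *	megs             == 1048576 >> (20 - 12) == 4096  (>= 16)
 *	page_cluster     == 3, so swap readahead works on up to
 *	                    1 << 3 == 8 pages at a time
 */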